| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
"""
Convert to and from Roman numerals
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
Note:
This has been modified to add optional characters
after the initial roman numbers by nlw.
"""
import re
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
# Map Roman numeral tokens to their integer values
romanNumeralMap = (('M', 1000),
('CM', 900),
('D', 500),
('CD', 400),
('C', 100),
('XC', 90),
('L', 50),
('XL', 40),
('X', 10),
('IX', 9),
('V', 5),
('IV', 4),
('I', 1))
def toRoman(num):
"""convert integer to Roman numeral"""
if not 0 < num < 5000:
raise ValueError("number %n out of range (must be 1..4999)", num)
if int(num) != num:
raise TypeError("decimals %n can not be converted", num)
result = ""
for numeral, integer in romanNumeralMap:
while num >= integer:
result += numeral
num -= integer
return result
# Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
^ # beginning of string
M{0,4} # thousands - 0 to 4 M's
(CM|CD|D?C{0,3}) # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
# or 500-800 (D, followed by 0 to 3 C's)
(XC|XL|L?X{0,3}) # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
# or 50-80 (L, followed by 0 to 3 X's)
(IX|IV|V?I{0,3}) # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
# or 5-8 (V, followed by 0 to 3 I's)
    [A-Z]?              # optional suffix letter,
                        # not retained (no capture group).
                        # differs from original roman.py
$ # end of string
""", re.VERBOSE)
def fromRoman(strng):
"""convert Roman numeral to integer"""
if not strng:
raise TypeError('Input can not be blank')
if not romanNumeralPattern.search(strng):
        raise ValueError('Invalid Roman numeral: %s' % strng)
result = 0
index = 0
for numeral, integer in romanNumeralMap:
while strng[index:index+len(numeral)] == numeral:
result += integer
index += len(numeral)
return result
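# Minimal usage sketch; the last call exercises the modified pattern above,
# where a single trailing uppercase letter is tolerated but carries no value:
if __name__ == '__main__':
    assert toRoman(1998) == 'MCMXCVIII'
    assert fromRoman('MCMXCVIII') == 1998
    assert fromRoman('MCMXCVIIIB') == 1998  # suffix letter is ignored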
| TomConlin/dipper | dipper/utils/romanplus.py | Python | bsd-3-clause | 2,785 | ["VisIt"] | 4a5bdaba1b7d0dfcf101d2bcfd8c3049767346054ff6ea797de0687104a524c7 |
########################################################################
# $HeadURL$
########################################################################
"""
:mod: DataLoggingHandler
.. module: DataLoggingHandler
:synopsis: DataLoggingHandler is the implementation of the Data Logging
service in the DISET framework.
The following methods are available in the Service interface::
* addFileRecord()
* addFileRecords()
* getFileLoggingInfo()
"""
__RCSID__ = "$Id$"
## imports
from types import StringType, ListType, TupleType
## from DIRAC
from DIRAC import S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.DataManagementSystem.DB.DataLoggingDB import DataLoggingDB
## global instance of the DataLoggingDB
gDataLoggingDB = False
def initializeDataLoggingHandler( serviceInfo ):
""" handler initialisation """
global gDataLoggingDB
gDataLoggingDB = DataLoggingDB()
res = gDataLoggingDB._connect()
if not res['OK']:
return res
res = gDataLoggingDB._checkTable()
if not res['OK'] and not res['Message'] == 'The requested table already exist':
return res
return S_OK()
class DataLoggingHandler( RequestHandler ):
"""
.. class:: DataLoggingClient
Request handler for DataLogging service.
"""
types_addFileRecord = [ [StringType, ListType], StringType, StringType, StringType, StringType ]
@staticmethod
def export_addFileRecord( lfn, status, minor, date, source ):
""" Add a logging record for the given file
:param self: self reference
:param mixed lfn: list of strings or a string with LFN
:param str status: file status
:param str minor: minor status (additional information)
:param mixed date: datetime.datetime or str(datetime.datetime) or ""
:param str source: source setting a new status
"""
if type( lfn ) == StringType:
lfns = [ lfn ]
else:
lfns = lfn
return gDataLoggingDB.addFileRecord( lfns, status, minor, date, source )
types_addFileRecords = [ [ ListType, TupleType ] ]
@staticmethod
def export_addFileRecords( fileTuples ):
""" Add a group of logging records
"""
return gDataLoggingDB.addFileRecords( fileTuples )
types_getFileLoggingInfo = [ StringType ]
@staticmethod
def export_getFileLoggingInfo( lfn ):
""" Get the file logging information
"""
return gDataLoggingDB.getFileLoggingInfo( lfn )
types_getUniqueStates = []
@staticmethod
def export_getUniqueStates():
""" Get all the unique states
"""
return gDataLoggingDB.getUniqueStates()
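# Client-side sketch (requires a configured DIRAC installation; the service
# path "DataManagement/DataLogging" is the conventional location for this
# handler but may differ in a given setup):
#
#   from DIRAC.Core.DISET.RPCClient import RPCClient
#   client = RPCClient( "DataManagement/DataLogging" )
#   res = client.addFileRecord( "/some/lfn", "Registered", "", "", "test" )
#   if res['OK']:
#     print res['Value']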
| avedaee/DIRAC | DataManagementSystem/Service/DataLoggingHandler.py | Python | gpl-3.0 | 2,561 | ["DIRAC"] | 65037d046403acafe48b5e22b6fe3d81a29da0b2c99e205fe7855b7997688bf8 |
"""
A module to automate CIGALE. Currently works for a single galaxy.
It generates a configuration file and runs the standard pcigale
script. Requires pcigale already installed on the system.
"""
import numpy as np
import sys, os, glob, multiprocessing, warnings
from collections import OrderedDict
from astropy.table import Table
try:
from pcigale.session.configuration import Configuration
except ImportError:
print("You will need to install pcigale to use the cigale.py module")
else:
from pcigale.analysis_modules import get_module
from pcigale.data import Database
from frb.surveys.catalog_utils import _detect_mag_cols, convert_mags_to_flux
from IPython import embed
# Default list of SED modules for CIGALE
_DEFAULT_SED_MODULES = ("sfhdelayed", "bc03", "nebular", "dustatt_calzleit", "dale2014",
"restframe_parameters", "redshifting")
#TODO Create a function to check the input filters
#Or create a translation file like eazy's.
#def check_filters(data_file):
def _sed_default_params(module):
"""
Set the default parameters for CIGALE
Args:
module (str):
Specify the SED using the CIGALE standard names, e.g. sfhdelayed, bc03, etc.
Returns:
params (dict): the default dict of SED modules
and their initial parameters.
"""
params = {}
if module == "sfhdelayed":
params['tau_main'] = (10**np.linspace(1,3,10)).tolist() #e-folding time of main population (Myr)
params['age_main'] = (10**np.linspace(3,4,10)).tolist() #age (Myr)
params['tau_burst'] = 50.0 #burst e-folding time (Myr)
params['age_burst'] = 20.0
params['f_burst'] = 0.0 #burst fraction by mass
params['sfr_A'] = 0.1 #SFR at t = 0 (Msun/yr)
params['normalise'] = False # Normalise SFH to produce one solar mass
elif module == "bc03":
params['imf'] = 1 #0: Salpeter 1: Chabrier
params['metallicity'] = [0.0001, 0.0004, 0.004, 0.008, 0.02, 0.05]
        params['separation_age'] = 10 # Separation between young and old stellar populations (Myr)
elif module == 'nebular':
params['logU'] = -2.0 # Ionization parameter
params['f_esc'] = 0.0 # Escape fraction of Ly continuum photons
params['f_dust'] = 0.0 # Fraction of Ly continuum photons absorbed
params['lines_width'] = 300.0
params['emission'] = True
elif module == 'dustatt_calzleit':
params['E_BVs_young'] = [0.12, 0.25, 0.37, 0.5, 0.62, 0.74, 0.86] #Stellar color excess for young continuum
params['E_BVs_old_factor'] = 1.0 # Reduction of E(B-V) for the old population w.r.t. young
params['uv_bump_wavelength'] = 217.5 #central wavelength of UV bump (nm)
params['uv_bump_width'] = 35.6 #UV bump FWHM (nm)
params['uv_bump_amplitude'] = 1.3 # Amplitude of the UV bump. For the Milky Way: 3.
        # The following parameter can have a significant effect on stellar mass
# We use the recommendation in Lo Faro+2017
params['powerlaw_slope'] = -0.13 # Slope delta of the power law modifying the attenuation curve.
# These filters have no effect
params['filters'] = 'B_B90 & V_B90 & FUV'
elif module == 'dale2014':
params['fracAGN'] = [0.0,0.05,0.1,0.2]
params['alpha'] = 2.0
elif module == 'restframe_parameters':
params['beta_calz94'] = False
params['D4000'] = False
params['IRX'] = False
params['EW_lines'] = '500.7/1.0 & 656.3/1.0'
params['luminosity_filters'] = 'u_prime & r_prime'
params['colours_filters'] = 'u_prime-r_prime'
elif module == 'redshifting':
params['redshift'] = '' #Use input redshifts
return params
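# Sketch: build a custom parameter grid from the defaults above and hand it
# to run() via its sed_modules/sed_modules_params keywords. The metallicity
# values below are illustrative, not recommendations.
#
#   custom = {m: _sed_default_params(m) for m in _DEFAULT_SED_MODULES}
#   custom['bc03']['metallicity'] = [0.008, 0.02]
#   run(photometry_table, 'z', sed_modules=list(_DEFAULT_SED_MODULES),
#       sed_modules_params=custom)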
def gen_cigale_in(photometry_table, zcol, idcol=None, infile="cigale_in.fits",
overwrite=True, **kwargs):
"""
Generates the input catalog from
a photometric catalog.
Args:
photometry_table (astropy Table):
A table from some photometric
catalog with magnitudes and
error measurements. Currently supports
            DES, DECaLS, SDSS, Pan-STARRS and WISE.
The naming convention follows those specified in frb.galaxies.defs
with the exception of WISE which use WISE-1, etc. although the code
also handles WISE-W1, etc.
zcol (str):
Name of the column with redshift estimates
idcol (str, optional):
Name of the column with object IDs. By default,
the code looks for the first column with "ID" in
its name. If that's not present, it creates a
column with row numbers for IDs.
infile (str, optional):
Output name + path for the CIGALE input file generated
overwrite (bool, optional):
If true, overwrites file if it already exists
kwargs: only here to catch extras
"""
#Table must have a column with redshift estimates
if not isinstance(zcol, str):
raise IOError("zcol must be a column name. i.e. a string")
assert zcol in photometry_table.colnames, "{} not found in the table. Please check".format(zcol)
magcols, mag_errcols = _detect_mag_cols(photometry_table)
cigtab = photometry_table.copy()
cigtab.rename_column(zcol,"redshift")
photom_cols = magcols+mag_errcols
# Rename any column with "ID" in it to "id"
if idcol is None:
try:
idcol = [col for col in cigtab.colnames if "ID" in col.upper()][0]
except IndexError:
print("No column with 'ID' in name. Adding a column.")
idcol = 'id'
cigtab[idcol] = np.arange(len(cigtab))+1
cigtab.rename_column(idcol,"id")
#First round of renaming
cigtab = convert_mags_to_flux(cigtab)
cigtab = cigtab[['id','redshift']+photom_cols]
# Rename our filters to CIGALE names, as needed
new_names = {
'SDSS_u': 'sdss.up',
'SDSS_g': 'sdss.gp',
'SDSS_r': 'sdss.rp',
'SDSS_i': 'sdss.ip',
'SDSS_z': 'sdss.zp',
'VLT_u': 'VLT_FORS2_u',
'VLT_g': 'VLT_FORS2_g',
'VLT_I': 'VLT_FORS2_I',
'VLT_z': 'VLT_FORS2_z',
'WISE_W1': 'WISE1',
'WISE_W2': 'WISE2',
'WISE_W3': 'WISE3',
'WISE_W4': 'WISE4',
'VISTA_Y': 'vista.vircam.Y',
'VISTA_J': 'vista.vircam.J',
'VISTA_H': 'vista.vircam.H',
'VISTA_Ks': 'vista.vircam.Ks',
'LRISr_I': 'LRIS_I',
'LRISb_V': 'LRIS_V',
'WFC3_F160W': 'hst.wfc3.F160W',
'Spitzer_3.6': 'spitzer.irac.ch1',
'Spitzer_4.5': 'spitzer.irac.ch2',
}
for key in new_names:
if key in photom_cols:
cigtab.rename_column(key, new_names[key])
        # Also rename the matching error column, if present
if key+'_err' in photom_cols:
cigtab.rename_column(key+'_err', new_names[key]+'_err')
cigtab.write(infile,overwrite=overwrite)
return
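# Usage sketch: column names follow frb.galaxies.defs; the magnitudes and
# redshift below are invented for illustration only.
#
#   tbl = Table({'ID': ['FRB000000'], 'z': [0.3],
#                'SDSS_g': [21.2], 'SDSS_g_err': [0.1],
#                'SDSS_r': [20.8], 'SDSS_r_err': [0.1]})
#   gen_cigale_in(tbl, 'z', idcol='ID', infile='cigale_in.fits')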
def _initialise(data_file, config_file="pcigale.ini",
cores=None, sed_modules=_DEFAULT_SED_MODULES,
sed_modules_params=None, **kwargs):
"""
Initialise a CIGALE configuration file and write to disk.
Args:
data_file (str):
Path to the input photometry data file.
config_file (str, optional):
Path to the file where CIGALE's configuration
is stored.
cores (int, optional):
Number of CPU cores to be used. Defaults
to all cores on the system.
sed_modules (list or tuple, optional):
A list of SED modules to be used in the
PDF analysis. If this is being input, there
should be a corresponding correct dict
for sed_modules_params.
        sed_modules_params (dict, optional):
A dict containing parameter values for
the input SED modules. Better not use this
unless you know exactly what you're doing.
kwargs: only here to catch extras
Returns:
cigconf (pcigale.session.configuration.Configuration):
CIGALE Configuration object
"""
# Check
    if sed_modules != _DEFAULT_SED_MODULES:
assert sed_modules_params is not None,\
"If you're not using the default modules, you'll have to input SED parameters"
# Init
cigconf = Configuration(config_file) #a set of dicts, mostly
cigconf.create_blank_conf() #Initialises a pcigale.ini file
# fill in initial values
cigconf.pcigaleini_exists = True
cigconf.config['data_file'] = data_file
cigconf.config['param_file'] = ""
cigconf.config['sed_modules'] = sed_modules
cigconf.config['analysis_method'] = 'pdf_analysis'
if cores is None:
cores = multiprocessing.cpu_count() #Use all cores
cigconf.config['cores'] = cores
cigconf.generate_conf() #Writes defaults to config_file
cigconf.config['analysis_params']['variables'] = ""
cigconf.config['analysis_params']['save_best_sed'] = True
cigconf.config['analysis_params']['lim_flag'] = True
# Change the default values to new defaults:
if sed_modules_params is None:
sed_modules_params = {}
for module in sed_modules:
sed_modules_params[module] = _sed_default_params(module)
cigconf.config['sed_modules_params'] = sed_modules_params
# Overwrites the config file
cigconf.config.write()
# Return
return cigconf
def run(photometry_table, zcol, data_file="cigale_in.fits", config_file="pcigale.ini",
wait_for_input=False, plot=True, outdir='out', compare_obs_model=False, **kwargs):
"""
Input parameters and then run CIGALE.
Args:
photometry_table (astropy Table):
A table from some photometric catalog with magnitudes and
error measurements. Currently supports
            DES, DECaLS, SDSS, Pan-STARRS and WISE.
zcol (str):
Name of the column with redshift estimates.
data_file (str, optional):
Root name for the photometry data file generated used as input to CIGALE
config_file (str, optional):
Root name for the file where CIGALE's configuration is generated
wait_for_input (bool, optional):
If true, waits for the user to finish editing the auto-generated config file
before running.
plot (bool, optional):
Plots the best fit SED if true
cores (int, optional):
Number of CPU cores to be used. Defaults
to all cores on the system.
outdir (str, optional):
Path to the many outputs of CIGALE
If not supplied, the outputs will appear in a folder named out/
compare_obs_model (bool, optional):
If True compare the input observed fluxes with the model fluxes
This writes a Table to outdir named 'photo_observed_model.dat'
kwargs: These are passed into gen_cigale_in() and _initialise()
sed_modules (list of 'str', optional):
A list of SED modules to be used in the
PDF analysis. If this is being input, there
should be a corresponding correct dict
for sed_modules_params.
        sed_modules_params (dict, optional):
A dict containing parameter values for
the input SED modules. Better not use this
unless you know exactly what you're doing.
"""
gen_cigale_in(photometry_table,zcol,infile=data_file,overwrite=True, **kwargs)
_initialise(data_file, config_file=config_file,**kwargs)
if wait_for_input:
input("Edit the generated config file {:s} and press any key to run.".format(config_file))
cigconf = Configuration(config_file)
analysis_module = get_module(cigconf.configuration['analysis_method'])
analysis_module.process(cigconf.configuration)
if plot:
try:
from pcigale_plots import sed # This modifies the backend to Agg so I hide it here
old_version = True
except ImportError:
from pcigale_plots.plot_types.sed import sed
old_version = False
if old_version:
import pcigale
warnings.warn("You are using CIGALE version {:s}, for which support is deprecated. Please update to 2020.0 or higher.".format(pcigale.__version__))
sed(cigconf,"mJy",True)
else:
# TODO: Let the user customize the plot.
series = ['stellar_attenuated', 'stellar_unattenuated', 'dust', 'agn', 'model']
sed(cigconf,"mJy",True, (False, 1e5), (False, 1e2), series, "pdf", "out")
# Set back to a GUI
import matplotlib
matplotlib.use('TkAgg')
# Rename the default output directory?
if outdir != 'out':
try:
os.system("rm -rf {}".format(outdir))
os.system("mv out {:s}".format(outdir))
except:
print("Invalid output directory path. Output stored in out/")
# Move input files into outdir too
os.system("mv {:s} {:s}".format(data_file, outdir))
os.system("mv {:s} {:s}".format(config_file, outdir))
os.system("mv {:s}.spec {:s}".format(config_file, outdir))
# Compare?
if compare_obs_model:
#Generate an observation/model flux comparison table.
with Database() as base:
filters = OrderedDict([(name, base.get_filter(name))
for name in cigconf.configuration['bands']
if not (name.endswith('_err') or name.startswith('line')) ])
filters_wl = np.array([filt.pivot_wavelength
for filt in filters.values()])
mods = Table.read(outdir+'/results.fits')
try:
obs = Table.read(os.path.join(outdir, cigconf.configuration['data_file']))
except:
print("Something went wrong here. Astropy was unable to read the observations table. Please ensure it is in the fits format.")
return
for model, obj in zip(mods, obs):
photo_obs_model = Table()
photo_obs_model['lambda_filter'] = [wl/1000 for wl in filters_wl]
photo_obs_model['model_flux'] = np.array([model["best."+filt] for filt in filters.keys()])
photo_obs_model['observed_flux'] = np.array([obj[filt] for filt in filters.keys()])
photo_obs_model['observed_flux_err'] = np.array([obj[filt+'_err'] for filt in filters.keys()])
photo_obs_model.write(outdir+"/photo_observed_model_"+str(model['id'])+".dat",format="ascii",overwrite=True)
#import pdb; pdb.set_trace()
return
def host_run(host, cut_photom=None, cigale_file=None):
"""
Run CIGALE on an FRBGalaxy's photometry
and store results in a folder with the
FRBGalaxy's name.
    Args
    ----
    host (FRBGalaxy): A host galaxy.
    cut_photom (astropy Table, optional): Table of galaxy
        photometry used in place of host.photom. Table columns
        must be in the format '<SOURCE>_<BAND>'
        and '<SOURCE>_<BAND>_err',
        e.g. SDSS_u, SDSS_u_err, Pan-STARRS_g
    cigale_file (str, optional): Name of main
        CIGALE output file. Must be in the format
        `<something>_CIGALE.fits`. No file is
        renamed if nothing is provided.
    """
cigale_tbl = Table()
cigale_tbl['z'] = [host.z]
cigale_tbl['ID'] = host.name
# Deal with photometry
if cut_photom is not None:
photom_obj = cut_photom
else:
photom_obj = host.photom
for key in photom_obj.keys():
cigale_tbl[key] = photom_obj[key]
# Run
run(cigale_tbl, 'z', outdir=host.name, compare_obs_model=True, idcol='ID')
# Rename/move
if cigale_file is not None:
os.system('cp -rp {:s}/results.fits {:s}'.format(host.name, cigale_file))
model_file = cigale_file.replace('CIGALE', 'CIGALE_model')
os.system('cp -rp {:s}/{:s}_best_model.fits {:s}'.format(host.name, host.name, model_file))
photo_file = cigale_file.replace('CIGALE.fits', 'CIGALE_photo.dat')
os.system('cp -rp {:s}/photo_observed_model_{:s}.dat {:s}'.format(host.name, host.name, photo_file))
# SFH
sfh_file = cigale_file.replace('CIGALE', 'CIGALE_SFH')
os.system('mv {:s}/{:s}_SFH.fits {:s}'.format(host.name, host.name, sfh_file))
    return
| FRBs/DM | frb/galaxies/cigale.py | Python | bsd-3-clause | 16,473 | ["Galaxy"] | 125b01957b080658309490fb19b625d71d28ad0818eef008f3a1d346fd746ac6 |
# proxy module
from __future__ import absolute_import
from mayavi.filters.quadric_decimation import *
| enthought/etsproxy | enthought/mayavi/filters/quadric_decimation.py | Python | bsd-3-clause | 102 | ["Mayavi"] | 5541a5ac84a43ec1ea8b60176e9b3d89814436d9df17ff91697a7b95cf25053d |
"""
Actions manager for transcripts ajax calls.
+++++++++++++++++++++++++++++++++++++++++++
Module does not support rollback (pressing the "Cancel" button in Studio).
All user changes are saved immediately.
"""
import copy
import os
import logging
import json
import requests
from django.http import HttpResponse, Http404
from django.core.exceptions import PermissionDenied
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.utils.translation import ugettext as _
from opaque_keys import InvalidKeyError
from xmodule.contentstore.content import StaticContent
from xmodule.exceptions import NotFoundError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import UsageKey
from xmodule.contentstore.django import contentstore
from xmodule.modulestore.exceptions import ItemNotFoundError
from util.json_request import JsonResponse
from xmodule.video_module.transcripts_utils import (
generate_subs_from_source,
generate_srt_from_sjson, remove_subs_from_store,
download_youtube_subs, get_transcripts_from_youtube,
copy_or_rename_transcript,
manage_video_subtitles_save,
GetTranscriptsFromYouTubeException,
TranscriptsRequestValidationException,
youtube_video_transcript_name,
)
from student.auth import has_course_author_access
__all__ = [
'upload_transcripts',
'download_transcripts',
'check_transcripts',
'choose_transcripts',
'replace_transcripts',
'rename_transcripts',
'save_transcripts',
]
log = logging.getLogger(__name__)
def error_response(response, message, status_code=400):
"""
Simplify similar actions: log message and return JsonResponse with message included in response.
By default return 400 (Bad Request) Response.
"""
log.debug(message)
response['status'] = message
return JsonResponse(response, status_code)
@login_required
def upload_transcripts(request):
"""
Upload transcripts for current module.
returns: response dict::
status: 'Success' and HTTP 200 or 'Error' and HTTP 400.
subs: Value of uploaded and saved html5 sub field in video item.
"""
response = {
'status': 'Unknown server error',
'subs': '',
}
locator = request.POST.get('locator')
if not locator:
return error_response(response, 'POST data without "locator" form data.')
try:
item = _get_item(request, request.POST)
except (InvalidKeyError, ItemNotFoundError):
return error_response(response, "Can't find item by locator.")
if 'transcript-file' not in request.FILES:
return error_response(response, 'POST data without "file" form data.')
video_list = request.POST.get('video_list')
if not video_list:
return error_response(response, 'POST data without video names.')
try:
video_list = json.loads(video_list)
except ValueError:
return error_response(response, 'Invalid video_list JSON.')
    # Used utf-8-sig instead of utf-8 to strip any BOM (Byte Order Mark, U+FEFF)
source_subs_filedata = request.FILES['transcript-file'].read().decode('utf-8-sig')
source_subs_filename = request.FILES['transcript-file'].name
if '.' not in source_subs_filename:
return error_response(response, "Undefined file extension.")
basename = os.path.basename(source_subs_filename)
source_subs_name = os.path.splitext(basename)[0]
source_subs_ext = os.path.splitext(basename)[1][1:]
if item.category != 'video':
return error_response(response, 'Transcripts are supported only for "video" modules.')
    # Allow upload only if at least one video link is present
if video_list:
sub_attr = source_subs_name
try:
# Generate and save for 1.0 speed, will create subs_sub_attr.srt.sjson subtitles file in storage.
generate_subs_from_source({1: sub_attr}, source_subs_ext, source_subs_filedata, item)
for video_dict in video_list:
video_name = video_dict['video']
            # Create transcripts for every video source, in case some of the video sources are deleted in the future.
# Updates item.sub with `video_name` on success.
copy_or_rename_transcript(video_name, sub_attr, item, user=request.user)
response['subs'] = item.sub
response['status'] = 'Success'
except Exception as ex:
return error_response(response, ex.message)
else:
return error_response(response, 'Empty video sources.')
return JsonResponse(response)
@login_required
def download_transcripts(request):
"""
Passes to user requested transcripts file.
Raises Http404 if unsuccessful.
"""
locator = request.GET.get('locator')
if not locator:
log.debug('GET data without "locator" property.')
raise Http404
try:
item = _get_item(request, request.GET)
except (InvalidKeyError, ItemNotFoundError):
log.debug("Can't find item by locator.")
raise Http404
subs_id = request.GET.get('subs_id')
if not subs_id:
log.debug('GET data without "subs_id" property.')
raise Http404
if item.category != 'video':
        log.debug('Transcripts are supported only for "video" modules.')
raise Http404
filename = u'subs_{0}.srt.sjson'.format(subs_id)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
sjson_transcripts = contentstore().find(content_location)
log.debug("Downloading subs for %s id", subs_id)
str_subs = generate_srt_from_sjson(json.loads(sjson_transcripts.data), speed=1.0)
if not str_subs:
log.debug('generate_srt_from_sjson produces no subtitles')
raise Http404
response = HttpResponse(str_subs, content_type='application/x-subrip')
response['Content-Disposition'] = 'attachment; filename="{0}.srt"'.format(subs_id)
return response
except NotFoundError:
log.debug("Can't find content in storage for %s subs", subs_id)
raise Http404
@login_required
def check_transcripts(request):
"""
Check state of transcripts availability.
request.GET['data'] has key `videos`, which can contain any of the following::
[
{u'type': u'youtube', u'video': u'OEoXaMPEzfM', u'mode': u'youtube'},
{u'type': u'html5', u'video': u'video1', u'mode': u'mp4'}
{u'type': u'html5', u'video': u'video2', u'mode': u'webm'}
]
`type` is youtube or html5
`video` is html5 or youtube video_id
    `mode` is youtube, mp4 or webm
Returns transcripts_presence dict::
html5_local: list of html5 ids, if subtitles exist locally for them;
is_youtube_mode: bool, if we have youtube_id, and as youtube mode is of higher priority, reflect this with flag;
youtube_local: bool, if youtube transcripts exist locally;
youtube_server: bool, if youtube transcripts exist on server;
youtube_diff: bool, if youtube transcripts exist on youtube server, and are different from local youtube ones;
current_item_subs: string, value of item.sub field;
status: string, 'Error' or 'Success';
subs: string, new value of item.sub field, that should be set in module;
        command: string, tells the front-end what to do and what to show to the user.
"""
transcripts_presence = {
'html5_local': [],
'html5_equal': False,
'is_youtube_mode': False,
'youtube_local': False,
'youtube_server': False,
'youtube_diff': True,
'current_item_subs': None,
'status': 'Error',
}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(transcripts_presence, e.message)
transcripts_presence['status'] = 'Success'
filename = 'subs_{0}.srt.sjson'.format(item.sub)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
local_transcripts = contentstore().find(content_location).data
transcripts_presence['current_item_subs'] = item.sub
except NotFoundError:
pass
# Check for youtube transcripts presence
youtube_id = videos.get('youtube', None)
if youtube_id:
transcripts_presence['is_youtube_mode'] = True
# youtube local
filename = 'subs_{0}.srt.sjson'.format(youtube_id)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
local_transcripts = contentstore().find(content_location).data
transcripts_presence['youtube_local'] = True
except NotFoundError:
log.debug("Can't find transcripts in storage for youtube id: %s", youtube_id)
# youtube server
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = youtube_id
youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
if youtube_transcript_name:
youtube_text_api['params']['name'] = youtube_transcript_name
youtube_response = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
if youtube_response.status_code == 200 and youtube_response.text:
transcripts_presence['youtube_server'] = True
#check youtube local and server transcripts for equality
if transcripts_presence['youtube_server'] and transcripts_presence['youtube_local']:
try:
youtube_server_subs = get_transcripts_from_youtube(
youtube_id,
settings,
item.runtime.service(item, "i18n")
)
if json.loads(local_transcripts) == youtube_server_subs: # check transcripts for equality
transcripts_presence['youtube_diff'] = False
except GetTranscriptsFromYouTubeException:
pass
# Check for html5 local transcripts presence
html5_subs = []
for html5_id in videos['html5']:
filename = 'subs_{0}.srt.sjson'.format(html5_id)
content_location = StaticContent.compute_location(item.location.course_key, filename)
try:
html5_subs.append(contentstore().find(content_location).data)
transcripts_presence['html5_local'].append(html5_id)
except NotFoundError:
log.debug("Can't find transcripts in storage for non-youtube video_id: %s", html5_id)
if len(html5_subs) == 2: # check html5 transcripts for equality
transcripts_presence['html5_equal'] = json.loads(html5_subs[0]) == json.loads(html5_subs[1])
command, subs_to_use = _transcripts_logic(transcripts_presence, videos)
transcripts_presence.update({
'command': command,
'subs': subs_to_use,
})
return JsonResponse(transcripts_presence)
def _transcripts_logic(transcripts_presence, videos):
"""
    From the `transcripts_presence` content, figure out what to show to the user.
    returns: `command` and `subs`.
    `command`: string, tells the front-end what to do and what to show to the user.
`subs`: string, new value of item.sub field, that should be set in module.
`command` is one of::
replace: replace local youtube subtitles with server one's
found: subtitles are found
import: import subtitles from youtube server
choose: choose one from two html5 subtitles
not found: subtitles are not found
"""
command = None
# new value of item.sub field, that should be set in module.
subs = ''
    # youtube transcripts are of higher priority than html5 by design
if (
transcripts_presence['youtube_diff'] and
transcripts_presence['youtube_local'] and
transcripts_presence['youtube_server']): # youtube server and local exist
command = 'replace'
subs = videos['youtube']
elif transcripts_presence['youtube_local']: # only youtube local exist
command = 'found'
subs = videos['youtube']
elif transcripts_presence['youtube_server']: # only youtube server exist
command = 'import'
else: # html5 part
if transcripts_presence['html5_local']: # can be 1 or 2 html5 videos
if len(transcripts_presence['html5_local']) == 1 or transcripts_presence['html5_equal']:
command = 'found'
subs = transcripts_presence['html5_local'][0]
else:
command = 'choose'
subs = transcripts_presence['html5_local'][0]
else: # html5 source have no subtitles
# check if item sub has subtitles
if transcripts_presence['current_item_subs'] and not transcripts_presence['is_youtube_mode']:
log.debug("Command is use existing %s subs", transcripts_presence['current_item_subs'])
command = 'use_existing'
else:
command = 'not_found'
log.debug(
"Resulted command: %s, current transcripts: %s, youtube mode: %s",
command,
transcripts_presence['current_item_subs'],
transcripts_presence['is_youtube_mode']
)
return command, subs
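# Worked example (sketch): local and server YouTube transcripts both exist
# and differ, so the front-end is told to offer a replacement.
#
#   presence = {'html5_local': [], 'html5_equal': False,
#               'is_youtube_mode': True, 'youtube_local': True,
#               'youtube_server': True, 'youtube_diff': True,
#               'current_item_subs': None, 'status': 'Success'}
#   command, subs = _transcripts_logic(presence,
#                                      {'youtube': 'OEoXaMPEzfM', 'html5': {}})
#   # command == 'replace', subs == 'OEoXaMPEzfM'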
@login_required
def choose_transcripts(request):
"""
    Replaces html5 subtitles, presented for both html5 sources, with the chosen one.
Code removes rejected html5 subtitles and updates sub attribute with chosen html5_id.
It does nothing with youtube id's.
Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
"""
response = {
'status': 'Error',
'subs': '',
}
try:
data, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, e.message)
html5_id = data.get('html5_id') # html5_id chosen by user
# find rejected html5_id and remove appropriate subs from store
html5_id_to_remove = [x for x in videos['html5'] if x != html5_id]
if html5_id_to_remove:
remove_subs_from_store(html5_id_to_remove, item)
if item.sub != html5_id: # update sub value
item.sub = html5_id
item.save_with_metadata(request.user)
response = {
'status': 'Success',
'subs': item.sub,
}
return JsonResponse(response)
@login_required
def replace_transcripts(request):
"""
Replaces all transcripts with youtube ones.
Downloads subtitles from youtube and replaces all transcripts with downloaded ones.
Returns: status `Success` and resulted item.sub value or status `Error` and HTTP 400.
"""
response = {'status': 'Error', 'subs': ''}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, e.message)
youtube_id = videos['youtube']
if not youtube_id:
        return error_response(response, 'YouTube id {} is not present in request data.'.format(youtube_id))
try:
download_youtube_subs(youtube_id, item, settings)
except GetTranscriptsFromYouTubeException as e:
return error_response(response, e.message)
item.sub = youtube_id
item.save_with_metadata(request.user)
response = {
'status': 'Success',
'subs': item.sub,
}
return JsonResponse(response)
def _validate_transcripts_data(request):
"""
Validates, that request contains all proper data for transcripts processing.
Returns tuple of 3 elements::
data: dict, loaded json from request,
videos: parsed `data` to useful format,
item: video item from storage
Raises `TranscriptsRequestValidationException` if validation is unsuccessful
or `PermissionDenied` if user has no access.
"""
data = json.loads(request.GET.get('data', '{}'))
if not data:
raise TranscriptsRequestValidationException(_('Incoming video data is empty.'))
try:
item = _get_item(request, data)
except (InvalidKeyError, ItemNotFoundError):
raise TranscriptsRequestValidationException(_("Can't find item by locator."))
if item.category != 'video':
raise TranscriptsRequestValidationException(_('Transcripts are supported only for "video" modules.'))
# parse data form request.GET.['data']['video'] to useful format
videos = {'youtube': '', 'html5': {}}
for video_data in data.get('videos'):
if video_data['type'] == 'youtube':
videos['youtube'] = video_data['video']
else: # do not add same html5 videos
if videos['html5'].get('video') != video_data['video']:
videos['html5'][video_data['video']] = video_data['mode']
return data, videos, item
@login_required
def rename_transcripts(request):
"""
Create copies of existing subtitles with new names of HTML5 sources.
Old subtitles are not deleted now, because we do not have rollback functionality.
If succeed, Item.sub will be chosen randomly from html5 video sources provided by front-end.
"""
response = {'status': 'Error', 'subs': ''}
try:
__, videos, item = _validate_transcripts_data(request)
except TranscriptsRequestValidationException as e:
return error_response(response, e.message)
old_name = item.sub
for new_name in videos['html5'].keys(): # copy subtitles for every HTML5 source
try:
# updates item.sub with new_name if it is successful.
copy_or_rename_transcript(new_name, old_name, item, user=request.user)
except NotFoundError:
            # subtitles file `item.sub` is not present in the system. Nothing to copy or rename.
            return error_response(response, "Can't find transcripts in storage for {}".format(old_name))
response['status'] = 'Success'
response['subs'] = item.sub # item.sub has been changed, it is not equal to old_name.
log.debug("Updated item.sub to %s", item.sub)
return JsonResponse(response)
@login_required
def save_transcripts(request):
"""
Saves video module with updated values of fields.
Returns: status `Success` or status `Error` and HTTP 400.
"""
response = {'status': 'Error'}
data = json.loads(request.GET.get('data', '{}'))
if not data:
return error_response(response, 'Incoming video data is empty.')
try:
item = _get_item(request, data)
except (InvalidKeyError, ItemNotFoundError):
return error_response(response, "Can't find item by locator.")
metadata = data.get('metadata')
if metadata is not None:
new_sub = metadata.get('sub')
for metadata_key, value in metadata.items():
setattr(item, metadata_key, value)
item.save_with_metadata(request.user) # item becomes updated with new values
if new_sub:
manage_video_subtitles_save(item, request.user)
else:
# If `new_sub` is empty, it means that user explicitly does not want to use
# transcripts for current video ids and we remove all transcripts from storage.
current_subs = data.get('current_subs')
if current_subs is not None:
for sub in current_subs:
remove_subs_from_store(sub, item)
response['status'] = 'Success'
return JsonResponse(response)
def _get_item(request, data):
"""
Obtains from 'data' the locator for an item.
Next, gets that item from the modulestore (allowing any errors to raise up).
Finally, verifies that the user has access to the item.
Returns the item.
"""
usage_key = UsageKey.from_string(data.get('locator'))
# This is placed before has_course_author_access() to validate the location,
    # because has_course_author_access() raises an error if the location is invalid.
item = modulestore().get_item(usage_key)
# use the item's course_key, because the usage_key might not have the run
if not has_course_author_access(request.user, item.location.course_key):
raise PermissionDenied()
return item
| cognitiveclass/edx-platform | cms/djangoapps/contentstore/views/transcripts_ajax.py | Python | agpl-3.0 | 20,397 | ["FEFF"] | e3d57c02833fdd41b26919313fd6bec200c9bfa2359d4999cd1ac8caf9aa0a41 |
import pymol
from pymol import cmd, cgo
import numpy as np
def start():
pymol.finish_launching()
def draw_spheres(coords, model, frame, radius=0.5):
spheres=[]
    for x in coords.reshape(coords.size // 3, 3):
spheres.extend([cgo.COLOR, 1.0, 0.0, 0.0])
spheres.extend([cgo.SPHERE, x[0], x[1], x[2], radius])
cmd.load_cgo(spheres, model, frame)
def draw_rigid(coords, model, frame, colour, bondslist=[], radius=0.5):
"""
Use pymol to draw a system of rigid body fragments
Parameters
----------
colour: 3-tuple
RBG colour for the spheres being drawn.
bondslist: list of 2-tuples, optional
List of atom pairs between which bonds should be drawn.
"""
spheres = []
    for x in coords.reshape(coords.size // 3, 3):
spheres.extend([cgo.COLOR, colour[0], colour[1], colour[2]])
spheres.extend([cgo.SPHERE, x[0], x[1], x[2], radius])
    coords = coords.reshape(coords.size // 3, 3)
Rcyl = .1
for i in bondslist:
spheres.extend([cgo.CYLINDER, coords[i[0]][0], coords[i[0]][1], coords[i[0]][2],
coords[i[1]][0], coords[i[1]][1], coords[i[1]][2],
Rcyl , 255., 255., 255. , 0., 0., 0.])
cmd.load_cgo(spheres, model, frame)
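# Usage sketch (coordinates, colour and bond list below are illustrative):
#
#   start()
#   coords = np.array([[0., 0., 0.], [1.5, 0., 0.], [3., 0., 0.]])
#   draw_rigid(coords, 'fragment', 1, (0., 0.5, 1.), bondslist=[(0, 1), (1, 2)])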
def draw_box(boxvec, model, frame):
"""
Draw a box around the system to easily visualise periodic boundaries
Parameters
----------
boxvec: np.array
The dimensions of the periodic box
"""
    box = []
    Rcyl = .1
    h = 0.5 * np.asarray(boxvec)
    # The eight corners of the box, indexed so that bits 2/1/0 of the index
    # select the sign of x/y/z respectively (0 -> negative, 1 -> positive).
    corners = np.array([[sx * h[0], sy * h[1], sz * h[2]]
                        for sx in (-1, 1) for sy in (-1, 1) for sz in (-1, 1)])
    # Corner-index pairs for the twelve edges; the three edges meeting at the
    # (-x, -y, -z) corner are coloured red, green and blue, the rest white.
    edges = [(0, 4), (0, 2), (0, 1), (4, 6), (4, 5), (2, 6), (2, 3),
             (1, 5), (1, 3), (7, 6), (7, 3), (7, 5)]
    colours = [(1., 0., 0.), (0., 1., 0.), (0., 0., 1.)] + [(255., 255., 255.)] * 9
    for (i, j), (r, g, b) in zip(edges, colours):
        box.extend([cgo.CYLINDER, corners[i][0], corners[i][1], corners[i][2],
                    corners[j][0], corners[j][1], corners[j][2],
                    Rcyl, r, g, b, 0., 0., 0.])
    cmd.load_cgo(box, model, frame)
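# E.g., to outline a cubic periodic cell of side 10 centred on the origin
# (a sketch; assumes a PyMOL session already started via start()):
#
#   draw_box(np.array([10., 10., 10.]), 'cell', 1)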
| khs26/pele | pele/utils/pymolwrapper.py | Python | gpl-3.0 | 3,813 | ["PyMOL"] | ae035f601655f395bc071fed62da39b5da93cd1c7f5eb3cb8ba97c0a811678aa |
#!/usr/bin/env python
"""
Commands related to syncing copytext from Google Docs.
"""
import app_config
import os
from fabric.api import task
from oauth import get_document, get_credentials
from termcolor import colored
@task(default=True)
def update():
"""
Downloads a Google Doc as an Excel file.
"""
    if app_config.COPY_GOOGLE_DOC_KEY is None:
print colored('You have set COPY_GOOGLE_DOC_KEY to None. If you want to use a Google Sheet, set COPY_GOOGLE_DOC_KEY to the key of your sheet in app_config.py', 'blue')
return
credentials = get_credentials()
if not credentials:
print colored('No Google OAuth credentials file found.', 'yellow')
print colored('Run `fab app` and visit `http://localhost:8000` to generate credentials.', 'yellow')
return
get_document(app_config.COPY_GOOGLE_DOC_KEY, app_config.COPY_PATH)
| TylerFisher/birdseye | template/fabfile/text.py | Python | mit | 888 | ["VisIt"] | 5faf4fff3e9e030865fe9b3d50b30bfadb5d036696785f0799caa923f60ffc8c |
# basic post processing hooks
import re
import pandas
from snakemakelib.odo import cutadapt
from snakemakelib_oliver.odo import fastqc, bowtie2, htseq
__all__ = ['qc_fastqc_summary_hook',
'qc_cutadapt_post_processing_hook',
'qc_bowtie2_post_processing_hook',
'qc_htseq_post_processing_hook']
def get_base(x):
    return re.sub(r'_fastqc.*', '', x)
def qc_fastqc_summary_hook(df, **kwargs):
df['PU'] = df['PU'].map(get_base)
    df = df.drop('PlatformUnit', axis=1)
df_wide = df.reset_index().pivot_table(values=["flag"], index=["SM", "PU"], columns="statistic", aggfunc=lambda x: x)
df_wide.columns = df_wide.columns.droplevel()
return df_wide
def qc_cutadapt_post_processing_hook(df, **kwargs):
df_wide = df.reset_index().pivot_table(values=["value"], index=["SM", "PU", "PlatformUnit"], columns="statistic")
df_wide.columns = df_wide.columns.droplevel()
df_wide["Reads percent"] = 100.0 * df_wide["Reads with adapters"] /\
df_wide["Total reads processed"]
df = df_wide.stack()
df.name = "value"
return df
def qc_bowtie2_post_processing_hook(df, **kwargs):
df_wide = df.reset_index().pivot_table(values=['counts'], index=["SM", "PU"], columns="statistic")
df_wide.columns = df_wide.columns.droplevel()
df_wide['Percent uniquely aligned'] = 100 * df_wide['Number Uniquely Aligned'] / df_wide['Number of Reads']
return df_wide
def qc_htseq_post_processing_hook(df, **kwargs):
df_wide = df.reset_index().pivot_table(values=['count'], index=["FBgn"], columns="SM")
#df_wide.columns = df_wide.columns.droplevel()
return df
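# Sketch of the tidy input these hooks expect and the pivot they perform;
# the sample names and counts are invented:
#
#   df = pandas.DataFrame({'SM': ['s1', 's1'], 'PU': ['p1', 'p1'],
#                          'statistic': ['Number of Reads',
#                                        'Number Uniquely Aligned'],
#                          'counts': [1000, 800]}).set_index(['SM', 'PU'])
#   wide = qc_bowtie2_post_processing_hook(df)
#   # wide['Percent uniquely aligned'] -> 80.0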
| Oliver-Lab/snakemakelib-oliver | snakemakelib_oliver/workflows/qc/app/hooks.py | Python | mit | 1,635 | ["HTSeq"] | 36fd9bc437524626cd980bd1ab4a58c562e8bbf02124cd8c0f5d1ada5a3e1edd |
# Copyright(C) 2011, 2015, 2018 by
# Ben Edwards <bedwards@cs.unm.edu>
# Aric Hagberg <hagberg@lanl.gov>
# Konstantinos Karakatsanis <dinoskarakas@gmail.com>
# All rights reserved.
# BSD license.
#
# Authors: Ben Edwards (bedwards@cs.unm.edu)
# Aric Hagberg (hagberg@lanl.gov)
# Konstantinos Karakatsanis (dinoskarakas@gmail.com)
# Jean-Gabriel Young (jean.gabriel.young@gmail.com)
"""Generators for classes of graphs used in studying social networks."""
from __future__ import division
import itertools
import math
import networkx as nx
from networkx.utils import py_random_state
__all__ = ['caveman_graph', 'connected_caveman_graph',
'relaxed_caveman_graph', 'random_partition_graph',
'planted_partition_graph', 'gaussian_random_partition_graph',
'ring_of_cliques', 'windmill_graph', 'stochastic_block_model']
def caveman_graph(l, k):
"""Returns a caveman graph of `l` cliques of size `k`.
Parameters
----------
l : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.caveman_graph(3, 3)
See also
--------
connected_caveman_graph
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
# l disjoint cliques of size k
G = nx.empty_graph(l * k)
if k > 1:
for start in range(0, l * k, k):
edges = itertools.combinations(range(start, start + k), 2)
G.add_edges_from(edges)
return G
def connected_caveman_graph(l, k):
"""Returns a connected caveman graph of `l` cliques of size `k`.
The connected caveman graph is formed by creating `n` cliques of size
`k`, then a single edge in each clique is rewired to a node in an
adjacent clique.
Parameters
----------
l : int
number of cliques
k : int
size of cliques
Returns
-------
G : NetworkX Graph
connected caveman graph
Notes
-----
This returns an undirected graph, it can be converted to a directed
graph using :func:`nx.to_directed`, or a multigraph using
``nx.MultiGraph(nx.caveman_graph(l, k))``. Only the undirected version is
described in [1]_ and it is unclear which of the directed
generalizations is most useful.
Examples
--------
>>> G = nx.connected_caveman_graph(3, 3)
References
----------
.. [1] Watts, D. J. 'Networks, Dynamics, and the Small-World Phenomenon.'
Amer. J. Soc. 105, 493-527, 1999.
"""
G = nx.caveman_graph(l, k)
for start in range(0, l * k, k):
G.remove_edge(start, start + 1)
G.add_edge(start, (start - 1) % (l * k))
return G
@py_random_state(3)
def relaxed_caveman_graph(l, k, p, seed=None):
"""Return a relaxed caveman graph.
A relaxed caveman graph starts with `l` cliques of size `k`. Edges are
then randomly rewired with probability `p` to link different cliques.
Parameters
----------
l : int
Number of groups
k : int
Size of cliques
p : float
        Probability of rewiring each edge.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph
Relaxed Caveman Graph
Raises
------
NetworkXError:
If p is not in [0,1]
Examples
--------
>>> G = nx.relaxed_caveman_graph(2, 3, 0.1, seed=42)
References
----------
.. [1] Santo Fortunato, Community Detection in Graphs,
Physics Reports Volume 486, Issues 3-5, February 2010, Pages 75-174.
https://arxiv.org/abs/0906.0612
"""
G = nx.caveman_graph(l, k)
nodes = list(G)
for (u, v) in G.edges():
if seed.random() < p: # rewire the edge
x = seed.choice(nodes)
if G.has_edge(u, x):
continue
G.remove_edge(u, v)
G.add_edge(u, x)
return G
@py_random_state(3)
def random_partition_graph(sizes, p_in, p_out, seed=None, directed=False):
"""Return the random partition graph with a partition of sizes.
A partition graph is a graph of communities with sizes defined by
s in sizes. Nodes in the same group are connected with probability
p_in and nodes of different groups are connected with probability
p_out.
Parameters
----------
sizes : list of ints
Sizes of groups
p_in : float
probability of edges with in groups
p_out : float
probability of edges between groups
directed : boolean optional, default=False
Whether to create a directed graph
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph or DiGraph
random partition graph of size sum(gs)
Raises
------
NetworkXError
If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.random_partition_graph([10,10,10],.25,.01)
>>> len(G)
30
>>> partition = G.graph['partition']
>>> len(partition)
3
Notes
-----
This is a generalization of the planted-l-partition described in
[1]_. It allows for the creation of groups of any size.
    The partition is stored as a graph attribute 'partition'.
References
----------
.. [1] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
"""
# Use geometric method for O(n+m) complexity algorithm
# partition = nx.community_sets(nx.get_node_attributes(G, 'affiliation'))
if not 0.0 <= p_in <= 1.0:
raise nx.NetworkXError("p_in must be in [0,1]")
if not 0.0 <= p_out <= 1.0:
raise nx.NetworkXError("p_out must be in [0,1]")
# create connection matrix
num_blocks = len(sizes)
p = [[p_out for s in range(num_blocks)] for r in range(num_blocks)]
for r in range(num_blocks):
p[r][r] = p_in
return stochastic_block_model(sizes, p, nodelist=None, seed=seed,
directed=directed, selfloops=False,
sparse=True)
@py_random_state(4)
def planted_partition_graph(l, k, p_in, p_out, seed=None, directed=False):
"""Return the planted l-partition graph.
This model partitions a graph with n=l*k vertices in
l groups with k vertices each. Vertices of the same
group are linked with a probability p_in, and vertices
of different groups are linked with probability p_out.
Parameters
----------
l : int
Number of groups
k : int
Number of vertices in each group
p_in : float
probability of connecting vertices within a group
p_out : float
probability of connected vertices between groups
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : bool,optional (default=False)
If True return a directed graph
Returns
-------
G : NetworkX Graph or DiGraph
planted l-partition graph
Raises
------
NetworkXError:
        If p_in or p_out is not in [0,1]
Examples
--------
>>> G = nx.planted_partition_graph(4, 3, 0.5, 0.1, seed=42)
See Also
--------
    random_partition_graph
References
----------
.. [1] A. Condon, R.M. Karp, Algorithms for graph partitioning
on the planted partition model,
Random Struct. Algor. 18 (2001) 116-140.
.. [2] Santo Fortunato 'Community Detection in Graphs' Physical Reports
Volume 486, Issue 3-5 p. 75-174. https://arxiv.org/abs/0906.0612
"""
return random_partition_graph([k] * l, p_in, p_out, seed, directed)
@py_random_state(6)
def gaussian_random_partition_graph(n, s, v, p_in, p_out, directed=False,
seed=None):
"""Generate a Gaussian random partition graph.
A Gaussian random partition graph is created by creating k partitions
each with a size drawn from a normal distribution with mean s and variance
s/v. Nodes are connected within clusters with probability p_in and
    between clusters with probability p_out [1]_.
Parameters
----------
n : int
Number of nodes in the graph
s : float
Mean cluster size
v : float
Shape parameter. The variance of cluster size distribution is s/v.
p_in : float
        Probability of intra-cluster connection.
    p_out : float
        Probability of inter-cluster connection.
directed : boolean, optional default=False
Whether to create a directed graph or not
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
G : NetworkX Graph or DiGraph
gaussian random partition graph
Raises
------
NetworkXError
If s is > n
If p_in or p_out is not in [0,1]
Notes
-----
Note the number of partitions is dependent on s,v and n, and that the
last partition may be considerably smaller, as it is sized to simply
    fill out the nodes [1]_.
See Also
--------
random_partition_graph
Examples
--------
>>> G = nx.gaussian_random_partition_graph(100,10,10,.25,.1)
>>> len(G)
100
References
----------
.. [1] Ulrik Brandes, Marco Gaertler, Dorothea Wagner,
Experiments on Graph Clustering Algorithms,
In the proceedings of the 11th Europ. Symp. Algorithms, 2003.
"""
if s > n:
raise nx.NetworkXError("s must be <= n")
assigned = 0
sizes = []
while True:
size = int(seed.gauss(s, float(s) / v + 0.5))
if size < 1: # how to handle 0 or negative sizes?
continue
if assigned + size >= n:
sizes.append(n - assigned)
break
assigned += size
sizes.append(size)
    return random_partition_graph(sizes, p_in, p_out, seed, directed)
def ring_of_cliques(num_cliques, clique_size):
"""Defines a "ring of cliques" graph.
    A ring of cliques graph consists of cliques connected through single
    links. Each clique is a complete graph.
Parameters
----------
num_cliques : int
Number of cliques
clique_size : int
Size of cliques
Returns
-------
G : NetworkX Graph
ring of cliques graph
Raises
------
NetworkXError
If the number of cliques is lower than 2 or
if the size of cliques is smaller than 2.
Examples
--------
>>> G = nx.ring_of_cliques(8, 4)
See Also
--------
connected_caveman_graph
Notes
-----
The `connected_caveman_graph` graph removes a link from each clique to
connect it with the next clique. Instead, the `ring_of_cliques` graph
simply adds the link without removing any link from the cliques.
"""
if num_cliques < 2:
raise nx.NetworkXError('A ring of cliques must have at least '
'two cliques')
if clique_size < 2:
raise nx.NetworkXError('The cliques must have at least two nodes')
G = nx.Graph()
for i in range(num_cliques):
edges = itertools.combinations(range(i * clique_size, i * clique_size +
clique_size), 2)
G.add_edges_from(edges)
G.add_edge(i * clique_size + 1, (i + 1) * clique_size %
(num_cliques * clique_size))
return G
def windmill_graph(n, k):
"""Generate a windmill graph.
A windmill graph is a graph of `n` cliques each of size `k` that are all
joined at one node.
It can be thought of as taking a disjoint union of `n` cliques of size `k`,
selecting one point from each, and contracting all of the selected points.
Alternatively, one could generate `n` cliques of size `k-1` and one node
that is connected to all other nodes in the graph.
Parameters
----------
n : int
Number of cliques
k : int
Size of cliques
Returns
-------
G : NetworkX Graph
windmill graph with n cliques of size k
Raises
------
NetworkXError
If the number of cliques is less than two
If the size of the cliques are less than two
Examples
--------
>>> G = nx.windmill_graph(4, 5)
Notes
-----
The node labeled `0` will be the node connected to all other nodes.
Note that windmill graphs are usually denoted `Wd(k,n)`, so the parameters
are in the opposite order as the parameters of this method.
"""
if n < 2:
msg = 'A windmill graph must have at least two cliques'
raise nx.NetworkXError(msg)
if k < 2:
raise nx.NetworkXError('The cliques must have at least two nodes')
G = nx.disjoint_union_all(itertools.chain([nx.complete_graph(k)],
(nx.complete_graph(k - 1)
for _ in range(n - 1))))
G.add_edges_from((0, i) for i in range(k, G.number_of_nodes()))
return G
@py_random_state(3)
def stochastic_block_model(sizes, p, nodelist=None, seed=None,
directed=False, selfloops=False, sparse=True):
"""Return a stochastic block model graph.
This model partitions the nodes in blocks of arbitrary sizes, and places
edges between pairs of nodes independently, with a probability that depends
on the blocks.
Parameters
----------
sizes : list of ints
Sizes of blocks
p : list of list of floats
Element (r,s) gives the density of edges going from the nodes
of group r to nodes of group s.
p must match the number of groups (len(sizes) == len(p)),
and it must be symmetric if the graph is undirected.
nodelist : list, optional
The block tags are assigned according to the node identifiers
in nodelist. If nodelist is None, then the ordering is the
range [0,sum(sizes)-1].
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
directed : boolean optional, default=False
Whether to create a directed graph or not.
selfloops : boolean optional, default=False
Whether to include self-loops or not.
sparse: boolean optional, default=True
Use the sparse heuristic to speed up the generator.
Returns
-------
g : NetworkX Graph or DiGraph
Stochastic block model graph of size sum(sizes)
Raises
------
NetworkXError
If probabilities are not in [0,1].
If the probability matrix is not square (directed case).
If the probability matrix is not symmetric (undirected case).
If the sizes list does not match nodelist or the probability matrix.
If nodelist contains duplicate.
Examples
--------
>>> sizes = [75, 75, 300]
>>> probs = [[0.25, 0.05, 0.02],
... [0.05, 0.35, 0.07],
... [0.02, 0.07, 0.40]]
>>> g = nx.stochastic_block_model(sizes, probs, seed=0)
>>> len(g)
450
>>> H = nx.quotient_graph(g, g.graph['partition'], relabel=True)
>>> for v in H.nodes(data=True):
... print(round(v[1]['density'], 3))
...
0.245
0.348
0.405
>>> for v in H.edges(data=True):
... print(round(1.0 * v[2]['weight'] / (sizes[v[0]] * sizes[v[1]]), 3))
...
0.051
0.022
0.07
See Also
--------
random_partition_graph
planted_partition_graph
gaussian_random_partition_graph
gnp_random_graph
References
----------
.. [1] Holland, P. W., Laskey, K. B., & Leinhardt, S.,
"Stochastic blockmodels: First steps",
Social networks, 5(2), 109-137, 1983.
"""
# Check if dimensions match
if len(sizes) != len(p):
raise nx.NetworkXException("'sizes' and 'p' do not match.")
# Check for probability symmetry (undirected) and shape (directed)
for row in p:
if len(p) != len(row):
raise nx.NetworkXException("'p' must be a square matrix.")
if not directed:
p_transpose = [list(i) for i in zip(*p)]
for i in zip(p, p_transpose):
for j in zip(i[0], i[1]):
if abs(j[0] - j[1]) > 1e-08:
raise nx.NetworkXException("'p' must be symmetric.")
# Check for probability range
for row in p:
for prob in row:
if prob < 0 or prob > 1:
raise nx.NetworkXException("Entries of 'p' not in [0,1].")
# Check for nodelist consistency
if nodelist is not None:
if len(nodelist) != sum(sizes):
raise nx.NetworkXException("'nodelist' and 'sizes' do not match.")
if len(nodelist) != len(set(nodelist)):
raise nx.NetworkXException("nodelist contains duplicate.")
else:
nodelist = range(0, sum(sizes))
# Setup the graph conditionally to the directed switch.
block_range = range(len(sizes))
if directed:
g = nx.DiGraph()
block_iter = itertools.product(block_range, block_range)
else:
g = nx.Graph()
block_iter = itertools.combinations_with_replacement(block_range, 2)
# Split nodelist in a partition (list of sets).
size_cumsum = [sum(sizes[0:x]) for x in range(0, len(sizes) + 1)]
g.graph['partition'] = [set(nodelist[size_cumsum[x]:size_cumsum[x + 1]])
for x in range(0, len(size_cumsum) - 1)]
# Setup nodes and graph name
for block_id, nodes in enumerate(g.graph['partition']):
for node in nodes:
g.add_node(node, block=block_id)
g.name = "stochastic_block_model"
# Test for edge existence
parts = g.graph['partition']
for i, j in block_iter:
if i == j:
if directed:
if selfloops:
edges = itertools.product(parts[i], parts[i])
else:
edges = itertools.permutations(parts[i], 2)
else:
edges = itertools.combinations(parts[i], 2)
if selfloops:
edges = itertools.chain(edges, zip(parts[i], parts[i]))
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e)
else:
edges = itertools.product(parts[i], parts[j])
if sparse:
if p[i][j] == 1: # Test edge cases p_ij = 0 or 1
for e in edges:
g.add_edge(*e)
elif p[i][j] > 0:
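# Geometric skipping: with independent Bernoulli(p) trials, the gap to
# the next accepted edge is geometrically distributed, so we can jump
# floor(log(U) / log(1 - p)) candidate pairs at once instead of testing
# each one, making the expected work proportional to the number of edges.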
while True:
try:
logrand = math.log(seed.random())
skip = math.floor(logrand / math.log(1 - p[i][j]))
# consume "skip" edges
next(itertools.islice(edges, skip, skip), None)
e = next(edges)
g.add_edge(*e) # __safe
except StopIteration:
break
else:
for e in edges:
if seed.random() < p[i][j]:
g.add_edge(*e) # __safe
return g
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/generators/community.py | Python | gpl-3.0 | 19,874 | [
"Gaussian"
] | 6f823e1ef855e659b02becdcc352a3f6f38093ddc5e142968601dd0d730ef2f9 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
# Python module to manipulate 2D meshes for hydrodynamics purposes
# TODO: Change the following docstring as it is no longer up to date
"""
This module contains utility functions to manipulate, load, save and
convert surface mesh files used by the hydrodynamics community.
Two numpy arrays are manipulated in this module : vertices and faces.
vertices is the array of node coordinates. It is an array of shape (nv, 3) where
nv is the number of nodes in the mesh.
faces is the array of cell connectivities. It is an array of shape (nf, 4) where
nf is the number of cells in the mesh. Note that it has 4 columns because we consider
flat polygonal cells up to 4 edges (quads). Triangles are obtained by repeating
the first node at the end of the cell node ID list.
IMPORTANT NOTE:
IDs of vertices are internally indexed from 0 in meshmagick. However, several mesh
file formats use indexing starting at 1. This difference in convention should be
transparent to the user, and 1-indexing should not appear outside the I/O functions
"""
""" Pour activer l'aucompletion meshmagick:
installer argcomplete (conda install argcomplete)
Activer la completion globale:
activate-global-python-argcomplete --dest=/home/<username>/
Ca installe un fichier /home/<username>/python-argcomplete
qu'il faut sourcer dans le .bashrc soit ajotuer la ligne suivante dans le bashrc:
source /home/<username>/python-argcomplete
L'autocompletion devrait etre maintenant active pour meshmagick
"""
# TODO: move meshmagick.py to the root level of the project?
import os, sys
from datetime import datetime
from time import strftime
import argparse
from meshmagick.mesh import *
from meshmagick import mmio
from meshmagick.mesh_clipper import MeshClipper
from meshmagick import hydrostatics as hs
from meshmagick import densities
from meshmagick import __version__
__year__ = datetime.now().year
__author__ = "Francois Rongere"
__copyright__ = "Copyright 2014-%u, Ecole Centrale de Nantes / D-ICE Engineering" % __year__
__credits__ = "Francois Rongere"
__licence__ = "GPLv3"
__maintainer__ = "Francois Rongere"
__email__ = "Francois.Rongere@dice-engineering.com"
__all__ = ['main']
def list_medium():
return ', '.join(densities.list_medium())
# =======================================================================
# COMMAND LINE USAGE
# =======================================================================
try:
import argcomplete
has_argcomplete = True
except:
has_argcomplete = False
parser = argparse.ArgumentParser(
description=""" -- MESHMAGICK --
A python module and a command line utility to manipulate meshes from
different format used in hydrodynamics as well as for visualization.
The formats currently supported by meshmagick are :
+-----------+------------+-----------------+----------------------+
| File | R: Reading | Software | Keywords |
| extension | W: writing | | |
+===========+============+=================+======================+
| .mar | R/W | NEMOH [#f1]_ | nemoh, mar |
+-----------+------------+-----------------+----------------------+
| .nem | R | NEMOH [#f1]_ | nemoh_mesh, nem |
+-----------+------------+-----------------+----------------------+
| .gdf | R/W | WAMIT [#f2]_ | wamit, gdf |
+-----------+------------+-----------------+----------------------+
| .inp | R | DIODORE [#f3]_ | diodore-inp, inp |
+-----------+------------+-----------------+----------------------+
| .DAT | W | DIODORE [#f3]_ | diodore-dat |
+-----------+------------+-----------------+----------------------+
| .hst | R/W | HYDROSTAR [#f4]_| hydrostar, hst |
+-----------+------------+-----------------+----------------------+
| .nat | R/W | - | natural, nat |
+-----------+------------+-----------------+----------------------+
| .msh | R | GMSH [#f5]_ | gmsh, msh |
+-----------+------------+-----------------+----------------------+
| .rad | R | RADIOSS | rad, radioss |
+-----------+------------+-----------------+----------------------+
| .stl | R/W | - | stl |
+-----------+------------+-----------------+----------------------+
| .vtu | R/W | PARAVIEW [#f6]_ | vtu |
+-----------+------------+-----------------+----------------------+
| .vtp | R/W | PARAVIEW [#f6]_ | vtp |
+-----------+------------+-----------------+----------------------+
| .vtk | R/W | PARAVIEW [#f6]_ | paraview-legacy, vtk |
+-----------+------------+-----------------+----------------------+
| .tec | R/W | TECPLOT [#f7]_ | tecplot, tec |
+-----------+------------+-----------------+----------------------+
| .med | R | SALOME [#f8]_ | med, salome |
+-----------+------------+-----------------+----------------------+
| .obj | R | WAVEFRONT | obj |
+-----------+------------+-----------------+----------------------+
By default, Meshmagick uses the filename extensions to choose the
appropriate reader/writer. This behaviour might be bypassed using the
-ifmt and -ofmt optional arguments. When using these options, keywords
defined in the table above must be used as format identifiers.
.. rubric:: Footnotes
.. [#f1] NEMOH is an open source BEM Software for seakeeping developed at
Ecole Centrale de Nantes (LHEEA)
.. [#f2] WAMIT is a BEM Software for seakeeping developed by WAMIT, Inc.
.. [#f3] DIODORE is a BEM Software for seakeeping developed by PRINCIPIA
.. [#f4] HYDROSTAR is a BEM Software for seakeeping developed by
BUREAU VERITAS
.. [#f5] GMSH is an open source meshing software developed by C. Geuzaine
and J.-F. Remacle
.. [#f6] PARAVIEW is an open source visualization software developed by
Kitware
.. [#f7] TECPLOT is a visualization software developed by Tecplot
.. [#f8] SALOME-MECA is an open source software for computational mechanics
developed by EDF-R&D
""",
epilog='-- Copyright 2014-%u - Francois Rongere / Ecole Centrale de Nantes --' % __year__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# TODO: add an option listing all the file formats handled by meshmagick with an explanation of the software they come from
parser.add_argument('infilename', # TODO: consider type=file to test for file existence
help='path of the input mesh file in any supported format')
parser.add_argument('-o', '--outfilename', type=str,
help="""path of the output mesh file. The format of
this file is determined from the given extension.
""")
parser.add_argument('-ifmt', '--input-format',
help="""Input format. Meshmagick will read the input file considering the
INPUT_FORMAT rather than using the extension
""")
parser.add_argument('-ofmt', '--output-format',
help="""Output format. Meshmagick will write the output file considering
the OUTPUT_FORMAT rather than using the extension
""")
parser.add_argument('-q', '--quiet',
help="""switch of verbosity of meshmagick""",
action='store_true')
parser.add_argument('-i', '--info',
help="""extract informations on the mesh on the standard output""",
action='store_true')
parser.add_argument('--quality',
help="""prints mesh quality""",
action='store_true')
parser.add_argument('-t', '--translate', metavar=('Tx', 'Ty', 'Tz'),
nargs=3, type=float,
help="""translates the mesh in 3D
Usage -translate tx ty tz""")
parser.add_argument('-tx', '--translatex', type=float, metavar='Tx',
help="""translates the mesh following the x direction""")
parser.add_argument('-ty', '--translatey', type=float, metavar='Ty',
help="""translates the mesh following the y direction""")
parser.add_argument('-tz', '--translatez', type=float, metavar='Tz',
help="""translates the mesh following the z direction""")
parser.add_argument('-r', '--rotate', metavar=('Rx', 'Ry', 'Rz'),
nargs=3, type=float,
help="""rotates the mesh in 3D following a rotation
coordinate vector. It is done around fixed axis. Angles
must be given in degrees.""")
parser.add_argument('-rx', '--rotatex', type=float, metavar='Rx',
help="""rotates the mesh around the x direction.
Angles must be given in degrees.""")
parser.add_argument('-ry', '--rotatey', type=float, metavar='Ry',
help="""rotates the mesh around the y direction.
Angles must be given in degrees.""")
parser.add_argument('-rz', '--rotatez', type=float, metavar='Rz',
help="""rotates the mesh around the z direction.
Angles must be given in degrees.""")
parser.add_argument('-s', '--scale', type=float, metavar='S',
help="""scales the mesh. CAUTION : if used along
with a translation option, the scaling is done after
the translations. The translation magnitude should be set
according to the original mesh.
""")
parser.add_argument('-sx', '--scalex', type=float, metavar='Sx',
help="""scales the mesh along x axis. CAUTION : if used along
with a translation option, the scaling is done after
the translations. The translation magnitude should be set
according to the original mesh.
""")
parser.add_argument('-sy', '--scaley', type=float, metavar='Sy',
help="""scales the mesh along y axis. CAUTION : if used along
with a translation option, the scaling is done after
the translations. The translation magnitude should be set
according to the original mesh.
""")
parser.add_argument('-sz', '--scalez', type=float, metavar='Sz',
help="""scales the mesh along z axis. CAUTION : if used along
with a translation option, the scaling is done after
the translations. The translation magnitude should be set
according to the original mesh.
""")
parser.add_argument('-hn', '--heal-normals', action='store_true',
help="""Checks and heals the normals consistency and
verify if they are outward.
""")
parser.add_argument('-fn', '--flip-normals', action='store_true',
help="""flips the normals of the mesh""")
parser.add_argument('-hm', '--heal-mesh', action='store_true',
help="""Applies the following sanity transformation on the
mesh: Removes unused vertices, Removes degenerated faces,
Merge duplicate vertices, Heal triangles description,
Heal normal orientations.
""")
parser.add_argument('-p', '--plane', nargs='+', action='append', metavar='Arg',
help="""Defines a plane used by the --clip_by_plane and --symmetrize options.
It can be defined by the floats nx ny nz c where [nx, ny, nz]
is a normal vector to the plane and c defines its position
following the equation <N|X> = c with X a point belonging
to the plane.
It can also be defined by a string among [Oxy, Oxz, Oyz, /Oxy, /Oxz, /Oyz]
for quick definition. Several planes may be defined on the same command
line. Planes with a prepended '/' have normals inverted i.e. if Oxy has its
normal pointing upwards, /Oxy has its normal pointing downwards.
In that case, the planes are indexed by an integer starting at
0, following the order given on the command line.
""")
parser.add_argument('-c', '--clip-by-plane', nargs='*', action='append', metavar='Arg',
help="""cuts the mesh with a plane. Is no arguments are given, the Oxy plane
is used. If an integer is given, it should correspond to a plane defined with
the --plane option. If a key string is given, it should be a valid key (see
help of --plane option for valid plane keys). A normal and a scalar could
also be given for the plane definition just as for the --plane option. Several
clipping planes may be defined on the same command line.""")
parser.add_argument('-cc', '--concatenate-file', type=str,
help="""Concatenate a mesh from the specified path. The file format has to be
the same as the input file.""")
parser.add_argument('-md', '--merge-duplicates', nargs='?', const='1e-8',
default=None, metavar='Tol',
help="""merges the duplicate nodes in the mesh with the absolute tolerance
given as argument (default 1e-8). Tolerance must be lower than 1""")
parser.add_argument('-tq', '--triangulate-quadrangles', action='store_true',
help="""Triangulate all quadrangle _faces by a simple splitting procedure.
Twho triangles are generated and from both solution, the one with the best
aspect ratios is kept. This option may be used in conjunction with a
mesh export in a format that only deal with triangular cells like STL format.""")
parser.add_argument('-sym', '--symmetrize', nargs='*', action='append', metavar='Arg',
help="""Symmetrize the mesh by a plane defined wether by 4 scalars, i.e.
the plane normal vector coordinates and a scalar c such as N.X=c is the
plane equation (with X a point of the plane) or a string among ['Oxy',
'Oxz', 'Oyz', '/Oxy', '/Oxz', '/Oyz'] which are shortcuts for planes
passing by the origin and whose normals are the reference axes. Default
is Oxz if no argument is given to --sym option.
Be careful that symmetry is applied before any rotation, so the plane
equation is defined in the initial frame of reference.""")
parser.add_argument('--mirror', nargs='+', metavar='Arg',
help="""Mirror the mesh through the specified plane. Plane may be specified
with reference planes keys (see --plane option), or by 4 scalars, or by the
id of a plane defined with the --plane option. By default, the Oxy plane
is used when the option has no argument.""")
# FIXME: we should be able to keep the default values --> create a --rho-medium option
parser.add_argument('-pi', '--plain-inertia', action='store_true',
help="""Evaluates the inertia properties of the mesh considering it as
uniformly filled with a medium of density rho_medium in kg/m**3. Default
is 1023 kg/m**3.""")
# TODO: create a --thickness option
parser.add_argument('-si', '--shell-inertia', action='store_true',
help="""Evaluates the inertia properties of the mesh considering it as
a shell made of a medium of density rho_medium in kg/m**3. Default
is 7850 kg/m**3.""")
parser.add_argument('--rho-medium', type=float,
help="""The density (in kg/m**3) of the medium used for evaluation of
inertia parameters of the mesh. For the hypothesis of plain homogeneous
mesh, the default is that of salt water (1023 kg/m**3) . For the
hypothesis of a shell, default is that of steel (7850 kg/m**3).
It is possible to specify medium by a name. Available medium are
currently: %s
""" % list_medium())
parser.add_argument('--list-medium', action='store_true',
help="""Lists the available medium keywords along with their density.
"""
)
parser.add_argument('--thickness', type=float,
help="""The thickness of the shell used for the evaluation of inertia
parameters of the mesh. The default value is 0.02m.""")
# Arguments for hydrostatics computations
# TODO: the -hs option should be an argparse subcommand
# TODO: complete the help of -hs
parser.add_argument('-pn', '--project-name', default="NO_NAME", type=str, metavar='Project Name',
help="""The project name for hydrostatics output
""")
parser.add_argument('-hs', '--hydrostatics', action='store_true',
help="""Compute hydrostatics data and throws a report on the
command line. When used along with options -mdisp, --cog or
--zcog, the behaviour is different.""")
# TODO: replace disp by mass as it is more correct...
parser.add_argument('-mdisp', '--mass-displacement', default=None, type=float, metavar='Disp',
help="""Specifies the mass of the mesh for hydrostatic computations.
It MUST be given in tons.
""")
parser.add_argument('-cog', '--cog', nargs=3, metavar=('Xg', 'Yg', 'Zg'),
help="""Specifies the 3D position of the center of gravity.
The third coordinate given has priority over the value given
with the --zcog option.""")
parser.add_argument('-zg', '--zcog', default=None, type=float, metavar='Zcog',
help="""Specifies the z position of the center of gravity. This
is the minimal data needed for hydrostatic stiffness matrix
computation. It is however overwriten by the third component
of cog when --cog option is used. If none of these two option
is given, zcog is set to 0.
""")
parser.add_argument('-lpp', '--lpp', default=None, type=float, metavar='Lpp',
help="""Specifies the Lpp value as it cannot be calculated with
only the mesh as it depends on the AP position that is a
rudder position dependent information that the mesh does not
enclose. It helps do better inertia (Iyy & Izz) approximations
using standard formulas. See also the alternative -AP formula.
""")
parser.add_argument('-ap', '--orig-at-ap', action='store_true',
help="""Tell the solver that the origin is ar After perpendicular
sot that lpp can be calculated from this information.""")
parser.add_argument('-wd', '--water-density', default=1025., type=float, metavar='Rho',
help="""Specifies the density of salt water. Default is 1025 kg/m**3.
""")
parser.add_argument('-g', '--grav', default=9.81, type=float, metavar='G',
help="""Specifies the acceleration of gravity on the earth surface.
Default is 9.81 m/s**2.
""")
parser.add_argument('--hs-report', type=str, metavar='filename',
help="""Write the hydrostatic report into the file given as an argument""")
parser.add_argument('-lid', nargs='*', action='append', metavar='Arg',
help="""Generate a polygonal lid on the free surface z = 0from the set of points (x, y).
All the points are listed one after other such as: x1, y1, x2, y2, ...
At least three points are required. The number of coordinates must be even.""")
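# Illustrative command line (hypothetical file names): generate a square
# lid from four (x, y) corner points given as x1 y1 x2 y2 x3 y3 x4 y4:
# meshmagick hull.stl -lid 0 0 10 0 10 10 0 10 -o hull_with_lid.stl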
parser.add_argument('-mesh_size', '-ms', type=float,
help="""Mesh size used for generating the lid meshes. Default is the mean edge length.
""")
parser.add_argument('-sh', '--show', action='store_true',
help="""Shows the input mesh in an interactive window""")
parser.add_argument('-v', '--version', action='version',
version='meshmagick - version %s\n%s' % (__version__, __copyright__),
help="""Shows the version number and exit""")
def main():
if has_argcomplete:
argcomplete.autocomplete(parser)
# Parse command line arguments
args = parser.parse_args()
verbose = True
if args.quiet:
verbose = False
if verbose:
print('\n=============================================')
print(('meshmagick - version %s\n%s' % (__version__, __copyright__)))
print('=============================================')
# LOADING DATA FROM FILE
if args.input_format is not None:
format = args.input_format
else:
# Format based on extension
_, ext = os.path.splitext(args.infilename)
format = ext[1:].lower()
if format == '':
raise IOError(
'Unable to determine the input file format from its extension. Please specify an input format.')
# Loading mesh elements from file
if os.path.isfile(args.infilename):
V, F = mmio.load_mesh(args.infilename, format)
# Name the mesh after the filename
basename = os.path.basename(args.infilename)
mesh_name, _ = os.path.splitext(basename)
mesh = Mesh(V, F, name=mesh_name)
# Ensuring triangles are following the right convention (last id = first id)
mesh.heal_triangles()
if verbose:
mesh.verbose_on()
print(('%s successfully loaded' % args.infilename))
else:
raise IOError('file %s not found' % args.infilename)
if args.concatenate_file is not None:
print('Concatenate %s with %s' % (args.infilename, args.concatenate_file))
print("WARNING: the two meshes must have the same format.")
# Loading the file
if os.path.isfile(args.concatenate_file):
Vc, Fc = mmio.load_mesh(args.concatenate_file, format)
# Name the mesh after the filename
basename = os.path.basename(args.concatenate_file)
mesh_name, _ = os.path.splitext(basename)
mesh_c = Mesh(Vc, Fc, name=mesh_name)
# Ensuring triangles are following the right convention (last id = first id)
mesh_c.heal_triangles()
if verbose:
mesh_c.verbose_on()
print(('%s successfully loaded' % args.concatenate_file))
else:
raise IOError('file %s not found' % args.concatenate_file)
mesh += mesh_c
# Merge duplicate vertices
if args.merge_duplicates is not None:
tol = float(args.merge_duplicates)
mesh.merge_duplicates(atol=tol)
# TODO: put that dict at the beginning of the main function or in the module
plane_str_list = {'Oxy': [0., 0., 1.],
'Oxz': [0., 1., 0.],
'Oyz': [1., 0., 0.],
'/Oxy': [0., 0., -1.],
'/Oxz': [0., -1., 0.],
'/Oyz': [-1., 0., 0.]}
if args.quality:
mesh.print_quality()
# Defining planes
planes = []
if args.plane is not None:
nb_planes = len(args.plane)
if verbose:
if nb_planes == 1:
verb = 'plane has'
else:
verb = 'planes have'
print(('\n%u %s been defined:' % (nb_planes, verb)))
# TODO: add a summary of the defined planes
planes = [Plane() for i in range(nb_planes)]
for (iplane, plane) in enumerate(args.plane):
if len(plane) == 4:
# plane is defined by normal and scalar
try:
planes[iplane] = Plane(normal=list(map(float, plane[:3])), scalar=plane[3])
except:
raise AssertionError('Defining a plane by normal and scalar requires four scalars')
elif len(plane) == 1:
if plane[0] in plane_str_list:
planes[iplane].normal = np.array(plane_str_list[plane[0]], dtype=float)
planes[iplane].c = 0.
else:
raise AssertionError('%s key for plane is not known. Choices are [%s].'
% (plane[0], ', '.join(list(plane_str_list.keys()))))
else:
raise AssertionError('Planes should be defined by a normal and a scalar '
'or by a key to choose among [%s]' % (', '.join(list(plane_str_list.keys()))))
if verbose:
for plane_id, plane in enumerate(planes):
print(("\t%u: %s" % (plane_id, plane)))
# Mirroring the mesh
if args.mirror is not None:
sym_plane = Plane()
print((args.mirror))
if len(args.mirror) == 1:
# May be a standard plane or a plane id
if len(planes) > 0:
try:
plane_id = int(args.mirror[0])
if plane_id >= 0 and plane_id < len(planes):
sym_plane = planes[plane_id]
else:
raise AssertionError('Only planes IDs from 0 to %u have been defined. %u is outside the range.'
% (len(planes) - 1, plane_id))
except ValueError:
# Cannot be converted to an int, it should be the key of a plane
try:
sym_plane.normal = plane_str_list[args.mirror[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
else:
try:
sym_plane.normal = plane_str_list[args.mirror[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
elif len(args.mirror) == 4:
sym_plane.normal = args.mirror[:3]
sym_plane.c = args.mirror[3]
else:
raise ValueError('Bad plane definition.')
if verbose:
print(('Mirroring the mesh by :\n\t%s' % sym_plane))
mesh.mirror(sym_plane)
if verbose:
print('\t-> Done.')
# Symmetrizing the mesh
if args.symmetrize is not None:
nb_sym = len(args.symmetrize)
for iplane, plane in enumerate(args.symmetrize):
if len(plane) == 0:
args.symmetrize[iplane] = ['Oxz']
if verbose:
if nb_sym == 1:
verb = 'plane'
else:
verb = 'planes'
print(('\nMesh is being symmetrized by %u %s:' % (nb_sym, verb)))
for plane in args.symmetrize:
sym_plane = Plane()
if len(plane) == 1:
if len(planes) > 0:
try:
plane_id = int(plane[0])
if plane_id >= 0 and plane_id < len(planes):
sym_plane = planes[plane_id]
else:
raise AssertionError(
'Only planes IDs from 0 to %u have been defined. %u is outside the range.' % (
len(planes) - 1, plane_id))
except ValueError:
try:
sym_plane.normal = plane_str_list[plane[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
else:
try:
sym_plane.normal = plane_str_list[plane[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
elif len(plane) == 4:
sym_plane.normal = plane[:3]
sym_plane.c = plane[3]
else:
raise ValueError('Bad plane definition.')
if verbose:
print(('\t%s' % sym_plane))
mesh.symmetrize(sym_plane)
if verbose:
print('\t-> Done.')
# Globally heal the mesh
if args.heal_mesh:
if verbose:
print('\nOPERATION: heal the mesh')
mesh.heal_mesh()
if verbose:
print('\tDone.')
# Heal normals
if args.heal_normals and not args.heal_mesh:
if verbose:
print('\nOPERATION: heal normals')
mesh.heal_normals()
if verbose:
print('\t-> Done.')
# Mesh translations
if args.translate is not None:
if verbose:
print(('\nOPERATION: Translation by [%f, %f, %f]' % tuple(args.translate)))
mesh.translate(args.translate)
if verbose:
print('\t-> Done.')
if args.translatex is not None:
if verbose:
print(('\nOPERATION: Translation by %f along X' % args.translatex))
mesh.translate_x(args.translatex)
if verbose:
print('\t-> Done.')
if args.translatey is not None:
if verbose:
print(('\nOPERATION: Translation by %f along Y' % args.translatey))
mesh.translate_y(args.translatey)
if verbose:
print('\t-> Done.')
if args.translatez is not None:
if verbose:
print(('\nOPERATION: Translation by %f along Z' % args.translatez))
mesh.translate_z(args.translatez)
if verbose:
print('\t-> Done.')
# Mesh rotations
if args.rotate is not None:
if verbose:
print(('\nOPERATION: Rotation by [%f, %f, %f] (degrees)' % tuple(args.rotate)))
mesh.rotate(list(map(math.radians, args.rotate)))
if verbose:
print('\t-> Done.')
if args.rotatex is not None:
if verbose:
print(('\nOPERATION: Rotation by %f around X (Roll)' % args.rotatex))
mesh.rotate_x(math.radians(args.rotatex))
if verbose:
print('\t-> Done.')
if args.rotatey is not None:
if verbose:
print(('\nOPERATION: Rotation by %f around Y (Pitch)' % args.rotatey))
mesh.rotate_y(math.radians(args.rotatey))
if verbose:
print('\t-> Done.')
if args.rotatez is not None:
if verbose:
print(('\nOPERATION: Rotation by %f around Z (Yaw)' % args.rotatez))
mesh.rotate_z(math.radians(args.rotatez))
if verbose:
print('\t-> Done.')
if args.scale is not None:
if verbose:
print(('\nOPERATION: Scaling by %f' % args.scale))
mesh.scale(args.scale)
if verbose:
print('\t-> Done.')
if args.scalex is not None:
if verbose:
print(('\nOPERATION: Scaling by %f along the x axis' % args.scalex))
mesh.scalex(args.scalex)
if verbose:
print('\t-> Done.')
if args.scaley is not None:
if verbose:
print(('\nOPERATION: Scaling by %f along the y axis' % args.scaley))
mesh.scaley(args.scaley)
if verbose:
print('\t-> Done.')
if args.scalez is not None:
if verbose:
print(('\nOPERATION: Scaling by %f along the z axis' % args.scalez))
mesh.scalez(args.scalez)
if verbose:
print('\t-> Done.')
if args.flip_normals:
if verbose:
print('\nOPERATION: Flipping normals')
mesh.flip_normals()
if verbose:
print('\t-> Done.')
if args.triangulate_quadrangles:
mesh.triangulate_quadrangles()
# Clipping the mesh
if args.clip_by_plane is not None:
nb_clip = len(args.clip_by_plane)
for iplane, plane in enumerate(args.clip_by_plane):
if len(plane) == 0:
args.clip_by_plane[iplane] = ['Oxy']
if verbose:
if nb_clip == 1:
verb = 'plane'
else:
verb = 'planes'
print(('\nMesh is being clipped by %u %s' % (nb_clip, verb)))
for plane in args.clip_by_plane:
clipping_plane = Plane()
if len(plane) == 1:
if len(planes) > 0:
try:
plane_id = int(plane[0])
if plane_id >= 0 and plane_id < len(planes):
clipping_plane = planes[plane_id]
else:
raise AssertionError(
'Only planes IDs from 0 to %u have been defined. %u is outside the range.' % (
len(planes) - 1, plane_id))
except ValueError:
try:
clipping_plane.normal = plane_str_list[plane[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
else:
try:
clipping_plane.normal = plane_str_list[plane[0]]
except KeyError as err:
raise KeyError('%s is not a standard plane identifier' % err)
elif len(plane) == 4:
clipping_plane.normal = plane[:3]
clipping_plane.c = plane[3]
else:
raise ValueError('Bad plane definition.')
if verbose:
print(('\t%s' % clipping_plane))
clipper = MeshClipper(mesh, plane=clipping_plane)
mesh = clipper.clipped_mesh
if verbose:
print('\t-> Done.')
if args.lid is not None:
try:
import gmsh
except:
raise ImportError('gmsh has to be available for generating lids.')
try:
import pygmsh
except:
raise ImportError('pygmsh has to be available for generating lids.')
nb_lid = len(args.lid)
# Mesh size for the lid.
if(args.mesh_size is not None):
mesh_size = args.mesh_size
else:
mesh_size = mesh.mean_edge_length
if(nb_lid == 1):
print("Mean edge length (%f m) is used as mesh size for generating the lid mesh." % mesh_size)
else:
print("Mean edge length (%f m) is used as mesh size for generating the lid meshes." % mesh_size)
# Generation of the lid mesh files.
for ilid, lid in enumerate(args.lid):
if(len(lid) % 2 == 1):
raise KeyError("\nNumber of vertices (x, y) for generating a lid must be even.")
if(int(len(lid) / 2.) < 3):
raise KeyError("\nAt least three vertices are necessary to define a lid.")
nb_points = int(len(lid) / 2.)
with pygmsh.geo.Geometry() as geom:
list_vertices = []
it = iter(lid)
for coord in it:
x = float(coord)
y = float(next(it))
list_vertices.append([x, y])
geom.add_polygon(list_vertices, mesh_size=mesh_size)
geom.generate_mesh()
pygmsh.write("Lid_" + str(ilid + 1) + ".msh")
# Conversion of the .msh lid files into .obj lid files.
V, F = mmio.load_mesh("Lid_" + str(ilid + 1) + ".msh", "msh")
mmio.write_OBJ("Lid_" + str(ilid + 1) + ".obj", V, F)
print("Lid_" + str(ilid + 1) + ".obj is generated.")
# Concatenation of the meshfile and all the lids.
mesh_c = Mesh(V, F, name="Lid_" + str(ilid + 1))
mesh += mesh_c
if(nb_lid == 1):
print("%s concatenated with the lid mesh file." % args.infilename)
else:
print("%s concatenated with the lid mesh files." % args.infilename)
# Listing available medium
if args.list_medium:
col_width = 22
hline = '+{0:s}+{0:s}+\n'.format('-' * col_width)
table = '\n' + hline
table += '|{:<{n}s}|{:>{n}s}|\n'.format('NAME', 'DENSITY (KG/M**3)', n=col_width)
table += hline
for medium in densities.list_medium():
table += '|{:<{n}s}|{:>{n}.3f}|\n'.format(medium, densities.get_density(medium), n=col_width)
table += hline
print(table)
# Calculate the plain inertia
if args.plain_inertia:
if args.rho_medium is None:
rho_medium = 1023.
else:
rho_medium = args.rho_medium
inertia = mesh.eval_plain_mesh_inertias(rho_medium=rho_medium)
if verbose:
print(("\nInertial parameters for a uniform distribution of a medium of density %.1f kg/m**3 in the mesh:\n" \
% rho_medium))
print(("\tMass = %.3f tons" % (inertia.mass / 1000.)))
cog = inertia.gravity_center
print(("\tCOG (m):\n\t\txg = %.3f\n\t\tyg = %.3f\n\t\tzg = %.3f" % (cog[0], cog[1], cog[2])))
mat = inertia.inertia_matrix
print("\tInertia matrix (SI):")
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[0, 0], mat[0, 1], mat[0, 2])))
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[1, 0], mat[1, 1], mat[1, 2])))
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[2, 0], mat[2, 1], mat[2, 2])))
point = inertia.reduction_point
print(("\tExpressed at point : \t\t%.3E\t%.3E\t%.3E" % (point[0], point[1], point[2])))
if args.shell_inertia:
if args.rho_medium is None:
rho_medium = 7850.
else:
rho_medium = args.rho_medium
if args.thickness is None:
thickness = 0.02
else:
thickness = args.thickness
inertia = mesh.eval_shell_mesh_inertias(rho_medium=rho_medium, thickness=thickness)
if verbose:
print(("\nInertial parameters for a shell distribution of a medium of density %.1f kg/m**3 and a thickness " \
"of %.3f m over the mesh:\n" % (rho_medium, thickness)))
print(("\tMass = %.3f tons" % (inertia.mass / 1000.)))
cog = inertia.gravity_center
print(("\tCOG (m):\n\t\txg = %.3f\n\t\tyg = %.3f\n\t\tzg = %.3f" % (cog[0], cog[1], cog[2])))
mat = inertia.inertia_matrix
print("\tInertia matrix (SI):")
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[0, 0], mat[0, 1], mat[0, 2])))
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[1, 0], mat[1, 1], mat[1, 2])))
print(("\t\t%.3E\t%.3E\t%.3E" % (mat[2, 0], mat[2, 1], mat[2, 2])))
point = inertia.reduction_point
print(("\tExpressed at point : \t\t%.3E\t%.3E\t%.3E" % (point[0], point[1], point[2])))
if args.hydrostatics:
water_density = args.water_density
grav = args.grav
reltol = 1e-6
z_corr = 0.
rotmat_corr = np.eye(3, 3)
has_disp = has_cog = has_zcog = False
if args.mass_displacement is not None:
disp = args.mass_displacement
has_disp = True
if args.cog is not None:
cog = list(map(float, args.cog))
has_cog = True
if args.zcog is not None:
zcog = args.zcog
has_zcog = True
hs_data = dict()
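# Four cases depending on which of (displacement, COG) were given:
# - neither: hydrostatics of the current configuration (zcog required);
# - disp only: solve the vertical equilibrium for that displacement (zcog required);
# - disp and cog: solve the full 3DOF equilibrium;
# - cog only: take the current displacement, then solve the full 3DOF equilibrium.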
if not has_disp and not has_cog:
print(">>>> Performing hydrostatic computation on the current hull configuration considered at equilibrium")
if not has_zcog:
raise RuntimeError("zcog should at least be given for correct stiffness values computations")
hs_data = hs.compute_hydrostatics(mesh, np.zeros(3), water_density, grav, at_cog=False)
xb, yb, _ = hs_data["buoyancy_center"]
cog = np.array([xb, yb, zcog])
hs_data = hs.compute_hydrostatics(mesh, cog, water_density, grav, at_cog=True)
elif has_disp and not has_cog:
print(">>>> Computing equilibrium of the hull for the given displacement of %f tons" % disp)
if not has_zcog:
raise RuntimeError("zcog should at least be given for correct stiffness values computations")
z_corr = hs.displacement_equilibrium(mesh, disp, water_density, grav, reltol=reltol, verbose=True)
hs_data = hs.compute_hydrostatics(mesh, np.zeros(3), water_density, grav, z_corr=z_corr, at_cog=False)
xb, yb, _ = hs_data["buoyancy_center"]
cog = np.array([xb, yb, zcog])
hs_data = hs.compute_hydrostatics(mesh, cog, water_density, grav, z_corr=z_corr, at_cog=True)
elif has_disp and has_cog:
print(">>>> Computing equilibrium in 3DOF for the given displacement and COG")
if has_zcog:
warn("zcog is redundant with cog, taking cog and ignoring zcog")
z_corr, rotmat_corr = hs.full_equilibrium(mesh, cog, disp, water_density, grav, reltol=reltol, verbose=True)
hs_data = hs.compute_hydrostatics(mesh, cog, water_density, grav, z_corr=z_corr, rotmat_corr=rotmat_corr, at_cog=True)
elif not has_disp and has_cog:
print(">>>> Computing equilibrium in 3DOF for the given COG, considering the current configuration presents the "
"target displacement")
if has_zcog:
warn("zcog is redundant with cog, taking cog and ignoring zcog")
hs_data = hs.compute_hydrostatics(mesh, np.zeros(3), water_density, grav, at_cog=False)
disp = hs_data['disp_mass'] / 1000
z_corr, rotmat_corr = hs.full_equilibrium(mesh, cog, disp, water_density, grav, reltol=reltol, verbose=True)
hs_data = hs.compute_hydrostatics(mesh, cog, water_density, grav, z_corr=z_corr, rotmat_corr=rotmat_corr,
at_cog=True)
mesh.rotate_matrix(rotmat_corr)
mesh.translate_z(z_corr)
hs_report = hs.get_hydrostatic_report(hs_data)
print(hs_report)
if args.hs_report is not None:
with open(args.hs_report, 'w') as f:
f.write('==============================================\n')
f.write('Hydrostatic report generated by Meshmagick\n')
f.write('Meshfile: %s\n' % os.path.abspath(args.infilename))
f.write('%s\n' % strftime('%c'))
f.write('meshmagick - version %s\n%s\n' % (__version__, __copyright__))
f.write('==============================================\n')
f.write(hs_report)
# WARNING: no more mesh modifications should be performed from this point until the end of main()
if args.info:
print(mesh)
if args.show:
mesh.show()
if args.outfilename is None:
base, ext = os.path.splitext(args.infilename)
write_file = False
# if write_file:
# args.outfilename = '%s_modified%s' % (base, ext)
# Case where only the output format is given
if args.output_format is not None:
write_file = True
args.outfilename = '%s.%s' % (base, args.output_format)
else:
write_file = True
# Writing an output file
if write_file:
if args.output_format is not None:
format = args.output_format
else:
if args.outfilename is None:
# We base the output format on the input format used
if args.input_format is not None:
format = args.input_format
else:
format = os.path.splitext(args.infilename)[1][1:].lower()
if not mmio.know_extension(format):
raise IOError('Could not determine a format from input file extension, please specify an input format or an extension')
else:
format = os.path.splitext(args.outfilename)[1][1:].lower()
if verbose:
print(('Writing %s' % args.outfilename))
mmio.write_mesh(args.outfilename, mesh.vertices, mesh.faces, format)
if verbose:
print('\t-> Done.')
if verbose:
print('\n=============================================================')
print(('Meshmagick - version %s\n%s' % (__version__, __copyright__)))
print(('Maintainer : %s <%s>' % (__maintainer__, __email__)))
print('Good Bye!')
print('=============================================================')
if __name__ == '__main__':
main()
| LHEEA/meshmagick | meshmagick_cli.py | Python | gpl-3.0 | 46,540 | [
"ParaView",
"VTK"
] | 7b7b53bccecda537715ec4c06261a46f18278f9cefd50f10bcdfcfcd33f39990 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from unittest import TestCase
import ray
from ray.data import Dataset
import torch
import torch.nn as nn
import torch.optim as optim
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca.learn.pytorch import Estimator
from bigdl.orca.learn.metrics import Accuracy
def train_data_creator(a=5, b=10, size=1000):
def get_dataset(a, b, size) -> Dataset:
items = [i / size for i in range(size)]
dataset = ray.data.from_items([{
"x": x,
"y": a * x + b
} for x in items])
return dataset
ray_dataset = get_dataset(a, b, size)
return ray_dataset
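# Each row of the dataset is a dict; with the defaults a=5, b=10, size=1000
# the first rows are {"x": 0.0, "y": 10.0}, {"x": 0.001, "y": 10.005}, ...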
def model_creator(config):
net = nn.Linear(1, 1)
net = net.double()
return net
def optim_creator(model, config):
optimizer = optim.SGD(model.parameters(),
lr=config.get("lr", 0.001),
momentum=config.get("momentum", 0.9))
return optimizer
class TestPytorchEstimator(TestCase):
def setUp(self):
init_orca_context(runtime="ray", address="localhost:6379")
def tearDown(self):
stop_orca_context()
def test_train(self):
dataset = train_data_creator()
orca_estimator = Estimator.from_torch(model=model_creator,
optimizer=optim_creator,
loss=nn.MSELoss(),
metrics=[Accuracy()],
config={"lr": 0.001},
workers_per_node=2,
backend="torch_distributed",
sync_stats=True)
train_stats = orca_estimator.fit(data=dataset,
epochs=2,
batch_size=32,
label_cols="y")
assert orca_estimator.get_model()
if __name__ == "__main__":
pytest.main([__file__])
| intel-analytics/BigDL | python/orca/test/bigdl/orca/learn/ray/pytorch/test_estimator_ray_dataset.py | Python | apache-2.0 | 2,640 | [
"ORCA"
] | 5e552f5e4fefed6be6da1b3207e15658d87af1bb93b8402421b2ec272dbd98a0 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The fsl module provides classes for interfacing with the `FSL
<http://www.fmrib.ox.ac.uk/fsl/index.html>`_ command line tools. This
was written to work with FSL version 4.1.4.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
import os
from glob import glob
import warnings
from shutil import rmtree
import numpy as np
from nibabel import load
from ... import LooseVersion
from .base import (FSLCommand, FSLCommandInputSpec, Info)
from ..base import (load_template, File, traits, isdefined,
TraitedSpec, BaseInterface, Directory,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec)
from ...utils.filemanip import (list_to_filename, filename_to_list)
from ...utils.misc import human_order_sorted
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class Level1DesignInputSpec(BaseInterfaceInputSpec):
interscan_interval = traits.Float(mandatory=True,
desc='Interscan interval (in secs)')
session_info = traits.Any(mandatory=True,
desc='Session specific information generated by ``modelgen.SpecifyModel``')
bases = traits.Either(
traits.Dict(traits.Enum(
'dgamma'), traits.Dict(traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('gamma'), traits.Dict(
traits.Enum('derivs'), traits.Bool)),
traits.Dict(traits.Enum('none'), traits.Enum(None)),
mandatory=True,
desc="name of basis function and options e.g., {'dgamma': {'derivs': True}}")
model_serial_correlations = traits.Bool(
desc="Option to model serial correlations using an \
autoregressive estimator (order 1). Setting this option is only \
useful in the context of the fsf file. If you set this to False, you need to repeat \
this option for FILMGLS by setting autocorr_noestimate to True", mandatory=True)
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum(
'T'),
traits.List(
traits.Str),
traits.List(
traits.Float)),
traits.Tuple(
traits.Str,
traits.Enum(
'T'),
traits.List(
traits.Str),
traits.List(
traits.Float),
traits.List(
traits.Float)))))),
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list], [session list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts.")
class Level1DesignOutputSpec(TraitedSpec):
fsf_files = OutputMultiPath(File(exists=True),
desc='FSL feat specification files')
ev_files = OutputMultiPath(traits.List(File(exists=True)),
desc='condition information files')
class Level1Design(BaseInterface):
"""Generate FEAT specific files
Examples
--------
>>> level1design = Level1Design()
>>> level1design.inputs.interscan_interval = 2.5
>>> level1design.inputs.bases = {'dgamma':{'derivs': False}}
>>> level1design.inputs.session_info = 'session_info.npz'
>>> level1design.run() # doctest: +SKIP
"""
input_spec = Level1DesignInputSpec
output_spec = Level1DesignOutputSpec
def _create_ev_file(self, evfname, evinfo):
f = open(evfname, 'wt')
for i in evinfo:
if len(i) == 3:
f.write('%f %f %f\n' % (i[0], i[1], i[2]))
else:
f.write('%f\n' % i[0])
f.close()
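# _create_ev_file writes FSL's "custom" EV text formats: three columns
# "onset duration amplitude" per line (e.g. "12.000000 6.000000 1.000000")
# for condition events, or a single column of values for regressors.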
def _create_ev_files(
self, cwd, runinfo, runidx, usetd, contrasts, no_bases,
do_tempfilter):
"""Creates EV files from condition and regressor information.
Parameters:
-----------
runinfo : dict
Generated by `SpecifyModel` and contains information
about events and other regressors.
runidx : int
Index to run number
usetd : int
Whether or not to use temporal derivatives for
conditions
contrasts : list of lists
Information on contrasts to be evaluated
"""
conds = {}
evname = []
ev_hrf = load_template('feat_ev_hrf.tcl')
ev_none = load_template('feat_ev_none.tcl')
ev_ortho = load_template('feat_ev_ortho.tcl')
ev_txt = ''
# generate sections for conditions and other nuisance
# regressors
num_evs = [0, 0]
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(cwd, 'ev_%s_%d_%d.txt' % (name, runidx,
len(evname)))
evinfo = []
num_evs[0] += 1
num_evs[1] += 1
if field == 'cond':
for j, onset in enumerate(cond['onset']):
try:
amplitudes = cond['amplitudes']
if len(amplitudes) > 1:
amp = amplitudes[j]
else:
amp = amplitudes[0]
except KeyError:
amp = 1
if len(cond['duration']) > 1:
evinfo.insert(j, [onset, cond['duration'][j], amp])
else:
evinfo.insert(j, [onset, cond['duration'][0], amp])
if no_bases:
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
else:
ev_txt += ev_hrf.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
temporalderiv=usetd,
cond_file=evfname)
if usetd:
evname.append(name + 'TD')
num_evs[1] += 1
elif field == 'regress':
evinfo = [[j] for j in cond['val']]
ev_txt += ev_none.substitute(ev_num=num_evs[0],
ev_name=name,
tempfilt_yn=do_tempfilter,
cond_file=evfname)
ev_txt += "\n"
conds[name] = evfname
self._create_ev_file(evfname, evinfo)
# add ev orthogonalization
for i in range(1, num_evs[0] + 1):
for j in range(0, num_evs[0] + 1):
ev_txt += ev_ortho.substitute(c0=i, c1=j)
ev_txt += "\n"
# add contrast info to fsf file
if isdefined(contrasts):
contrast_header = load_template('feat_contrast_header.tcl')
contrast_prolog = load_template('feat_contrast_prolog.tcl')
contrast_element = load_template('feat_contrast_element.tcl')
contrast_ftest_element = load_template(
'feat_contrast_ftest_element.tcl')
contrastmask_header = load_template('feat_contrastmask_header.tcl')
contrastmask_footer = load_template('feat_contrastmask_footer.tcl')
contrastmask_element = load_template(
'feat_contrastmask_element.tcl')
# add t/f contrast info
ev_txt += contrast_header.substitute()
con_names = []
for j, con in enumerate(contrasts):
con_names.append(con[0])
con_map = {}
ftest_idx = []
ttest_idx = []
for j, con in enumerate(contrasts):
if con[1] == 'F':
ftest_idx.append(j)
for c in con[2]:
if c[0] not in con_map.keys():
con_map[c[0]] = []
con_map[c[0]].append(j)
else:
ttest_idx.append(j)
for ctype in ['real', 'orig']:
for j, con in enumerate(contrasts):
if con[1] == 'F':
continue
tidx = ttest_idx.index(j) + 1
ev_txt += contrast_prolog.substitute(cnum=tidx,
ctype=ctype,
cname=con[0])
count = 0
for c in range(1, len(evname) + 1):
if evname[c - 1].endswith('TD') and ctype == 'orig':
continue
count = count + 1
if evname[c - 1] in con[2]:
val = con[3][con[2].index(evname[c - 1])]
else:
val = 0.0
ev_txt += contrast_element.substitute(cnum=tidx,
element=count,
ctype=ctype, val=val)
ev_txt += "\n"
if con[0] in con_map.keys():
for fconidx in con_map[con[0]]:
ev_txt += contrast_ftest_element.substitute(
cnum=ftest_idx.index(fconidx) + 1,
element=tidx,
ctype=ctype,
val=1)
ev_txt += "\n"
# add contrast mask info
ev_txt += contrastmask_header.substitute()
for j, _ in enumerate(contrasts):
for k, _ in enumerate(contrasts):
if j != k:
ev_txt += contrastmask_element.substitute(c1=j + 1,
c2=k + 1)
ev_txt += contrastmask_footer.substitute()
return num_evs, ev_txt
def _format_session_info(self, session_info):
if isinstance(session_info, dict):
session_info = [session_info]
return session_info
def _get_func_files(self, session_info):
"""Returns functional files in the order of runs
"""
func_files = []
for i, info in enumerate(session_info):
func_files.insert(i, info['scans'])
return func_files
def _run_interface(self, runtime):
cwd = os.getcwd()
fsf_header = load_template('feat_header_l1.tcl')
fsf_postscript = load_template('feat_nongui.tcl')
prewhiten = 0
if isdefined(self.inputs.model_serial_correlations):
prewhiten = int(self.inputs.model_serial_correlations)
usetd = 0
no_bases = False
basis_key = self.inputs.bases.keys()[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
if basis_key == 'none':
no_bases = True
session_info = self._format_session_info(self.inputs.session_info)
func_files = self._get_func_files(session_info)
n_tcon = 0
n_fcon = 0
if isdefined(self.inputs.contrasts):
for i, c in enumerate(self.inputs.contrasts):
if c[1] == 'T':
n_tcon += 1
elif c[1] == 'F':
n_fcon += 1
for i, info in enumerate(session_info):
do_tempfilter = 1
if info['hpf'] == np.inf:
do_tempfilter = 0
num_evs, cond_txt = self._create_ev_files(cwd, info, i, usetd,
self.inputs.contrasts,
no_bases, do_tempfilter)
nim = load(func_files[i])
(_, _, _, timepoints) = nim.get_shape()
fsf_txt = fsf_header.substitute(run_num=i,
interscan_interval=self.inputs.interscan_interval,
num_vols=timepoints,
prewhiten=prewhiten,
num_evs=num_evs[0],
num_evs_real=num_evs[1],
num_tcon=n_tcon,
num_fcon=n_fcon,
high_pass_filter_cutoff=info[
'hpf'],
temphp_yn=do_tempfilter,
func_file=func_files[i])
fsf_txt += cond_txt
fsf_txt += fsf_postscript.substitute(overwrite=1)
f = open(os.path.join(cwd, 'run%d.fsf' % i), 'w')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
cwd = os.getcwd()
outputs['fsf_files'] = []
outputs['ev_files'] = []
usetd = 0
basis_key = self.inputs.bases.keys()[0]
if basis_key in ['dgamma', 'gamma']:
usetd = int(self.inputs.bases[basis_key]['derivs'])
for runno, runinfo in enumerate(self._format_session_info(self.inputs.session_info)):
outputs['fsf_files'].append(os.path.join(cwd, 'run%d.fsf' % runno))
outputs['ev_files'].insert(runno, [])
evname = []
for field in ['cond', 'regress']:
for i, cond in enumerate(runinfo[field]):
name = cond['name']
evname.append(name)
evfname = os.path.join(
cwd, 'ev_%s_%d_%d.txt' % (name, runno,
len(evname)))
if field == 'cond':
if usetd:
evname.append(name + 'TD')
outputs['ev_files'][runno].append(
os.path.join(cwd, evfname))
return outputs
class FEATInputSpec(FSLCommandInputSpec):
fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file")
class FEATOutputSpec(TraitedSpec):
feat_dir = Directory(exists=True)
class FEAT(FSLCommand):
"""Uses FSL feat to calculate first level stats
"""
_cmd = 'feat'
input_spec = FEATInputSpec
output_spec = FEATOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
is_ica = False
outputs['feat_dir']=None
with open(self.inputs.fsf_file, 'rt') as fp:
text = fp.read()
if "set fmri(inmelodic) 1" in text:
is_ica = True
for line in text.split('\n'):
if line.find("set fmri(outputdir)")>-1:
try:
outputdir_spec=line.split('"')[-2]
if os.path.exists(outputdir_spec):
outputs['feat_dir']=outputdir_spec
except:
pass
if not outputs['feat_dir']:
if is_ica:
outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*ica'))[0]
else:
outputs['feat_dir'] = glob(os.path.join(os.getcwd(), '*feat'))[0]
print 'Outputs from FEAT:', outputs
return outputs
class FEATModelInputSpec(FSLCommandInputSpec):
fsf_file = File(exists=True, mandatory=True, argstr="%s", position=0,
desc="File specifying the feat design spec file",
copyfile=False)
ev_files = traits.List(File(exists=True),
mandatory=True, argstr="%s",
desc="Event spec files generated by level1design",
position=1, copyfile=False)
class FEATModelOutputSpec(TraitedSpec):
design_file = File(
exists=True, desc='Mat file containing ascii matrix for design')
design_image = File(
exists=True, desc='Graphical representation of design matrix')
design_cov = File(
exists=True, desc='Graphical representation of design covariance')
con_file = File(
exists=True, desc='Contrast file containing contrast vectors')
fcon_file = File(desc='Contrast file containing contrast vectors')
class FEATModel(FSLCommand):
"""Uses FSL feat_model to generate design.mat files
"""
_cmd = 'feat_model'
input_spec = FEATModelInputSpec
output_spec = FEATModelOutputSpec
def _format_arg(self, name, trait_spec, value):
if name == 'fsf_file':
return super(FEATModel, self)._format_arg(name, trait_spec, self._get_design_root(value))
elif name == 'ev_files':
return ''
else:
return super(FEATModel, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _list_outputs(self):
# TODO: figure out file names and get rid off the globs
outputs = self._outputs().get()
root = self._get_design_root(list_to_filename(self.inputs.fsf_file))
design_file = glob(os.path.join(os.getcwd(), '%s*.mat' % root))
assert len(design_file) == 1, 'No mat file generated by FEAT Model'
outputs['design_file'] = design_file[0]
design_image = glob(os.path.join(os.getcwd(), '%s.png' % root))
assert len(
design_image) == 1, 'No design image generated by FEAT Model'
outputs['design_image'] = design_image[0]
design_cov = glob(os.path.join(os.getcwd(), '%s_cov.png' % root))
assert len(
design_cov) == 1, 'No covariance image generated by FEAT Model'
outputs['design_cov'] = design_cov[0]
con_file = glob(os.path.join(os.getcwd(), '%s*.con' % root))
assert len(con_file) == 1, 'No con file generated by FEAT Model'
outputs['con_file'] = con_file[0]
fcon_file = glob(os.path.join(os.getcwd(), '%s*.fts' % root))
if fcon_file:
assert len(fcon_file) == 1, 'No fts file generated by FEAT Model'
outputs['fcon_file'] = fcon_file[0]
return outputs
class FILMGLSInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-3,
argstr='%s',
desc='input data file')
design_file = File(exists=True, position=-2,
argstr='%s',
desc='design matrix file')
threshold = traits.Range(default=1000., low=0.0, argstr='%f',
position=-1, usedefault=True,
desc='threshold')
smooth_autocorr = traits.Bool(argstr='-sa',
desc='Smooth auto corr estimates')
mask_size = traits.Int(argstr='-ms %d',
desc="susan mask size")
brightness_threshold = traits.Range(low=0, argstr='-epith %d',
desc='susan brightness threshold, otherwise it is estimated')
full_data = traits.Bool(argstr='-v', desc='output full data')
_estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window',
'multitaper_product', 'use_pava', 'autocorr_noestimate']
autocorr_estimate_only = traits.Bool(argstr='-ac',
xor=_estimate_xor,
desc='perform autocorrelation estimation only')
fit_armodel = traits.Bool(argstr='-ar', xor=_estimate_xor,
desc='fits autoregressive model - default is to use tukey with M=sqrt(numvols)')
tukey_window = traits.Int(argstr='-tukey %d', xor=_estimate_xor,
desc='tukey window size to estimate autocorr')
multitaper_product = traits.Int(argstr='-mt %d', xor=_estimate_xor,
desc='multitapering with slepian tapers and num is the time-bandwidth product')
use_pava = traits.Bool(
argstr='-pava', desc='estimates autocorr using PAVA')
autocorr_noestimate = traits.Bool(argstr='-noest', xor=_estimate_xor,
desc='do not estimate autocorrs')
output_pwdata = traits.Bool(argstr='-output_pwdata',
desc='output prewhitened data and average design matrix')
results_dir = Directory('results', argstr='-rn %s', usedefault=True,
desc='directory to store results in')
class FILMGLSInputSpec505(FSLCommandInputSpec):
in_file = File(exists=True, mandatory=True, position=-3,
argstr='--in=%s', desc='input data file')
design_file = File(exists=True, position=-2,
argstr='--pd=%s', desc='design matrix file')
threshold = traits.Range(default=1000., low=0.0, argstr='--thr=%f',
position=-1, usedefault=True, desc='threshold')
smooth_autocorr = traits.Bool(argstr='--sa',
desc='Smooth auto corr estimates')
mask_size = traits.Int(argstr='--ms=%d', desc="susan mask size")
brightness_threshold = traits.Range(low=0, argstr='--epith=%d',
desc=('susan brightness threshold, '
'otherwise it is estimated'))
full_data = traits.Bool(argstr='-v', desc='output full data')
_estimate_xor = ['autocorr_estimate_only', 'fit_armodel', 'tukey_window',
'multitaper_product', 'use_pava', 'autocorr_noestimate']
autocorr_estimate_only = traits.Bool(argstr='--ac', xor=_estimate_xor,
desc=('perform autocorrelation '
'estimation only'))
fit_armodel = traits.Bool(argstr='--ar', xor=_estimate_xor,
desc=('fits autoregressive model - default is to '
'use tukey with M=sqrt(numvols)'))
tukey_window = traits.Int(argstr='--tukey=%d', xor=_estimate_xor,
desc='tukey window size to estimate autocorr')
multitaper_product = traits.Int(argstr='--mt=%d', xor=_estimate_xor,
desc=('multitapering with slepian tapers '
'and num is the time-bandwidth '
'product'))
use_pava = traits.Bool(argstr='--pava', desc='estimates autocorr using PAVA')
autocorr_noestimate = traits.Bool(argstr='--noest', xor=_estimate_xor,
desc='do not estimate autocorrs')
output_pwdata = traits.Bool(argstr='--outputPWdata',
desc=('output prewhitened data and average '
'design matrix'))
results_dir = Directory('results', argstr='--rn=%s', usedefault=True,
desc='directory to store results in')
class FILMGLSInputSpec507(FILMGLSInputSpec505):
threshold = traits.Float(default=-1000., argstr='--thr=%f',
position=-1, usedefault=True,
desc='threshold')
tcon_file = File(exists=True, argstr='--con=%s',
desc='contrast file containing T-contrasts')
fcon_file = File(exists=True, argstr='--fcon=%s',
desc='contrast file containing F-contrasts')
mode = traits.Enum('volumetric', 'surface', argstr="--mode=%s",
desc="Type of analysis to be done")
surface = File(exists=True, argstr="--in2=%s",
desc=("input surface for autocorr smoothing in "
"surface-based analyses"))
class FILMGLSOutputSpec(TraitedSpec):
param_estimates = OutputMultiPath(File(exists=True),
desc='Parameter estimates for each column of the design matrix')
residual4d = File(exists=True,
desc='Model fit residual mean-squared error for each time point')
dof_file = File(exists=True, desc='degrees of freedom')
sigmasquareds = File(
        exists=True, desc='summary of residuals, see Woolrich et al., 2001')
results_dir = Directory(exists=True,
desc='directory storing model estimation output')
corrections = File(exists=True,
desc='statistical corrections used within FILM modelling')
thresholdac = File(exists=True,
desc='The FILM autocorrelation parameters')
logfile = File(exists=True,
desc='FILM run logfile')
class FILMGLSOutputSpec507(TraitedSpec):
param_estimates = OutputMultiPath(File(exists=True),
desc='Parameter estimates for each column of the design matrix')
residual4d = File(exists=True,
desc='Model fit residual mean-squared error for each time point')
dof_file = File(exists=True, desc='degrees of freedom')
sigmasquareds = File(
        exists=True, desc='summary of residuals, see Woolrich et al., 2001')
results_dir = Directory(exists=True,
desc='directory storing model estimation output')
thresholdac = File(exists=True,
desc='The FILM autocorrelation parameters')
logfile = File(exists=True,
desc='FILM run logfile')
copes = OutputMultiPath(File(exists=True),
desc='Contrast estimates for each contrast')
varcopes = OutputMultiPath(File(exists=True),
desc='Variance estimates for each contrast')
zstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each contrast')
tstats = OutputMultiPath(File(exists=True),
desc='t-stat file for each contrast')
fstats = OutputMultiPath(File(exists=True),
desc='f-stat file for each contrast')
zfstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each F contrast')
class FILMGLS(FSLCommand):
"""Use FSL film_gls command to fit a design matrix to voxel timeseries
Examples
--------
Initialize with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> fgls = fsl.FILMGLS()
>>> res = fgls.run('in_file', 'design_file', 'thresh', rn='stats') #doctest: +SKIP
Assign options through the ``inputs`` attribute:
>>> fgls = fsl.FILMGLS()
>>> fgls.inputs.in_file = 'functional.nii'
>>> fgls.inputs.design_file = 'design.mat'
>>> fgls.inputs.threshold = 10
>>> fgls.inputs.results_dir = 'stats'
>>> res = fgls.run() #doctest: +SKIP
Specify options when creating an instance:
>>> fgls = fsl.FILMGLS(in_file='functional.nii', \
design_file='design.mat', \
threshold=10, results_dir='stats')
>>> res = fgls.run() #doctest: +SKIP
"""
_cmd = 'film_gls'
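    # film_gls's command-line interface changed across FSL releases, so the
    # input/output specs are chosen at class-definition time from the detected
    # FSL version (<=5.0.4, 5.0.5/5.0.6, and >5.0.6).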
if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'):
input_spec = FILMGLSInputSpec507
elif Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.4'):
input_spec = FILMGLSInputSpec505
else:
input_spec = FILMGLSInputSpec
if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'):
output_spec = FILMGLSOutputSpec507
else:
output_spec = FILMGLSOutputSpec
def _get_pe_files(self, cwd):
files = None
if isdefined(self.inputs.design_file):
fp = open(self.inputs.design_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumWaves'):
numpes = int(line.split()[-1])
files = []
for i in range(numpes):
files.append(self._gen_fname('pe%d.nii' % (i + 1),
cwd=cwd))
break
fp.close()
return files
def _get_numcons(self):
numtcons = 0
numfcons = 0
if isdefined(self.inputs.tcon_file):
fp = open(self.inputs.tcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numtcons = int(line.split()[-1])
break
fp.close()
if isdefined(self.inputs.fcon_file):
fp = open(self.inputs.fcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numfcons = int(line.split()[-1])
break
fp.close()
return numtcons, numfcons
def _list_outputs(self):
outputs = self._outputs().get()
cwd = os.getcwd()
results_dir = os.path.join(cwd, self.inputs.results_dir)
outputs['results_dir'] = results_dir
pe_files = self._get_pe_files(results_dir)
if pe_files:
outputs['param_estimates'] = pe_files
outputs['residual4d'] = self._gen_fname('res4d.nii', cwd=results_dir)
outputs['dof_file'] = os.path.join(results_dir, 'dof')
outputs['sigmasquareds'] = self._gen_fname('sigmasquareds.nii',
cwd=results_dir)
outputs['thresholdac'] = self._gen_fname('threshac1.nii',
cwd=results_dir)
if Info.version() and LooseVersion(Info.version()) < LooseVersion('5.0.7'):
outputs['corrections'] = self._gen_fname('corrections.nii',
cwd=results_dir)
outputs['logfile'] = self._gen_fname('logfile',
change_ext=False,
cwd=results_dir)
if Info.version() and LooseVersion(Info.version()) > LooseVersion('5.0.6'):
pth = results_dir
numtcons, numfcons = self._get_numcons()
base_contrast = 1
copes = []
varcopes = []
zstats = []
tstats = []
for i in range(numtcons):
copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i),
cwd=pth))
varcopes.append(
self._gen_fname('varcope%d.nii' % (base_contrast + i),
cwd=pth))
zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i),
cwd=pth))
tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i),
cwd=pth))
if copes:
outputs['copes'] = copes
outputs['varcopes'] = varcopes
outputs['zstats'] = zstats
outputs['tstats'] = tstats
fstats = []
zfstats = []
for i in range(numfcons):
fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i),
cwd=pth))
zfstats.append(
self._gen_fname('zfstat%d.nii' % (base_contrast + i),
cwd=pth))
if fstats:
outputs['fstats'] = fstats
outputs['zfstats'] = zfstats
return outputs
class FEATRegisterInputSpec(BaseInterfaceInputSpec):
feat_dirs = InputMultiPath(
Directory(exists=True), desc="Lower level feat dirs",
mandatory=True)
reg_image = File(
exists=True, desc="image to register to (will be treated as standard)",
mandatory=True)
reg_dof = traits.Int(
12, desc="registration degrees of freedom", usedefault=True)
class FEATRegisterOutputSpec(TraitedSpec):
fsf_file = File(exists=True,
desc="FSL feat specification file")
class FEATRegister(BaseInterface):
"""Register feat directories to a specific standard
"""
input_spec = FEATRegisterInputSpec
output_spec = FEATRegisterOutputSpec
def _run_interface(self, runtime):
fsf_header = load_template('featreg_header.tcl')
fsf_footer = load_template('feat_nongui.tcl')
fsf_dirs = load_template('feat_fe_featdirs.tcl')
num_runs = len(self.inputs.feat_dirs)
fsf_txt = fsf_header.substitute(num_runs=num_runs,
regimage=self.inputs.reg_image,
regdof=self.inputs.reg_dof)
for i, rundir in enumerate(filename_to_list(self.inputs.feat_dirs)):
fsf_txt += fsf_dirs.substitute(runno=i + 1,
rundir=os.path.abspath(rundir))
fsf_txt += fsf_footer.substitute()
f = open(os.path.join(os.getcwd(), 'register.fsf'), 'wt')
f.write(fsf_txt)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['fsf_file'] = os.path.abspath(
os.path.join(os.getcwd(), 'register.fsf'))
return outputs
class FLAMEOInputSpec(FSLCommandInputSpec):
cope_file = File(exists=True, argstr='--copefile=%s', mandatory=True,
desc='cope regressor data file')
var_cope_file = File(exists=True, argstr='--varcopefile=%s',
desc='varcope weightings data file')
dof_var_cope_file = File(exists=True, argstr='--dofvarcopefile=%s',
desc='dof data file for varcope data')
mask_file = File(exists=True, argstr='--maskfile=%s', mandatory=True,
desc='mask file')
design_file = File(exists=True, argstr='--designfile=%s', mandatory=True,
desc='design matrix file')
t_con_file = File(
exists=True, argstr='--tcontrastsfile=%s', mandatory=True,
desc='ascii matrix specifying t-contrasts')
f_con_file = File(exists=True, argstr='--fcontrastsfile=%s',
desc='ascii matrix specifying f-contrasts')
cov_split_file = File(
exists=True, argstr='--covsplitfile=%s', mandatory=True,
desc='ascii matrix specifying the groups the covariance is split into')
run_mode = traits.Enum(
'fe', 'ols', 'flame1', 'flame12', argstr='--runmode=%s',
mandatory=True, desc='inference to perform')
n_jumps = traits.Int(
argstr='--njumps=%d', desc='number of jumps made by mcmc')
burnin = traits.Int(argstr='--burnin=%d',
desc='number of jumps at start of mcmc to be discarded')
sample_every = traits.Int(argstr='--sampleevery=%d',
desc='number of jumps for each sample')
fix_mean = traits.Bool(argstr='--fixmean', desc='fix mean for tfit')
infer_outliers = traits.Bool(argstr='--inferoutliers',
desc='infer outliers - not for fe')
no_pe_outputs = traits.Bool(argstr='--nopeoutput',
desc='do not output pe files')
sigma_dofs = traits.Int(argstr='--sigma_dofs=%d',
desc='sigma (in mm) to use for Gaussian smoothing the DOFs in FLAME 2. Default is 1mm, -1 indicates no smoothing')
outlier_iter = traits.Int(argstr='--ioni=%d',
desc='Number of max iterations to use when inferring outliers. Default is 12.')
log_dir = Directory("stats", argstr='--ld=%s', usedefault=True) # ohinds
# no support for ven, vef
class FLAMEOOutputSpec(TraitedSpec):
pes = OutputMultiPath(File(exists=True),
desc=("Parameter estimates for each column of the "
"design matrix for each voxel"))
res4d = OutputMultiPath(File(exists=True),
desc=("Model fit residual mean-squared error for "
"each time point"))
copes = OutputMultiPath(File(exists=True),
desc="Contrast estimates for each contrast")
var_copes = OutputMultiPath(File(exists=True),
desc="Variance estimates for each contrast")
zstats = OutputMultiPath(File(exists=True),
desc="z-stat file for each contrast")
tstats = OutputMultiPath(File(exists=True),
desc="t-stat file for each contrast")
zfstats = OutputMultiPath(File(exists=True),
desc="z stat file for each f contrast")
fstats = OutputMultiPath(File(exists=True),
desc="f-stat file for each contrast")
mrefvars = OutputMultiPath(File(exists=True),
desc=("mean random effect variances for each "
"contrast"))
tdof = OutputMultiPath(File(exists=True),
desc="temporal dof file for each contrast")
weights = OutputMultiPath(File(exists=True),
desc="weights file for each contrast")
stats_dir = Directory(File(exists=True),
desc="directory storing model estimation output")
class FLAMEO(FSLCommand):
"""Use FSL flameo command to perform higher level model fits
Examples
--------
Initialize FLAMEO with no options, assigning them when calling run:
>>> from nipype.interfaces import fsl
>>> import os
>>> flameo = fsl.FLAMEO(cope_file='cope.nii.gz', \
var_cope_file='varcope.nii.gz', \
cov_split_file='cov_split.mat', \
design_file='design.mat', \
t_con_file='design.con', \
mask_file='mask.nii', \
run_mode='fe')
>>> flameo.cmdline
'flameo --copefile=cope.nii.gz --covsplitfile=cov_split.mat --designfile=design.mat --ld=stats --maskfile=mask.nii --runmode=fe --tcontrastsfile=design.con --varcopefile=varcope.nii.gz'
"""
_cmd = 'flameo'
input_spec = FLAMEOInputSpec
output_spec = FLAMEOOutputSpec
# ohinds: 2010-04-06
def _run_interface(self, runtime):
log_dir = self.inputs.log_dir
cwd = os.getcwd()
if os.access(os.path.join(cwd, log_dir), os.F_OK):
rmtree(os.path.join(cwd, log_dir))
return super(FLAMEO, self)._run_interface(runtime)
# ohinds: 2010-04-06
# made these compatible with flameo
def _list_outputs(self):
outputs = self._outputs().get()
pth = os.path.join(os.getcwd(), self.inputs.log_dir)
pes = human_order_sorted(glob(os.path.join(pth, 'pe[0-9]*.*')))
assert len(pes) >= 1, 'No pe volumes generated by FSL Estimate'
outputs['pes'] = pes
res4d = human_order_sorted(glob(os.path.join(pth, 'res4d.*')))
assert len(res4d) == 1, 'No residual volume generated by FSL Estimate'
outputs['res4d'] = res4d[0]
copes = human_order_sorted(glob(os.path.join(pth, 'cope[0-9]*.*')))
        assert len(copes) >= 1, 'No cope volumes generated by FSL Estimate'
outputs['copes'] = copes
var_copes = human_order_sorted(
glob(os.path.join(pth, 'varcope[0-9]*.*')))
assert len(
            var_copes) >= 1, 'No varcope volumes generated by FSL Estimate'
outputs['var_copes'] = var_copes
zstats = human_order_sorted(glob(os.path.join(pth, 'zstat[0-9]*.*')))
        assert len(zstats) >= 1, 'No zstat volumes generated by FSL Estimate'
outputs['zstats'] = zstats
if isdefined(self.inputs.f_con_file):
zfstats = human_order_sorted(
glob(os.path.join(pth, 'zfstat[0-9]*.*')))
assert len(
                zfstats) >= 1, 'No zfstat volumes generated by FSL Estimate'
outputs['zfstats'] = zfstats
fstats = human_order_sorted(
glob(os.path.join(pth, 'fstat[0-9]*.*')))
assert len(
                fstats) >= 1, 'No fstat volumes generated by FSL Estimate'
outputs['fstats'] = fstats
tstats = human_order_sorted(glob(os.path.join(pth, 'tstat[0-9]*.*')))
        assert len(tstats) >= 1, 'No tstat volumes generated by FSL Estimate'
outputs['tstats'] = tstats
mrefs = human_order_sorted(
glob(os.path.join(pth, 'mean_random_effects_var[0-9]*.*')))
assert len(
mrefs) >= 1, 'No mean random effects volumes generated by FLAMEO'
outputs['mrefvars'] = mrefs
tdof = human_order_sorted(glob(os.path.join(pth, 'tdof_t[0-9]*.*')))
assert len(tdof) >= 1, 'No T dof volumes generated by FLAMEO'
outputs['tdof'] = tdof
weights = human_order_sorted(
glob(os.path.join(pth, 'weights[0-9]*.*')))
assert len(weights) >= 1, 'No weight volumes generated by FLAMEO'
outputs['weights'] = weights
outputs['stats_dir'] = pth
return outputs
class ContrastMgrInputSpec(FSLCommandInputSpec):
tcon_file = File(exists=True, mandatory=True,
argstr='%s', position=-1,
desc='contrast file containing T-contrasts')
fcon_file = File(exists=True, argstr='-f %s',
desc='contrast file containing F-contrasts')
param_estimates = InputMultiPath(File(exists=True),
argstr='', copyfile=False,
mandatory=True,
desc='Parameter estimates for each column of the design matrix')
corrections = File(exists=True, copyfile=False, mandatory=True,
desc='statistical corrections used within FILM modelling')
dof_file = File(exists=True, argstr='', copyfile=False, mandatory=True,
desc='degrees of freedom')
sigmasquareds = File(exists=True, argstr='', position=-2,
copyfile=False, mandatory=True,
                         desc='summary of residuals, see Woolrich et al., 2001')
contrast_num = traits.Range(low=1, argstr='-cope',
desc='contrast number to start labeling copes from')
suffix = traits.Str(argstr='-suffix %s',
desc='suffix to put on the end of the cope filename before the contrast number, default is nothing')
class ContrastMgrOutputSpec(TraitedSpec):
copes = OutputMultiPath(File(exists=True),
desc='Contrast estimates for each contrast')
varcopes = OutputMultiPath(File(exists=True),
desc='Variance estimates for each contrast')
zstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each contrast')
tstats = OutputMultiPath(File(exists=True),
desc='t-stat file for each contrast')
fstats = OutputMultiPath(File(exists=True),
desc='f-stat file for each contrast')
zfstats = OutputMultiPath(File(exists=True),
desc='z-stat file for each F contrast')
neffs = OutputMultiPath(File(exists=True),
desc='neff file ?? for each contrast')
class ContrastMgr(FSLCommand):
"""Use FSL contrast_mgr command to evaluate contrasts
In interface mode this file assumes that all the required inputs are in the
same location.
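    Examples
    --------
    A minimal sketch (illustrative only; the file names are hypothetical and
    assume all FILMGLS outputs live in one results directory):
    >>> con = ContrastMgr(tcon_file='design.con', \
                          param_estimates=['pe1.nii'], \
                          corrections='corrections.nii', \
                          dof_file='dof', \
                          sigmasquareds='sigmasquareds.nii')
    >>> res = con.run() # doctest: +SKIP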
"""
_cmd = 'contrast_mgr'
input_spec = ContrastMgrInputSpec
output_spec = ContrastMgrOutputSpec
def _run_interface(self, runtime):
# The returncode is meaningless in ContrastMgr. So check the output
# in stderr and if it's set, then update the returncode
# accordingly.
runtime = super(ContrastMgr, self)._run_interface(runtime)
if runtime.stderr:
self.raise_exception(runtime)
return runtime
def _format_arg(self, name, trait_spec, value):
if name in ['param_estimates', 'corrections', 'dof_file']:
return ''
elif name in ['sigmasquareds']:
path, _ = os.path.split(value)
return path
else:
return super(ContrastMgr, self)._format_arg(name, trait_spec, value)
def _get_design_root(self, infile):
_, fname = os.path.split(infile)
return fname.split('.')[0]
def _get_numcons(self):
numtcons = 0
numfcons = 0
if isdefined(self.inputs.tcon_file):
fp = open(self.inputs.tcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numtcons = int(line.split()[-1])
break
fp.close()
if isdefined(self.inputs.fcon_file):
fp = open(self.inputs.fcon_file, 'rt')
for line in fp.readlines():
if line.startswith('/NumContrasts'):
numfcons = int(line.split()[-1])
break
fp.close()
return numtcons, numfcons
def _list_outputs(self):
outputs = self._outputs().get()
pth, _ = os.path.split(self.inputs.sigmasquareds)
numtcons, numfcons = self._get_numcons()
base_contrast = 1
if isdefined(self.inputs.contrast_num):
base_contrast = self.inputs.contrast_num
copes = []
varcopes = []
zstats = []
tstats = []
neffs = []
for i in range(numtcons):
copes.append(self._gen_fname('cope%d.nii' % (base_contrast + i),
cwd=pth))
varcopes.append(
self._gen_fname('varcope%d.nii' % (base_contrast + i),
cwd=pth))
zstats.append(self._gen_fname('zstat%d.nii' % (base_contrast + i),
cwd=pth))
tstats.append(self._gen_fname('tstat%d.nii' % (base_contrast + i),
cwd=pth))
neffs.append(self._gen_fname('neff%d.nii' % (base_contrast + i),
cwd=pth))
if copes:
outputs['copes'] = copes
outputs['varcopes'] = varcopes
outputs['zstats'] = zstats
outputs['tstats'] = tstats
outputs['neffs'] = neffs
fstats = []
zfstats = []
for i in range(numfcons):
fstats.append(self._gen_fname('fstat%d.nii' % (base_contrast + i),
cwd=pth))
zfstats.append(
self._gen_fname('zfstat%d.nii' % (base_contrast + i),
cwd=pth))
if fstats:
outputs['fstats'] = fstats
outputs['zfstats'] = zfstats
return outputs
class L2ModelInputSpec(BaseInterfaceInputSpec):
num_copes = traits.Range(low=1, mandatory=True,
desc='number of copes to be combined')
class L2ModelOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design contrast file')
design_grp = File(exists=True, desc='design group file')
class L2Model(BaseInterface):
"""Generate subject specific second level model
Examples
--------
>>> from nipype.interfaces.fsl import L2Model
>>> model = L2Model(num_copes=3) # 3 sessions
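    Running the model writes ``design.mat``, ``design.con`` and ``design.grp``
    to the current working directory:
    >>> res = model.run() # doctest: +SKIP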
"""
input_spec = L2ModelInputSpec
output_spec = L2ModelOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
mat_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'/PPheights %e' % 1,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
mat_txt += ['%e' % 1]
mat_txt = '\n'.join(mat_txt)
con_txt = ['/ContrastName1 group mean',
'/NumWaves 1',
'/NumContrasts 1',
'/PPheights %e' % 1,
'/RequiredEffect 100.0', # XX where does this
# number come from
'',
'/Matrix',
'%e' % 1]
con_txt = '\n'.join(con_txt)
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % self.inputs.num_copes,
'',
'/Matrix']
for i in range(self.inputs.num_copes):
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt)
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.grp': grp_txt}
# write design files
for i, name in enumerate(['design.mat', 'design.con', 'design.grp']):
f = open(os.path.join(cwd, name), 'wt')
f.write(txt[name])
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for field in outputs.keys():
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
class MultipleRegressDesignInputSpec(BaseInterfaceInputSpec):
contrasts = traits.List(
traits.Either(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(traits.Str),
traits.List(traits.Float)),
traits.Tuple(traits.Str,
traits.Enum('F'),
traits.List(traits.Tuple(traits.Str,
traits.Enum('T'),
traits.List(
traits.Str),
traits.List(
traits.Float)),
))),
mandatory=True,
desc="List of contrasts with each contrast being a list of the form - \
[('name', 'stat', [condition list], [weight list])]. if \
session list is None or not provided, all sessions are used. For F \
contrasts, the condition list should contain previously defined \
T-contrasts without any weight list.")
regressors = traits.Dict(traits.Str, traits.List(traits.Float),
mandatory=True,
desc='dictionary containing named lists of regressors')
groups = traits.List(traits.Int,
desc='list of group identifiers (defaults to single group)')
class MultipleRegressDesignOutputSpec(TraitedSpec):
design_mat = File(exists=True, desc='design matrix file')
design_con = File(exists=True, desc='design t-contrast file')
design_fts = File(exists=True, desc='design f-contrast file')
design_grp = File(exists=True, desc='design group file')
class MultipleRegressDesign(BaseInterface):
"""Generate multiple regression design
.. note::
FSL does not demean columns for higher level analysis.
Please see `FSL documentation <http://www.fmrib.ox.ac.uk/fsl/feat5/detail.html#higher>`_
for more details on model specification for higher level analysis.
Examples
--------
>>> from nipype.interfaces.fsl import MultipleRegressDesign
>>> model = MultipleRegressDesign()
>>> model.inputs.contrasts = [['group mean', 'T',['reg1'],[1]]]
>>> model.inputs.regressors = dict(reg1=[1, 1, 1], reg2=[2.,-4, 3])
>>> model.run() # doctest: +SKIP
"""
input_spec = MultipleRegressDesignInputSpec
output_spec = MultipleRegressDesignOutputSpec
def _run_interface(self, runtime):
cwd = os.getcwd()
regs = sorted(self.inputs.regressors.keys())
nwaves = len(regs)
npoints = len(self.inputs.regressors[regs[0]])
ntcons = sum([1 for con in self.inputs.contrasts if con[1] == 'T'])
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
# write mat file
mat_txt = ['/NumWaves %d' % nwaves,
'/NumPoints %d' % npoints]
ppheights = []
for reg in regs:
maxreg = np.max(self.inputs.regressors[reg])
minreg = np.min(self.inputs.regressors[reg])
if np.sign(maxreg) == np.sign(minreg):
regheight = max([abs(minreg), abs(maxreg)])
else:
regheight = abs(maxreg - minreg)
ppheights.append('%e' % regheight)
mat_txt += ['/PPheights ' + ' '.join(ppheights)]
mat_txt += ['',
'/Matrix']
for cidx in range(npoints):
mat_txt.append(' '.join(
['%e' % self.inputs.regressors[key][cidx] for key in regs]))
mat_txt = '\n'.join(mat_txt) + '\n'
# write t-con file
con_txt = []
counter = 0
tconmap = {}
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'T':
tconmap[conidx] = counter
counter += 1
con_txt += ['/ContrastName%d %s' % (counter, con[0])]
con_txt += ['/NumWaves %d' % nwaves,
'/NumContrasts %d' % ntcons,
'/PPheights %s' % ' '.join(
['%e' % 1 for i in range(counter)]),
'/RequiredEffect %s' % ' '.join(
['%.3f' % 100 for i in range(counter)]),
'',
'/Matrix']
for idx in sorted(tconmap.keys()):
convals = np.zeros((nwaves, 1))
for regidx, reg in enumerate(self.inputs.contrasts[idx][2]):
convals[regs.index(reg)
] = self.inputs.contrasts[idx][3][regidx]
con_txt.append(' '.join(['%e' % val for val in convals]))
con_txt = '\n'.join(con_txt) + '\n'
# write f-con file
fcon_txt = ''
if nfcons:
fcon_txt = ['/NumWaves %d' % ntcons,
'/NumContrasts %d' % nfcons,
'',
'/Matrix']
for conidx, con in enumerate(self.inputs.contrasts):
if con[1] == 'F':
convals = np.zeros((ntcons, 1))
for tcon in con[2]:
convals[tconmap[self.inputs.contrasts.index(tcon)]] = 1
fcon_txt.append(' '.join(['%d' % val for val in convals]))
fcon_txt = '\n'.join(fcon_txt)
fcon_txt += '\n'
# write group file
grp_txt = ['/NumWaves 1',
'/NumPoints %d' % npoints,
'',
'/Matrix']
for i in range(npoints):
if isdefined(self.inputs.groups):
grp_txt += ['%d' % self.inputs.groups[i]]
else:
grp_txt += ['1']
grp_txt = '\n'.join(grp_txt) + '\n'
txt = {'design.mat': mat_txt,
'design.con': con_txt,
'design.fts': fcon_txt,
'design.grp': grp_txt}
# write design files
for key, val in txt.items():
if ('fts' in key) and (nfcons == 0):
continue
filename = key.replace('_', '.')
f = open(os.path.join(cwd, filename), 'wt')
f.write(val)
f.close()
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
nfcons = sum([1 for con in self.inputs.contrasts if con[1] == 'F'])
for field in outputs.keys():
if ('fts' in field) and (nfcons == 0):
continue
outputs[field] = os.path.join(os.getcwd(),
field.replace('_', '.'))
return outputs
class SMMInputSpec(FSLCommandInputSpec):
spatial_data_file = File(
exists=True, position=0, argstr='--sdf="%s"', mandatory=True,
desc="statistics spatial map", copyfile=False)
mask = File(exists=True, position=1, argstr='--mask="%s"', mandatory=True,
desc="mask file", copyfile=False)
no_deactivation_class = traits.Bool(position=2, argstr="--zfstatmode",
desc="enforces no deactivation class")
class SMMOutputSpec(TraitedSpec):
null_p_map = File(exists=True)
activation_p_map = File(exists=True)
deactivation_p_map = File(exists=True)
class SMM(FSLCommand):
'''
Spatial Mixture Modelling. For more detail on the spatial mixture modelling see
Mixture Models with Adaptive Spatial Regularisation for Segmentation with an Application to FMRI Data;
Woolrich, M., Behrens, T., Beckmann, C., and Smith, S.; IEEE Trans. Medical Imaging, 24(1):1-11, 2005.
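    A minimal usage sketch (illustrative only; file names are hypothetical):
    >>> smm = SMM(spatial_data_file='zstats.nii', mask='mask.nii')
    >>> res = smm.run() # doctest: +SKIP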
'''
_cmd = 'mm --ld=logdir'
input_spec = SMMInputSpec
output_spec = SMMOutputSpec
def _list_outputs(self):
outputs = self._outputs().get()
# TODO get the true logdir from the stdout
outputs['null_p_map'] = self._gen_fname(basename="w1_mean",
cwd="logdir")
outputs['activation_p_map'] = self._gen_fname(
basename="w2_mean", cwd="logdir")
if not isdefined(self.inputs.no_deactivation_class) or not self.inputs.no_deactivation_class:
outputs['deactivation_p_map'] = self._gen_fname(
basename="w3_mean", cwd="logdir")
return outputs
class MELODICInputSpec(FSLCommandInputSpec):
in_files = InputMultiPath(
File(exists=True), argstr="-i %s", mandatory=True, position=0,
desc="input file names (either single file name or a list)",
sep=",")
out_dir = Directory(
argstr="-o %s", desc="output directory name", genfile=True)
mask = File(exists=True, argstr="-m %s",
desc="file name of mask for thresholding")
no_mask = traits.Bool(argstr="--nomask", desc="switch off masking")
update_mask = traits.Bool(
argstr="--update_mask", desc="switch off mask updating")
no_bet = traits.Bool(argstr="--nobet", desc="switch off BET")
bg_threshold = traits.Float(
argstr="--bgthreshold=%f", desc="brain/non-brain threshold used to mask non-brain voxels, as a percentage (only if --nobet selected)")
    dim = traits.Int(argstr="-d %d", desc="dimensionality reduction into #num dimensions "
                     "(default: automatic estimation)")
dim_est = traits.Str(argstr="--dimest=%s", desc="use specific dim. estimation technique:"
" lap, bic, mdl, aic, mean (default: lap)")
sep_whiten = traits.Bool(
argstr="--sep_whiten", desc="switch on separate whitening")
sep_vn = traits.Bool(
argstr="--sep_vn", desc="switch off joined variance normalization")
num_ICs = traits.Int(
argstr="-n %d", desc="number of IC's to extract (for deflation approach)")
approach = traits.Str(argstr="-a %s", desc="approach for decomposition, 2D: defl, symm (default), "
" 3D: tica (default), concat")
non_linearity = traits.Str(
argstr="--nl=%s", desc="nonlinearity: gauss, tanh, pow3, pow4")
var_norm = traits.Bool(
argstr="--vn", desc="switch off variance normalization")
pbsc = traits.Bool(
argstr="--pbsc", desc="switch off conversion to percent BOLD signal change")
cov_weight = traits.Float(argstr="--covarweight=%f", desc="voxel-wise weights for the covariance "
"matrix (e.g. segmentation information)")
epsilon = traits.Float(argstr="--eps=%f", desc="minimum error change")
epsilonS = traits.Float(
argstr="--epsS=%f", desc="minimum error change for rank-1 approximation in TICA")
maxit = traits.Int(argstr="--maxit=%d",
desc="maximum number of iterations before restart")
max_restart = traits.Int(
argstr="--maxrestart=%d", desc="maximum number of restarts")
mm_thresh = traits.Float(
argstr="--mmthresh=%f", desc="threshold for Mixture Model based inference")
no_mm = traits.Bool(
argstr="--no_mm", desc="switch off mixture modelling on IC maps")
ICs = File(exists=True, argstr="--ICs=%s",
desc="filename of the IC components file for mixture modelling")
mix = File(exists=True, argstr="--mix=%s",
desc="mixing matrix for mixture modelling / filtering")
smode = File(exists=True, argstr="--smode=%s",
desc="matrix of session modes for report generation")
rem_cmp = traits.List(
traits.Int, argstr="-f %d", desc="component numbers to remove")
report = traits.Bool(argstr="--report", desc="generate Melodic web report")
bg_image = File(exists=True, argstr="--bgimage=%s", desc="specify background image for report"
" (default: mean image)")
tr_sec = traits.Float(argstr="--tr=%f", desc="TR in seconds")
log_power = traits.Bool(
argstr="--logPower", desc="calculate log of power for frequency spectrum")
t_des = File(exists=True, argstr="--Tdes=%s",
desc="design matrix across time-domain")
t_con = File(exists=True, argstr="--Tcon=%s",
desc="t-contrast matrix across time-domain")
s_des = File(exists=True, argstr="--Sdes=%s",
desc="design matrix across subject-domain")
s_con = File(exists=True, argstr="--Scon=%s",
desc="t-contrast matrix across subject-domain")
out_all = traits.Bool(argstr="--Oall", desc="output everything")
out_unmix = traits.Bool(argstr="--Ounmix", desc="output unmixing matrix")
out_stats = traits.Bool(
argstr="--Ostats", desc="output thresholded maps and probability maps")
out_pca = traits.Bool(argstr="--Opca", desc="output PCA results")
out_white = traits.Bool(
argstr="--Owhite", desc="output whitening/dewhitening matrices")
out_orig = traits.Bool(argstr="--Oorig", desc="output the original ICs")
out_mean = traits.Bool(argstr="--Omean", desc="output mean volume")
report_maps = traits.Str(argstr="--report_maps=%s",
desc="control string for spatial map images (see slicer)")
remove_deriv = traits.Bool(argstr="--remove_deriv", desc="removes every second entry in paradigm"
" file (EV derivatives)")
class MELODICOutputSpec(TraitedSpec):
out_dir = Directory(exists=True)
report_dir = Directory(exists=True)
class MELODIC(FSLCommand):
"""Multivariate Exploratory Linear Optimised Decomposition into Independent Components
Examples
--------
>>> melodic_setup = MELODIC()
>>> melodic_setup.inputs.approach = 'tica'
>>> melodic_setup.inputs.in_files = ['functional.nii', 'functional2.nii', 'functional3.nii']
>>> melodic_setup.inputs.no_bet = True
>>> melodic_setup.inputs.bg_threshold = 10
>>> melodic_setup.inputs.tr_sec = 1.5
>>> melodic_setup.inputs.mm_thresh = 0.5
>>> melodic_setup.inputs.out_stats = True
>>> melodic_setup.inputs.t_des = 'timeDesign.mat'
>>> melodic_setup.inputs.t_con = 'timeDesign.con'
>>> melodic_setup.inputs.s_des = 'subjectDesign.mat'
>>> melodic_setup.inputs.s_con = 'subjectDesign.con'
>>> melodic_setup.inputs.out_dir = 'groupICA.out'
>>> melodic_setup.cmdline
'melodic -i functional.nii,functional2.nii,functional3.nii -a tica --bgthreshold=10.000000 --mmthresh=0.500000 --nobet -o groupICA.out --Ostats --Scon=subjectDesign.con --Sdes=subjectDesign.mat --Tcon=timeDesign.con --Tdes=timeDesign.mat --tr=1.500000'
>>> melodic_setup.run() # doctest: +SKIP
"""
input_spec = MELODICInputSpec
output_spec = MELODICOutputSpec
_cmd = 'melodic'
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_dir'] = self.inputs.out_dir
if not isdefined(outputs['out_dir']):
outputs['out_dir'] = self._gen_filename("out_dir")
if isdefined(self.inputs.report) and self.inputs.report:
outputs['report_dir'] = os.path.join(
self._gen_filename("out_dir"), "report")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
class SmoothEstimateInputSpec(FSLCommandInputSpec):
dof = traits.Int(argstr='--dof=%d', mandatory=True,
xor=['zstat_file'],
desc='number of degrees of freedom')
mask_file = File(argstr='--mask=%s',
exists=True, mandatory=True,
desc='brain mask volume')
residual_fit_file = File(argstr='--res=%s',
exists=True, requires=['dof'],
desc='residual-fit image file')
zstat_file = File(argstr='--zstat=%s',
exists=True, xor=['dof'],
desc='zstat image file')
class SmoothEstimateOutputSpec(TraitedSpec):
dlh = traits.Float(desc='smoothness estimate sqrt(det(Lambda))')
volume = traits.Int(desc='number of voxels in mask')
resels = traits.Float(desc='number of resels')
class SmoothEstimate(FSLCommand):
""" Estimates the smoothness of an image
Examples
--------
>>> est = SmoothEstimate()
>>> est.inputs.zstat_file = 'zstat1.nii.gz'
>>> est.inputs.mask_file = 'mask.nii'
>>> est.cmdline
'smoothest --mask=mask.nii --zstat=zstat1.nii.gz'
"""
input_spec = SmoothEstimateInputSpec
output_spec = SmoothEstimateOutputSpec
_cmd = 'smoothest'
def aggregate_outputs(self, runtime=None, needed_outputs=None):
outputs = self._outputs()
stdout = runtime.stdout.split('\n')
outputs.dlh = float(stdout[0].split()[1])
outputs.volume = int(stdout[1].split()[1])
outputs.resels = float(stdout[2].split()[1])
return outputs
class ClusterInputSpec(FSLCommandInputSpec):
in_file = File(argstr='--in=%s', mandatory=True,
exists=True, desc='input volume')
threshold = traits.Float(argstr='--thresh=%.10f',
mandatory=True,
desc='threshold for input volume')
out_index_file = traits.Either(traits.Bool, File,
argstr='--oindex=%s',
desc='output of cluster index (in size order)', hash_files=False)
out_threshold_file = traits.Either(traits.Bool, File,
argstr='--othresh=%s',
desc='thresholded image', hash_files=False)
out_localmax_txt_file = traits.Either(traits.Bool, File,
argstr='--olmax=%s',
desc='local maxima text file', hash_files=False)
out_localmax_vol_file = traits.Either(traits.Bool, File,
argstr='--olmaxim=%s',
desc='output of local maxima volume', hash_files=False)
out_size_file = traits.Either(traits.Bool, File,
argstr='--osize=%s',
desc='filename for output of size image', hash_files=False)
out_max_file = traits.Either(traits.Bool, File,
argstr='--omax=%s',
desc='filename for output of max image', hash_files=False)
out_mean_file = traits.Either(traits.Bool, File,
argstr='--omean=%s',
desc='filename for output of mean image', hash_files=False)
out_pval_file = traits.Either(traits.Bool, File,
argstr='--opvals=%s',
desc='filename for image output of log pvals', hash_files=False)
pthreshold = traits.Float(argstr='--pthresh=%.10f',
requires=['dlh', 'volume'],
desc='p-threshold for clusters')
peak_distance = traits.Float(argstr='--peakdist=%.10f',
desc='minimum distance between local maxima/minima, in mm (default 0)')
cope_file = traits.File(argstr='--cope=%s',
desc='cope volume')
volume = traits.Int(argstr='--volume=%d',
desc='number of voxels in the mask')
dlh = traits.Float(argstr='--dlh=%.10f',
desc='smoothness estimate = sqrt(det(Lambda))')
    fractional = traits.Bool(argstr='--fractional',
                             desc='interprets the threshold as a fraction of the robust range')
    connectivity = traits.Int(argstr='--connectivity=%d',
                              desc='the connectivity of voxels (default 26)')
    use_mm = traits.Bool(argstr='--mm', desc='use mm, not voxel, coordinates')
    find_min = traits.Bool(argstr='--min', desc='find minima instead of maxima')
    no_table = traits.Bool(
        argstr='--no_table', desc='suppresses printing of the table info')
minclustersize = traits.Bool(argstr='--minclustersize',
desc='prints out minimum significant cluster size')
xfm_file = File(argstr='--xfm=%s',
desc='filename for Linear: input->standard-space transform. Non-linear: input->highres transform')
std_space_file = File(argstr='--stdvol=%s',
desc='filename for standard-space volume')
num_maxima = traits.Int(argstr='--num=%d',
desc='no of local maxima to report')
warpfield_file = File(argstr='--warpvol=%s',
                          desc='file containing warpfield')
class ClusterOutputSpec(TraitedSpec):
index_file = File(desc='output of cluster index (in size order)')
threshold_file = File(desc='thresholded image')
localmax_txt_file = File(desc='local maxima text file')
localmax_vol_file = File(desc='output of local maxima volume')
size_file = File(desc='filename for output of size image')
max_file = File(desc='filename for output of max image')
mean_file = File(desc='filename for output of mean image')
pval_file = File(desc='filename for image output of log pvals')
class Cluster(FSLCommand):
""" Uses FSL cluster to perform clustering on statistical output
Examples
--------
>>> cl = Cluster()
>>> cl.inputs.threshold = 2.3
>>> cl.inputs.in_file = 'zstat1.nii.gz'
>>> cl.inputs.out_localmax_txt_file = 'stats.txt'
>>> cl.cmdline
'cluster --in=zstat1.nii.gz --olmax=stats.txt --thresh=2.3000000000'
"""
input_spec = ClusterInputSpec
output_spec = ClusterOutputSpec
_cmd = 'cluster'
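    # Maps each optional output trait to the filename suffix used when the
    # trait is given as a bare boolean and the output name has to be
    # auto-generated from in_file.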
filemap = {'out_index_file': 'index', 'out_threshold_file': 'threshold',
'out_localmax_txt_file': 'localmax.txt',
'out_localmax_vol_file': 'localmax',
'out_size_file': 'size', 'out_max_file': 'max',
'out_mean_file': 'mean', 'out_pval_file': 'pval'}
def _list_outputs(self):
outputs = self.output_spec().get()
for key, suffix in self.filemap.items():
outkey = key[4:]
inval = getattr(self.inputs, key)
if isdefined(inval):
if isinstance(inval, bool):
if inval:
change_ext = True
if suffix.endswith('.txt'):
change_ext = False
outputs[outkey] = self._gen_fname(self.inputs.in_file,
suffix='_' + suffix,
change_ext=change_ext)
else:
outputs[outkey] = os.path.abspath(inval)
return outputs
def _format_arg(self, name, spec, value):
if name in self.filemap.keys():
if isinstance(value, bool):
fname = self._list_outputs()[name[4:]]
else:
fname = value
return spec.argstr % fname
return super(Cluster, self)._format_arg(name, spec, value)
class RandomiseInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, desc='4D input file', argstr='-i %s',
position=0, mandatory=True)
base_name = traits.Str(
'tbss_', desc='the rootname that all generated files will have',
argstr='-o "%s"', position=1, usedefault=True)
design_mat = File(
exists=True, desc='design matrix file', argstr='-d %s', position=2)
tcon = File(
exists=True, desc='t contrasts file', argstr='-t %s', position=3)
fcon = File(exists=True, desc='f contrasts file', argstr='-f %s')
mask = File(exists=True, desc='mask image', argstr='-m %s')
x_block_labels = File(
exists=True, desc='exchangeability block labels file', argstr='-e %s')
demean = traits.Bool(
desc='demean data temporally before model fitting', argstr='-D')
one_sample_group_mean = traits.Bool(
desc='perform 1-sample group-mean test instead of generic permutation test',
argstr='-1')
show_total_perms = traits.Bool(
desc='print out how many unique permutations would be generated and exit',
argstr='-q')
show_info_parallel_mode = traits.Bool(
desc='print out information required for parallel mode and exit',
argstr='-Q')
vox_p_values = traits.Bool(
desc='output voxelwise (corrected and uncorrected) p-value images',
argstr='-x')
tfce = traits.Bool(
desc='carry out Threshold-Free Cluster Enhancement', argstr='-T')
tfce2D = traits.Bool(
desc='carry out Threshold-Free Cluster Enhancement with 2D optimisation',
argstr='--T2')
f_only = traits.Bool(desc='calculate f-statistics only', argstr='--f_only')
raw_stats_imgs = traits.Bool(
desc='output raw ( unpermuted ) statistic images', argstr='-R')
p_vec_n_dist_files = traits.Bool(
desc='output permutation vector and null distribution text files',
argstr='-P')
num_perm = traits.Int(
argstr='-n %d', desc='number of permutations (default 5000, set to 0 for exhaustive)')
seed = traits.Int(
argstr='--seed=%d', desc='specific integer seed for random number generator')
var_smooth = traits.Int(
argstr='-v %d', desc='use variance smoothing (std is in mm)')
c_thresh = traits.Float(
argstr='-c %.2f', desc='carry out cluster-based thresholding')
cm_thresh = traits.Float(
argstr='-C %.2f', desc='carry out cluster-mass-based thresholding')
f_c_thresh = traits.Float(
argstr='-F %.2f', desc='carry out f cluster thresholding')
f_cm_thresh = traits.Float(
argstr='-S %.2f', desc='carry out f cluster-mass thresholding')
tfce_H = traits.Float(
argstr='--tfce_H=%.2f', desc='TFCE height parameter (default=2)')
tfce_E = traits.Float(
argstr='--tfce_E=%.2f', desc='TFCE extent parameter (default=0.5)')
tfce_C = traits.Float(
argstr='--tfce_C=%.2f', desc='TFCE connectivity (6 or 26; default=6)')
class RandomiseOutputSpec(TraitedSpec):
tstat_files = traits.List(
File(exists=True),
desc='t contrast raw statistic')
fstat_files = traits.List(
File(exists=True),
desc='f contrast raw statistic')
t_p_files = traits.List(
File(exists=True),
        desc='t contrast uncorrected p values files')
f_p_files = traits.List(
File(exists=True),
desc='f contrast uncorrected p values files')
t_corrected_p_files = traits.List(
File(exists=True),
desc='t contrast FWE (Family-wise error) corrected p values files')
f_corrected_p_files = traits.List(
File(exists=True),
desc='f contrast FWE (Family-wise error) corrected p values files')
class Randomise(FSLCommand):
"""XXX UNSTABLE DO NOT USE
FSL Randomise: feeds the 4D projected FA data into GLM
modelling and thresholding
in order to find voxels which correlate with your model
Example
-------
>>> import nipype.interfaces.fsl as fsl
    >>> rand = fsl.Randomise(in_file='allFA.nii', mask='mask.nii', tcon='design.con', design_mat='design.mat')
>>> rand.cmdline
'randomise -i allFA.nii -o "tbss_" -d design.mat -t design.con -m mask.nii'
"""
_cmd = 'randomise'
input_spec = RandomiseInputSpec
output_spec = RandomiseOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['tstat_files'] = glob(self._gen_fname(
'%s_tstat*.nii' % self.inputs.base_name))
outputs['fstat_files'] = glob(self._gen_fname(
'%s_fstat*.nii' % self.inputs.base_name))
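        # randomise prefixes its p-value images by correction type
        # (tfce_, vox_, clustere_, clusterm_), depending on which
        # thresholding options were enabled.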
prefix = False
if self.inputs.tfce or self.inputs.tfce2D:
prefix = 'tfce'
elif self.inputs.vox_p_values:
prefix = 'vox'
elif self.inputs.c_thresh or self.inputs.f_c_thresh:
prefix = 'clustere'
elif self.inputs.cm_thresh or self.inputs.f_cm_thresh:
prefix = 'clusterm'
if prefix:
            outputs['t_p_files'] = glob(self._gen_fname(
                '%s_%s_p_tstat*.nii' % (self.inputs.base_name, prefix)))
outputs['t_corrected_p_files'] = glob(self._gen_fname(
'%s_%s_corrp_tstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_p_files'] = glob(self._gen_fname(
'%s_%s_p_fstat*.nii' % (self.inputs.base_name, prefix)))
outputs['f_corrected_p_files'] = glob(self._gen_fname(
'%s_%s_corrp_fstat*.nii' % (self.inputs.base_name, prefix)))
return outputs
class GLMInputSpec(FSLCommandInputSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True, position=1,
desc='input file name (text matrix or 3D/4D image file)')
out_file = File(name_template="%s_glm", argstr='-o %s', position=3,
desc=('filename for GLM parameter estimates'
+ ' (GLM betas)'),
name_source="in_file", keep_extension=True)
design = File(exists=True, argstr='-d %s', mandatory=True, position=2,
desc=('file name of the GLM design matrix (text time'
+ ' courses for temporal regression or an image'
+ ' file for spatial regression)'))
    contrasts = File(exists=True, argstr='-c %s', desc=('matrix of t-statistics'
                                                        + ' contrasts'))
mask = File(exists=True, argstr='-m %s', desc=('mask image file name if'
+ ' input is image'))
dof = traits.Int(argstr='--dof=%d', desc=('set degrees of freedom'
+ ' explicitly'))
des_norm = traits.Bool(argstr='--des_norm', desc=('switch on normalization'
+ ' of the design matrix'
+ ' columns to unit std'
+ ' deviation'))
dat_norm = traits.Bool(argstr='--dat_norm', desc=('switch on normalization'
+ ' of the data time'
+ ' series to unit std'
+ ' deviation'))
var_norm = traits.Bool(argstr='--vn', desc=('perform MELODIC variance-'
+ 'normalisation on data'))
    demean = traits.Bool(argstr='--demean', desc=('switch on demeaning of'
                                                  + ' design and data'))
    out_cope = File(argstr='--out_cope=%s',
                    desc='output file name for COPEs (either as text file or image)')
    out_z_name = File(argstr='--out_z=%s',
                      desc='output file name for Z-stats (either as text file or image)')
    out_t_name = File(argstr='--out_t=%s',
                      desc='output file name for t-stats (either as text file or image)')
out_p_name = File(argstr='--out_p=%s',
desc=('output file name for p-values of Z-stats (either as'
+ ' text file or image)'))
out_f_name = File(argstr='--out_f=%s',
desc='output file name for F-value of full model fit')
out_pf_name = File(argstr='--out_pf=%s',
desc='output file name for p-value for full model fit')
out_res_name = File(argstr='--out_res=%s',
desc='output file name for residuals')
out_varcb_name = File(argstr='--out_varcb=%s',
desc='output file name for variance of COPEs')
out_sigsq_name = File(argstr='--out_sigsq=%s',
desc=('output file name for residual noise variance'
+ ' sigma-square'))
out_data_name = File(argstr='--out_data=%s',
desc='output file name for pre-processed data')
out_vnscales_name = File(argstr='--out_vnscales=%s',
desc=('output file name for scaling factors for variance'
+ ' normalisation'))
class GLMOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=('file name of GLM parameters'
' (if generated)'))
out_cope = OutputMultiPath(File(exists=True),
desc=('output file name for COPEs (either as '
'text file or image)'))
    out_z = OutputMultiPath(File(exists=True),
                            desc=('output file name for Z-stats (either as text '
                                  'file or image)'))
out_t = OutputMultiPath(File(exists=True),
desc=('output file name for t-stats (either as '
'text file or image)'))
out_p = OutputMultiPath(File(exists=True),
desc=('output file name for p-values of Z-stats '
'(either as text file or image)'))
out_f = OutputMultiPath(File(exists=True),
desc=('output file name for F-value of full model '
'fit'))
out_pf = OutputMultiPath(File(exists=True),
desc=('output file name for p-value for full '
'model fit'))
out_res = OutputMultiPath(File(exists=True),
desc='output file name for residuals')
out_varcb = OutputMultiPath(File(exists=True),
desc='output file name for variance of COPEs')
out_sigsq = OutputMultiPath(File(exists=True),
desc=('output file name for residual noise '
'variance sigma-square'))
out_data = OutputMultiPath(File(exists=True),
desc='output file for preprocessed data')
out_vnscales = OutputMultiPath(File(exists=True),
desc=('output file name for scaling factors '
'for variance normalisation'))
class GLM(FSLCommand):
"""
FSL GLM:
Example
-------
>>> import nipype.interfaces.fsl as fsl
>>> glm = fsl.GLM(in_file='functional.nii', design='maps.nii', output_type='NIFTI')
>>> glm.cmdline
'fsl_glm -i functional.nii -d maps.nii -o functional_glm.nii'
"""
_cmd = 'fsl_glm'
input_spec = GLMInputSpec
output_spec = GLMOutputSpec
def _list_outputs(self):
outputs = super(GLM, self)._list_outputs()
if isdefined(self.inputs.out_cope):
outputs['out_cope'] = os.path.abspath(self.inputs.out_cope)
if isdefined(self.inputs.out_z_name):
outputs['out_z'] = os.path.abspath(self.inputs.out_z_name)
if isdefined(self.inputs.out_t_name):
outputs['out_t'] = os.path.abspath(self.inputs.out_t_name)
if isdefined(self.inputs.out_p_name):
outputs['out_p'] = os.path.abspath(self.inputs.out_p_name)
if isdefined(self.inputs.out_f_name):
outputs['out_f'] = os.path.abspath(self.inputs.out_f_name)
if isdefined(self.inputs.out_pf_name):
outputs['out_pf'] = os.path.abspath(self.inputs.out_pf_name)
if isdefined(self.inputs.out_res_name):
outputs['out_res'] = os.path.abspath(self.inputs.out_res_name)
if isdefined(self.inputs.out_varcb_name):
outputs['out_varcb'] = os.path.abspath(self.inputs.out_varcb_name)
if isdefined(self.inputs.out_sigsq_name):
outputs['out_sigsq'] = os.path.abspath(self.inputs.out_sigsq_name)
if isdefined(self.inputs.out_data_name):
outputs['out_data'] = os.path.abspath(self.inputs.out_data_name)
if isdefined(self.inputs.out_vnscales_name):
outputs['out_vnscales'] = os.path.abspath(
self.inputs.out_vnscales_name)
return outputs
| JohnGriffiths/nipype | nipype/interfaces/fsl/model.py | Python | bsd-3-clause | 88,382 | [
"Gaussian"
] | a12238ea9a44bc8b1d839fd33f927327bd7249145e9bdee3504aa83b429a288b |
"""BSSE: Basis Set Superposition Error module.
Defines a Setup-like class which has no properties that change anything,
except for an atomic basis set."""
import numpy as np
from ase.data import atomic_numbers
from gpaw.setup import BaseSetup
from gpaw.setup_data import SetupData
from gpaw.basis_data import Basis
from gpaw.spline import Spline
# Some splines are mandatory, but should then be zero to avoid affecting things
zero_function = Spline(0, 0.5, [0.0, 0.0, 0.0])
# Some operations fail horribly if the splines are zero, due to weird
# divisions and assumptions that various quantities are nonzero
#
# We'll use a function which is almost zero for these things
nonzero_function = Spline(0, 0.5, [0.0, 1.0e-12, 0.0]) # XXX
class GhostSetup(BaseSetup):
def __init__(self, basis, data):
self.symbol = data.symbol
self.data = data
self.phit_j = basis.tosplines()
self.basis = basis
self.nao = sum([2 * phit.get_angular_momentum_number() + 1
for phit in self.phit_j])
self.HubU = None
self.filename = None
self.fingerprint = None
self.type = 'ghost'
self.Z = 0
self.Nv = 0
self.Nc = 0
self.ni = 1
self.pt_j = [zero_function]
self.wg_lg = None
self.g_lg = None
self.Nct = 1e-12 # XXX XXX XXX XXX
self.nct = nonzero_function # XXXXXX
self.lmax = 0
self.xc_correction = None
self.ghat_l = [nonzero_function] * (self.lmax + 1) # XXXXXX
self.rcgauss = 1e12 # XXX XXX XXX XXX
self.vbar = zero_function
self.Delta0 = 0.0
self.Delta_pL = np.zeros((1, self.lmax + 1))
self.E = 0.0
self.Kc = 0.0
self.M = 0.0
self.M_p = np.zeros(1)
self.M_pp = np.zeros((1, 1))
self.K_p = np.zeros(1)
self.MB = 0.0
self.MB_p = np.zeros(1)
self.dO_ii = np.zeros((1, 1))
self.f_j = [0.0]
self.n_j = [0]
self.l_j = [0]
self.l_orb_j = [0]
self.nj = 1
self.lq = None # XXXX
self.rcutfilter = None
self.rcore = None
self.N0_p = np.zeros(1)
self.nabla_iiv = None
self.rnabla_iiv = None
self.rxp_iiv = None
self.phicorehole_g = None
self.rgd = None
self.rcut_j = [0.5]
self.tauct = None
self.Delta_iiL = None
self.B_ii = None
self.dC_ii = None
self.X_p = None
self.ExxC = None
self.dEH0 = 0.0
self.dEH_p = np.zeros(1)
self.extra_xc_data = {}
class GhostSetupData:
def __init__(self, symbol):
self.chemsymbol = symbol
self.symbol = symbol + '.ghost'
self.Z = atomic_numbers[symbol]
def build(self, xcfunc, lmax, basis, filter=None):
if basis is None:
raise ValueError('Loading partial waves not supported right now')
setup = GhostSetup(basis, self)
return setup
def print_info(self, text, _setup):
text('Ghost setup for %s' % self.chemsymbol)
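# A minimal usage sketch (illustrative only; assumes a suitable Basis object,
# e.g. one loaded through gpaw.basis_data.Basis):
#
#   basis = Basis('H', 'dzp')
#   data = GhostSetupData('H')
#   setup = data.build(xcfunc=None, lmax=0, basis=basis)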
| robwarm/gpaw-symm | gpaw/lcao/bsse.py | Python | gpl-3.0 | 3,101 | [
"ASE",
"GPAW"
] | c2c93f3a55462fe1681b22dbf34ab59fe131163e22c4c23f8d82f5eee4b50755 |
# Copyright (c) 2016 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" Code generation for processing columns
"""
import ast
def file_loc():
"""Return file and line number"""
import sys
import inspect
try:
raise Exception
except:
file_ = '.../' + '/'.join((inspect.currentframe().f_code.co_filename.split('/'))[-3:])
line_ = sys.exc_info()[2].tb_frame.f_back.f_lineno
return "{}:{}".format(file_, line_)
const_args = ('row', 'row_n', 'scratch', 'errors', 'accumulator', 'pipe', 'manager', 'source')
var_args = ('v', 'i_s', 'i_d', 'header_s', 'header_d')
all_args = var_args + const_args
# Full lambda definition for a column, including variable parts
col_code_def = 'lambda {}:'.format(','.join(all_args))
# lambda definition for the whole row. Includes only the arguments
# that are the same for every column
code_def = 'lambda {}:'.format(','.join(const_args))
col_args_t = """col_args = dict(v=v, i_s=i_s, i_d=i_d, header_s=header_s, header_d=header_d,
scratch=scratch, errors=errors, accumulator = accumulator,
row=row, row_n=row_n)"""
file_header = """
# {}
import sys
from rowgenerators.valuetype import resolve_value_type
from rowgenerators.rowpipe.exceptions import CasterExceptionError
""".format(file_loc())
column_template = """
# {loc}
def {f_name}(v, i_s, i_d, header_s, header_d, row, row_n, errors, scratch, accumulator, pipe, manager, source):
try:
{stack}
except Exception as exc:
{exception}
return v
"""
indent = ' '
row_template = """
# {loc}
def row_{table}_{stage}(row, row_n, errors, scratch, accumulator, pipe, manager, source):
return [
{stack}
]
"""
class CodeGenError(Exception):
pass
def exec_context(**kwargs):
"""Base environment for evals, the stuff that is the same for all evals. Primarily used in the
Caster pipe"""
import dateutil.parser
import datetime
import random
from functools import partial
from rowgenerators.valuetype.types import parse_date, parse_time, parse_datetime
import rowgenerators.valuetype.types
import rowgenerators.valuetype.exceptions
import rowgenerators.valuetype.test
import rowgenerators.valuetype
import rowgenerators.rowpipe.transforms
def set_from(f, frm): # This maybe isn't used anymore, or maybe it is for debugging?
try:
try:
f.ambry_from = frm
except AttributeError: # for instance methods
f.im_func.ambry_from = frm
except (TypeError, AttributeError): # Builtins, non python code
pass
return f
test_env = dict(
parse_date=parse_date,
parse_time=parse_time,
parse_datetime=parse_datetime,
partial=partial
)
test_env.update(kwargs)
test_env.update(dateutil.parser.__dict__)
test_env.update(datetime.__dict__)
test_env.update(random.__dict__)
test_env.update(rowgenerators.valuetype.core.__dict__)
test_env.update(rowgenerators.valuetype.types.__dict__)
test_env.update(rowgenerators.valuetype.exceptions.__dict__)
test_env.update(rowgenerators.valuetype.test.__dict__)
test_env.update(rowgenerators.valuetype.__dict__)
test_env.update(rowgenerators.rowpipe.transforms.__dict__)
localvars = {}
for f_name, func in test_env.items():
if not isinstance(func, (str, tuple)):
localvars[f_name] = set_from(func, 'env')
# The 'b' parameter of randint is assumed to be a manager, but
# replacing it with a lambda prevents the param assignment
localvars['randint'] = lambda a, b: random.randint(a, b)
localvars['round'] = lambda a, b: round(a, b)
return localvars
def make_row_processors(source_headers, dest_table, env=None):
"""
Make multiple row processors for all of the columns in a table.
:param source_headers:
:param dest_table:
:param env:
:return:
"""
import re
if env is None:
env = exec_context()
assert len(dest_table.columns) > 0
    # Convert the transforms to a list of lists, with each inner list being a
    # segment of column transformations, one entry per column.
row_processors = []
out = []
preamble = []
transforms = dest_table.stage_transforms
for i, segments in enumerate(transforms): # Iterate over each stage
column_names = []
column_types = []
seg_funcs = []
# Iterate over each column, linking it to the segments for this stage
for col_num, (segment, column) in enumerate(zip(segments, dest_table), 0):
assert column
assert column.name, (dest_table.name, i)
assert column.name == segment['column'].name
col_name = column.name
preamble_parts, try_lines, exception, passthrough = make_stack(env, i, segment)
preamble += preamble_parts
column_names.append(col_name)
column_types.append(column.datatype)
            # Optimization to remove unnecessary functions. Without this, the column
            # function would contain just 'v = v'
if len(segment['transforms']) == 1 and segment['transforms'][0] == 'v':
seg_funcs.append('row[{}]'.format(col_num))
continue
column_name = re.sub(r'[^\w]+', '_', col_name, )
table_name = re.sub(r'[^\w]+', '_', dest_table.name)
assert column_name, (dest_table.name, i, col_name)
assert table_name
f_name = "{}_{}_{}".format(table_name, column_name, i)
exception = (exception if exception
else 'raise CasterExceptionError("' + f_name + '",header_d, v, exc, sys.exc_info())')
try:
                # The input values for the first stage come from the source dataset,
                # which may have different columns than the later stages
if i == 0:
i_s = source_headers.index(column.name)
header_s = column.name
else:
i_s = col_num
header_s = None
v = 'row[{}]'.format(i_s)
except ValueError as e: # The col name is not in the source dataset
                # This is the typical case when the output dataset has different columns
                # from the input, which should only occur on the first stage.
i_s = 'None'
header_s = None
v = 'None' if col_num >= 1 else 'row_n' # Give the id column (first column) the row number
header_d = column.name
# Seg funcs holds the calls to the function for each column, called in the row stage function
seg_funcs.append(f_name
+ ('({v}, {i_s}, {i_d}, {header_s}, \'{header_d}\', '
'row, row_n, errors, scratch, accumulator, pipe, manager, source)')
.format(v=v,
i_s=i_s,
i_d=col_num,
header_s="'" + header_s + "'" if header_s else 'None',
header_d=header_d))
# This creates the column manipulation function.
out.append(column_template.format(
f_name=f_name,
table_name=dest_table.name,
column_name=col_name,
stage=i,
i_s=i_s,
i_d=col_num,
header_s=header_s,
header_d=header_d,
v=v,
exception=indent + exception,
stack='\n'.join(indent + l for l in try_lines),
col_args='', # col_args not implemented yet
loc=file_loc()))
# This stack assembles all of the function calls that will generate the next row
stack = '\n'.join("{}{}, # column {}".format(indent, l, cn)
for l, cn, dt in zip(seg_funcs, column_names, column_types))
out.append(row_template.format(
table=re.sub(r'[^\w]+', '_', dest_table.name),
stage=i,
stack=stack,
loc=file_loc()
))
row_processors.append('row_{table}_{stage}'.format(stage=i,
table=re.sub(r'[^\w]+', '_', dest_table.name)))
# Add the final datatype cast, which is done separately to avoid an unnecessary function call.
stack = '\n'.join("{}cast_{}(row[{}], '{}', errors),".format(indent, c.datatype.__name__, i, c.name)
for i, c in enumerate(dest_table))
out.append(row_template.format(
table=re.sub(r'[^\w]+', '_', dest_table.name),
stage=len(transforms),
stack=stack,
loc=file_loc()
))
row_processors.append('row_{table}_{stage}'.format(stage=len(transforms),
table=re.sub(r'[^\w]+', '_', dest_table.name)))
out.append('row_processors = [{}]'.format(','.join(row_processors)))
return '\n'.join([file_header] + list(set(preamble)) + out)
def calling_code(f, f_name=None, raise_for_missing=True):
"""Return the code string for calling a function. """
import inspect
from rowgenerators.exceptions import ConfigurationError
if inspect.isclass(f):
try:
args = inspect.signature(f.__init__).parameters.keys()
except TypeError as e:
raise TypeError("Failed to inspect {}: {}".format(f, e))
else:
args = inspect.signature(f).parameters.keys()
if len(args) > 1 and list(args)[0] == 'self':
args = list(args)[1:]
if 'self' in args: # Python3 gets self, but not Python2
args.remove('self')
for a in args:
if a not in all_args + ('exception',): # exception arg is only for exception handlers
if raise_for_missing:
# In CPython, inspecting __init__ for IntMeasure, FloatMeasure, etc,
# raises a TypeError 12 lines up, but that does not happen in PyPy. This hack
# raises the TypeError.
if a == 'obj':
    raise TypeError()
raise ConfigurationError('Caster code {} has unknown argument '
'name: \'{}\'. Must be one of: {} '.format(f, a, ','.join(all_args)))
arg_map = {e: e for e in var_args}
args = [arg_map.get(a, a) for a in args]
return "{}({})".format(f_name if f_name else f.__name__, ','.join(args))
def make_stack(env, stage, segment):
"""For each transform segment, create the code in the try/except block with the
assignements for pipes in the segment """
import string
import random
from rowgenerators.valuetype import ValueType
passthrough = False # If true, signal that the stack will just return its input value
column = segment['column']
def make_line(column, t):
preamble = []
line_t = "v = {} # {}"
env_t = env.get(t,t)
if isinstance(env_t, type) and issubclass(env_t, ValueType): # A valuetype class, from the datatype column.
try:
cc, fl = calling_code(env_t, env_t.__name__), file_loc()
except TypeError as e:
cc, fl = "{}(v)".format(env_t.__name__), file_loc()
preamble.append("{} = resolve_value_type('{}') # {}".format(env_t.__name__, env_t.vt_code, file_loc()))
elif isinstance(t, type): # A python type, from the datatype columns.
cc, fl = "parse_{}(v, header_d)".format(t.__name__), file_loc()
elif callable(env.get(t)): # Transform function
try:
cc, fl = calling_code(env.get(t), t), file_loc()
except TypeError as e:
raise
else: # A transform generator, or python code.
rnd = (''.join(random.choice(string.ascii_lowercase) for _ in range(6)))
name = 'tg_{}_{}_{}'.format(column.name, stage, rnd)
try:
a, b, fl = rewrite_tg(env, name, t)
except (CodeGenError, AttributeError) as e:
raise CodeGenError("Failed to re-write pipe code '{}' in column '{}': {} "
.format(t, column, e))
cc = str(a)
if b:
preamble.append("{} = {} # {}".format(name, b, file_loc()))
line = line_t.format(cc, fl)
return line, preamble
preamble = []
try_lines = []
for t in list(segment):
if not t:
continue
line, col_preamble = make_line(column, t)
preamble += col_preamble
try_lines.append(line)
if segment['exception']:
exception, col_preamble = make_line(column, segment['exception'])
else:
exception = None
if len(try_lines) == 0:
try_lines.append('pass # Empty pipe segment')
assert len(try_lines) > 0, column.name
return preamble, try_lines, exception, passthrough
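# Hedged illustration of make_stack()'s output for a segment whose transforms
# are [int, 'v*2'] (file locations elided; exact strings depend on env):
#
#   try_lines ~= ["v = parse_int(v, header_d)  # codegen.py:...",
#                 "v = v * 2  # codegen.py:..."]
#   exception is None unless segment['exception'] is set.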
def mk_kwd_args(fn, fn_name=None):
import inspect
fn_name = fn_name or fn.__name__
fn_args = inspect.getargspec(fn).args
# Drop a leading 'self' parameter; otherwise keep the full argument list
args = fn_args[1:] if len(fn_args) > 1 and fn_args[0] == 'self' else fn_args
kwargs = dict((a, a) for a in all_args if a in args)
return "{}({})".format(fn_name, ','.join(a + '=' + v for a, v in kwargs.items()))
class ReplaceTG(ast.NodeTransformer):
"""Replace a transform generator with the transform function"""
def __init__(self, env, tg_name):
super(ReplaceTG, self).__init__()
self.tg_name = tg_name
self.trans_gen = None
self.env = env
self.loc = ''
def missing_args(self):
pass
def visit_Call(self, node): # pragma: no cover
import inspect
from rowgenerators.valuetype import is_transform_generator
import types
if not isinstance(node.func, ast.Name):
self.generic_visit(node)
# Python 3.5 removed starargs, which the meta module, which dumps the AST to code,
# still expects. Not sure about the need for kwargs, but meta expects that too
node.starargs = None
node.kwargs = None
return node
fn_name = node.func.id
fn_args = None
use_kw_args = True
fn = self.env.get(node.func.id)
self.loc = file_loc() # Not a builtin, not a type, not a transform generator
# In this case, the code line is a type that has a parse function, so rename it.
if not fn:
t_fn_name = 'parse_' + fn_name
t_fn = self.env.get(t_fn_name)
if t_fn:
self.loc = file_loc() # The function is a type
fn, fn_name = t_fn, t_fn_name
# Ok, maybe it is a builtin
if not fn:
o = eval(fn_name)
if isinstance(o, types.BuiltinFunctionType):
self.loc = file_loc() # The function is a builtin
fn = o
fn_args = ['v']
use_kw_args = False
if not fn:
raise CodeGenError("Failed to get function named '{}' from the environment".format(node.func.id))
if not fn_args:
fn_args = inspect.getargspec(fn).args
# Create a dict of the arguments that have been specified
used_args = dict(tuple(zip(fn_args, node.args))
+ tuple((kw.arg, kw.value) for kw in node.keywords)
)
# Add in the arguments that were not, but only for args that are specified to be
# part of the local environment
for arg in fn_args:
if arg not in used_args and arg in all_args:
used_args[arg] = ast.Name(id=arg, ctx=ast.Load())
# Now, all of the args are in a dict, so we'll re-build them as
# as if they were all kwargs. Any arguments that were not provided by the
# signature in the input are added as keywords, with the value being
# a variable of the same name as the argument: ie. if 'manager' was defined
# but not provided, the signature has an added 'manager=manager' kwarg
keywords = [ast.keyword(arg=k, value=v) for k, v in used_args.items()]
tg_ast = ast.copy_location(
ast.Call(
func=ast.Name(id=fn_name, ctx=ast.Load()),
args=[e.value for e in keywords] if not use_kw_args else [], # For builtins, which only take one arg
keywords=keywords if use_kw_args else [],
starargs=[],
kwargs=[]
), node)
if is_transform_generator(fn):
raise Exception("Deprecated?")
self.loc = file_loc() # The function is a transform generator.
self.trans_gen = tg_ast
replace_node = ast.copy_location(
ast.Call(
func=ast.Name(id=self.tg_name, ctx=ast.Load()),
args=[],
keywords=[],
kwargs=ast.Name(id='col_args', ctx=ast.Load()),
starargs=[]
), node)
else:
replace_node = tg_ast
return replace_node
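# A stripped-down, stdlib-only sketch of the call-rewriting idea behind
# ReplaceTG (ast.unparse requires Python 3.9+; this module itself relies on
# meta/codegen instead):
#
#   import ast
#   class AddArg(ast.NodeTransformer):
#       def visit_Call(self, node):
#           # Inject a 'row_n=row_n' keyword, mirroring how managed args are added
#           node.keywords.append(ast.keyword(arg='row_n',
#                                            value=ast.Name(id='row_n', ctx=ast.Load())))
#           return node
#   tree = ast.fix_missing_locations(AddArg().visit(ast.parse("f(v)")))
#   print(ast.unparse(tree))  # -> "f(v, row_n=row_n)"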
def rewrite_tg(env, tg_name, code):
"""Re-write a transform generating function pipe specification by extracting the transform generating part,
and replacing it with the generated transform. so:
tgen(a,b,c).foo.bar
becomes:
tg = tgen(a,b,c)
tg.foo.bar
"""
import meta # Byte-code and ast programming tools
import codegen
visitor = ReplaceTG(env, tg_name)
assert visitor.tg_name
try:
tree = visitor.visit(ast.parse(code.strip()))
except SyntaxError as e:
raise SyntaxError(str(e) + "\nIn code: \n" + code)
if visitor.loc:
loc = ' #' + visitor.loc
else:
loc = file_loc() # The AST visitor didn't match a call node
if visitor.trans_gen:
tg = meta.dump_python_source(visitor.trans_gen).strip()
else:
tg = None
return meta.dump_python_source(tree).strip(), tg, loc
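# Hedged sketch of rewrite_tg() on plain pipe code (the transform-generator
# branch above is marked deprecated, so usually only the rewritten call is
# returned; names are illustrative):
#
#   code, tg, loc = rewrite_tg(env, 'tg_col_0_abcdef', 'double(v)')
#   # code ~= "double(v, row=row, row_n=row_n, ...)" with managed args filled in
#   # tg is None; loc points at the matched branch in this file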
| CivicKnowledge/rowgenerators | rowgenerators/rowpipe/codegen.py | Python | mit | 18,422 | [
"VisIt"
] | edf834cf1fe60a14d0b983534306b09736826fe5494f2d873224e92832e27ab7 |
###########################################
# IMPORTS
###########################################
from perses.annihilation.lambda_protocol import LambdaProtocol
from nose.tools import raises
import os
running_on_github_actions = os.environ.get('GITHUB_ACTIONS', None) == 'true'
#############################################
# TESTS
#############################################
def test_lambda_protocol():
"""
Tests LambdaProtocol, ensures that it can be instantiated with defaults, and that it fails if disallowed functions are tried
"""
# check that it's possible to instantiate a LambdaProtocol for all the default types
for protocol in ['default', 'namd', 'quarters']:
lp = LambdaProtocol(functions=protocol)
# check that if we give an incomplete set of parameters it will add in the missing terms
missing_functions = {'lambda_sterics_delete': lambda x : x}
lp = LambdaProtocol(functions=missing_functions)
assert (len(missing_functions) == 1)
assert(len(lp.get_functions()) == 9)
@raises(AssertionError)
def test_lambda_protocol_failure_ends():
bad_function = {'lambda_sterics_delete': lambda x : -x}
lp = LambdaProtocol(functions=bad_function)
@raises(AssertionError)
def test_lambda_protocol_naked_charges():
naked_charge_functions = {'lambda_sterics_insert':
lambda x: 0.0 if x < 0.5 else 2.0 * (x - 0.5),
'lambda_electrostatics_insert':
lambda x: 2.0 * x if x < 0.5 else 1.0}
lp = LambdaProtocol(functions=naked_charge_functions)
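def test_lambda_protocol_custom_function():
    """
    Hedged sketch, not part of the original suite: a monotone function with the
    correct 0 -> 1 endpoints should be accepted, with the remaining terms filled
    in by defaults (assuming LambdaProtocol validates custom functions the same
    way as the cases above).
    """
    custom = {'lambda_sterics_insert': lambda x: x * x}
    lp = LambdaProtocol(functions=custom)
    assert len(lp.get_functions()) == 9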
| choderalab/perses | perses/tests/test_lambda_protocol.py | Python | mit | 1,566 | [
"NAMD"
] | cb242389103305798b3f1ae0fa4d80bd9fb0cbfa357fc087af0bc84d3abf9c46 |
from _metadata import Metadata, MetadataException
from _template_metadata import TemplateMetadata
from _mtl_metadata import MTLMetadata
from _report_metadata import ReportMetadata
from _xml_metadata import XMLMetadata
from _ers_metadata import ERSMetadata
from _netcdf_metadata import NetCDFMetadata
from _jetcat_metadata import JetCatMetadata
from _survey_metadata import SurveyMetadata
try:
from _argus_metadata import ArgusMetadata # This needs cx_Oracle - can't run outside GA
except ImportError:
pass
def metadata_class(metadata_type_tag):
metadata_class_map = {'MTL': MTLMetadata,
                      'REPORT': ReportMetadata,
                      'XML': XMLMetadata,
                      'ERS': ERSMetadata,
                      'ISI': ERSMetadata,
                      'NETCDF': NetCDFMetadata,  # keys uppercase to match the .upper() lookup below
                      'JETCAT': JetCatMetadata,
                      'SURVEY': SurveyMetadata
                      }
try:
    metadata_class_map['ARGUS'] = ArgusMetadata
except NameError:  # ArgusMetadata was not imported
pass
return metadata_class_map.get(metadata_type_tag.strip().upper())
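# Hedged usage sketch: tags are normalised with strip().upper() before lookup,
# so any casing resolves to the same handler (None is returned for unknown tags):
#
#   cls = metadata_class(' netcdf ')
#   if cls is not None:
#       metadata = cls()  # assuming a no-arg constructor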
| alex-ip/geophys2netcdf | geophys2netcdf/metadata/__init__.py | Python | apache-2.0 | 1,121 | [
"NetCDF"
] | 02575e90fd8daaecee5f61bf2752618c3b6c2c2e36f13d2eded05e1bcd9d71ef |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-pilot-logging-info.py
# Author : Stuart Paterson
########################################################################
"""
Retrieve logging info of a Grid pilot
"""
from __future__ import print_function
__RCSID__ = "$Id$"
# pylint: disable=wrong-import-position
from DIRAC.Core.Base import Script
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
'Usage:',
' %s [option|cfgfile] ... PilotID ...' % Script.scriptName,
'Arguments:',
' PilotID: Grid ID of the pilot']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) < 1:
Script.showHelp()
from DIRAC import exit as DIRACExit
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
errorList = []
for gridID in args:
result = diracAdmin.getPilotLoggingInfo(gridID)
if not result['OK']:
errorList.append((gridID, result['Message']))
exitCode = 2
else:
print('Pilot Reference: %s' % gridID)
print(result['Value'])
print()
for error in errorList:
print("ERROR %s: %s" % error)
DIRACExit(exitCode)
| fstagni/DIRAC | Interfaces/scripts/dirac-admin-get-pilot-logging-info.py | Python | gpl-3.0 | 1,337 | [
"DIRAC"
] | 24ac1bdc82fb49d0a82bb88f59d259c60cf129af07ca1e02e6c710a2fc664970 |
import random
def do_me_a_unique_slug(model_cls, field):
"""
model_cls: A Django model to enforce uniqueness
field: The field that stores the unique name
"""
slug = do_me_a_slug()
while model_cls.objects.filter(**{'%s__iexact' % field: slug}).exists():
slug = do_me_a_slug()
return slug
def do_me_a_slug():
"""
Generates a random slug using nouns, adjectives and a number
e.g morning-waterfall-23
"""
adjectives = [
"autumn", "hidden", "bitter", "misty", "silent", "empty", "dry",
"dark", "summer", "icy", "delicate", "quiet", "white", "cool",
"spring", "winter", "patient", "twilight", "dawn", "crimson", "wispy",
"weathered", "blue", "billowing", "broken", "cold", "damp", "falling",
"frosty", "green", "long", "late", "lingering", "bold", "little",
"morning", "muddy", "old", "red", "rough", "still", "small",
"sparkling", "throbbing", "shy", "wandering", "withered", "wild",
"black", "young", "holy", "solitary", "fragrant", "aged", "snowy",
"proud", "floral", "restless", "divine", "polished", "ancient",
"purple", "lively", "nameless"
]
nouns = [
"waterfall", "river", "breeze", "moon", "rain", "wind", "sea",
"snow", "lake", "sunset", "pine", "shadow", "leaf", "dawn", "glitter",
"forest", "hill", "cloud", "meadow", "sun", "glade", "bird", "brook",
"butterfly", "bush", "dew", "dust", "field", "fire", "flower",
"feather", "grass", "haze", "mountain", "night", "pond", "darkness",
"snowflake", "silence", "sound", "sky", "shape", "surf", "thunder",
"violet", "water", "wildflower", "wave", "water", "resonance", "sun",
"wood", "dream", "cherry", "tree", "fog", "frost", "voice", "paper",
"frog", "smoke", "star", "morning", "firefly",
]
return "%s-%s-%s" % (
random.choice(adjectives),
random.choice(nouns),
random.randint(1, 1000))
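if __name__ == '__main__':  # pragma: no cover
    # Hedged demo: do_me_a_slug() needs no Django setup; do_me_a_unique_slug()
    # would additionally need a concrete model class and field name.
    for _ in range(3):
        print(do_me_a_slug())  # e.g. 'misty-waterfall-42'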
| praekelt/mc2 | mc2/controllers/base/namers.py | Python | bsd-2-clause | 1,993 | [
"Firefly"
] | 463f4116597022fa5b3801026c98ba59e97e1e0d9a62be76963b995bbe4ecf3d |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.explorer Contains the ParameterExplorer class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import traceback
from collections import OrderedDict
# Import the relevant PTS classes and modules
from .component import FittingComponent
from ...core.basics.log import log
from ...core.tools import filesystem as fs
from ...core.launch.batch import BatchLauncher
from .modelgenerators.grid import GridModelGenerator
from .modelgenerators.genetic import GeneticModelGenerator
from ...core.tools import time
from .tables import ParametersTable, ChiSquaredTable, IndividualsTable
from ...core.tools.stringify import stringify_not_list, stringify
from ...core.simulation.wavelengthgrid import WavelengthGrid
from ...core.remote.host import load_host
from ...core.basics.configuration import ConfigurationDefinition, create_configuration_interactive
from .evaluate import prepare_simulation, get_parameter_values_for_named_individual, make_test_definition
from ...core.simulation.input import SimulationInput
from .generation import GenerationInfo, Generation
from ...core.tools.stringify import tostr
from ...core.basics.configuration import prompt_proceed, prompt_integer
from ...core.prep.smile import SKIRTSmileSchema
from ...core.tools.utils import lazyproperty
from ...core.prep.deploy import Deployer
from ...core.basics.range import QuantityRange
from ...core.tools import formatting as fmt
from ...magic.plot.imagegrid import StandardImageGridPlotter
# -----------------------------------------------------------------
class ParameterExplorer(FittingComponent):
"""
This class...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
:return:
"""
# Call the constructor of the base class
super(ParameterExplorer, self).__init__(*args, **kwargs)
# -- Attributes --
# The fitting run
self.fitting_run = None
# The ski template
self.ski = None
# The SKIRT batch launcher
self.launcher = BatchLauncher()
# The generation info
self.generation_info = GenerationInfo()
# The generation object
self.generation = None
# The individuals table
self.individuals_table = None
# The parameters table
self.parameters_table = None
# The chi squared table
self.chi_squared_table = None
# The parameter ranges
self.ranges = dict()
# Fixed initial parameters
self.fixed_initial_parameters = None
# The generation index and name
self.generation_index = None
self.generation_name = None
# The model generator
self.generator = None
# The simulation input
self.simulation_input = None
# Extra input for the model generator
self.scales = None
self.most_sampled_parameters = None
self.sampling_weights = None
# -----------------------------------------------------------------
@property
def needs_input(self):
"""
This function ...
:return:
"""
return self.fitting_run.needs_input
# -----------------------------------------------------------------
@property
def testing(self):
"""
This function ...
:return:
"""
return self.config.test
# -----------------------------------------------------------------
@property
def do_set_ranges(self):
"""
This function ...
:return:
"""
return not self.has_all_ranges
# -----------------------------------------------------------------
@property
def do_create_generation(self):
"""
This function ...
:return:
"""
return not self.testing
# -----------------------------------------------------------------
@property
def do_set_input(self):
"""
This function ...
:return:
"""
return self.needs_input
# -----------------------------------------------------------------
@property
def do_write(self):
"""
This function ...
:return:
"""
return not self.testing
# -----------------------------------------------------------------
@property
def do_plot(self):
"""
This function ...
:return:
"""
return self.config.plot
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# 2. Load the ski template
self.load_ski()
# 3. Set the parameter ranges
if self.do_set_ranges: self.set_ranges()
# 4. Set the generation info
self.set_info()
# 5. Create the generation
if self.do_create_generation: self.create_generation()
# 6. Generate the model parameters
self.generate_models()
# 7. Set the paths to the input files
if self.do_set_input: self.set_input()
# 8. Adjust the ski template
self.adjust_ski()
# 9. Fill the tables for the current generation
self.fill_tables()
# 10. Writing
if self.do_write: self.write()
# 11. Show stuff
self.show()
# 12. Plot
if self.do_plot: self.plot()
# 13. Launch the simulations for different parameter values
self.launch_or_finish()
# -----------------------------------------------------------------
@property
def uses_remotes(self):
"""
This function ...
:return:
"""
return self.launcher.uses_remotes
# -----------------------------------------------------------------
@property
def only_local(self):
"""
This function ...
:return:
"""
return self.launcher.only_local
# -----------------------------------------------------------------
@property
def parameter_labels(self):
"""
This function ...
:return:
"""
return self.fitting_run.free_parameter_labels
# -----------------------------------------------------------------
@property
def has_all_ranges(self):
"""
This function ...
:return:
"""
# Loop over the free parameter labels
for label in self.parameter_labels:
# If range is already defined
if label not in self.ranges: return False
# All ranges are defined
return True
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ParameterExplorer, self).setup(**kwargs)
# Run locally?
if self.config.local: self.config.remotes = []
# Load the fitting run
self.fitting_run = self.load_fitting_run(self.config.name)
# Get ranges
if "ranges" in kwargs: self.ranges = kwargs.pop("ranges")
# Get the initial parameter values
if "fixed_initial_parameters" in kwargs: self.fixed_initial_parameters = kwargs.pop("fixed_initial_parameters")
# Set options for the batch launcher
self.set_launcher_options()
# Check for restarting generations
if self.config.restart_from_generation is not None: self.clear_for_restart()
# Set the model generator
self.set_generator()
# Check whether this is not the first generation so that we can use remotes with a scheduling system
#if self.ngenerations == 0 and self.uses_schedulers:
# raise ValueError("The specified remote hosts cannot be used for the first generation: at least one remote uses a scheduling system")
# Check whether the wavelength grids table is present
if self.is_galaxy_modeling:
if not fs.is_file(self.fitting_run.wavelength_grids_table_path): raise RuntimeError("Call initialize_fit_galaxy before starting the parameter exploration")
# Get grid generation settings
self.scales = kwargs.pop("scales", None)
self.most_sampled_parameters = kwargs.pop("most_sampled_parameters", None)
self.sampling_weights = kwargs.pop("sampling_weights", None)
# Deploy SKIRT
if self.has_host_ids and self.config.deploy: self.deploy()
# Initialize tables
self.initialize_generation_tables()
# -----------------------------------------------------------------
@property
def nprevious_generations(self):
"""
This function ...
:return:
"""
return self.fitting_run.ngenerations
# -----------------------------------------------------------------
@property
def previous_generation_names(self):
"""
This function ...
:return:
"""
return self.fitting_run.generation_names
# -----------------------------------------------------------------
@property
def last_previous_generation_name(self):
"""
This function ...
:return:
"""
return self.previous_generation_names[-1]
# -----------------------------------------------------------------
@lazyproperty
def last_previous_generation(self):
"""
This function ...
:return:
"""
return self.fitting_run.get_generation(self.last_previous_generation_name)
# -----------------------------------------------------------------
@property
def first_generation(self):
"""
This function ...
:return:
"""
return self.nprevious_generations == 0
# -----------------------------------------------------------------
@property
def nprevious_genetic_generations(self):
"""
This function ...
:return:
"""
return self.fitting_run.ngenetic_generations
# -----------------------------------------------------------------
@property
def previous_genetic_generation_names(self):
"""
This function ...
:return:
"""
return self.fitting_run.genetic_generations
# -----------------------------------------------------------------
@property
def first_genetic_generation(self):
"""
This function ...
:return:
"""
if not self.genetic_fitting: raise ValueError("Not a genetic generation")
else: return self.nprevious_generations == 0
# -----------------------------------------------------------------
def get_description(self, label):
"""
This function ...
:param label:
:return:
"""
return self.fitting_run.parameter_descriptions[label]
# -----------------------------------------------------------------
@property
def grid_settings(self):
"""
This function ...
:return:
"""
return self.fitting_run.grid_config
# -----------------------------------------------------------------
@property
def genetic_settings(self):
"""
This function ...
:return:
"""
return self.fitting_run.genetic_config
# -----------------------------------------------------------------
def get_default_npoints(self, label):
"""
This function ...
:param label:
:return:
"""
if self.grid_fitting: return self.grid_settings[label + "_npoints"]
else: return None
# -----------------------------------------------------------------
@lazyproperty
def npoints(self):
"""
This function ...
:return:
"""
# Prompt
if self.config.prompt_npoints:
# Check
if self.config.npoints_all is not None: raise ValueError("Npoints is specified already through 'npoints_all'")
# Prompt
npoints_dict = dict()
for label in self.parameter_labels: npoints_dict[label] = prompt_integer("npoints_" + label, "number of points for the " + self.get_description(label), default=self.get_default_npoints(label))
return npoints_dict
# Npoints all
elif self.config.npoints_all is not None:
npoints_dict = dict()
for label in self.parameter_labels: npoints_dict[label] = self.config.npoints_all
return npoints_dict
# Npoints dict
else: return self.config.npoints
# -----------------------------------------------------------------
def deploy(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Deploying SKIRT where necessary ...")
# Create the deployer
deployer = Deployer()
# Don't deploy PTS
deployer.config.skirt = True
deployer.config.pts = False
# Don't do anything locally
deployer.config.local = False
# Set the host ids
deployer.config.hosts = self.remote_host_ids
# Check versions between local and remote
deployer.config.check = self.config.check_versions
# Update PTS dependencies
deployer.config.update_dependencies = self.config.update_dependencies
# Do clean install
deployer.config.clean = self.config.deploy_clean
# Pubkey password
deployer.config.pubkey_password = self.config.pubkey_password
# Run the deployer
deployer.run()
# -----------------------------------------------------------------
@lazyproperty
def remote_host_ids(self):
"""
This function ...
:return:
"""
# Set remote host IDs
remote_host_ids = []
if self.fitting_run.ngenerations == 0:
for host_id in self.config.remotes:
if load_host(host_id).scheduler:
log.warning("Not using remote host '" + host_id + "' for the initial generation because it uses a scheduling system for launching jobs")
else: remote_host_ids.append(host_id)
else: remote_host_ids = self.config.remotes
# Return the host IDs
return remote_host_ids
# -----------------------------------------------------------------
@lazyproperty
def remote_hosts(self):
"""
This function ...
:return:
"""
return [load_host(host_id) for host_id in self.remote_host_ids]
# -----------------------------------------------------------------
@lazyproperty
def nhost_ids(self):
"""
This function ...
:return:
"""
return len(self.remote_host_ids)
# -----------------------------------------------------------------
@lazyproperty
def has_host_ids(self):
"""
This function ...
:return:
"""
return self.nhost_ids > 0
# -----------------------------------------------------------------
@property
def modeling_config(self):
"""
This function ...
:return:
"""
return self.environment.modeling_configuration
# -----------------------------------------------------------------
@property
def other_host_ids(self):
"""
This function ...
:return:
"""
if self.config.local_analysis: return []
elif self.modeling_config.host_ids is None: return []
else: return self.modeling_config.host_ids
# -----------------------------------------------------------------
@property
def nother_host_ids(self):
"""
This function ...
:return:
"""
return len(self.other_host_ids)
# -----------------------------------------------------------------
@property
def has_other_host_ids(self):
"""
This function ...
:return:
"""
return self.nother_host_ids > 0
# -----------------------------------------------------------------
@property
def other_host_id(self):
"""
This function ...
:return:
"""
if not self.has_other_host_ids: return None
else: return self.other_host_ids[0]
# -----------------------------------------------------------------
@property
def record_timing(self):
"""
This function ...
:return:
"""
if self.config.record_timing: return True
elif len(self.remote_host_ids) > 0:
log.warning("Record timing will be enabled because remote execution is used")
return True
else: return False
# -----------------------------------------------------------------
@property
def record_memory(self):
"""
This function ...
:return:
"""
if self.config.record_memory: return True
elif len(self.remote_host_ids) > 0:
log.warning("Record memory will be enabled because remote execution is used")
return True
else: return False
# -----------------------------------------------------------------
@property
def extract_timeline(self):
"""
This function ...
:return:
"""
return self.record_timing or self.config.extract_timeline
# -----------------------------------------------------------------
@property
def extract_memory(self):
"""
This function ...
:return:
"""
return self.record_memory or self.config.extract_memory
# -----------------------------------------------------------------
@property
def reference_component_name(self):
"""
This function ...
:return:
"""
return self.representation.reference_deprojection_name
# -----------------------------------------------------------------
@property
def reference_map_path(self):
"""
This function ...
:return:
"""
return self.representation.reference_map_path
# -----------------------------------------------------------------
@lazyproperty
def reference_wcs(self):
"""
This function ...
:return:
"""
from ...magic.basics.coordinatesystem import CoordinateSystem
if self.reference_map_path is None: return None
else: return CoordinateSystem.from_file(self.reference_map_path)
# -----------------------------------------------------------------
@property
def reference_wcs_path(self):
"""
This function ...
:return:
"""
return self.reference_map_path
# -----------------------------------------------------------------
@property
def timing_table_path(self):
"""
This function ...
:return:
"""
return self.fitting_run.timing_table_path
# -----------------------------------------------------------------
@property
def memory_table_path(self):
"""
This function ...
:return:
"""
return self.fitting_run.memory_table_path
# -----------------------------------------------------------------
def set_launcher_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting options for the batch simulation launcher ...")
# Basic launcher options
self.set_basic_launcher_options()
# Simulation options
self.set_simulation_options()
# Analysis options
self.set_analysis_options()
# -----------------------------------------------------------------
def set_basic_launcher_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting basic launcher options ...")
# Write assignments and leftover queues (if not testing)
self.launcher.config.write = not self.testing
# Simulations have approximately the same requirements
self.launcher.config.same_requirements = True
# Basic options
self.launcher.config.shared_input = True # The input directories (or files) for the different simulations are shared
self.launcher.config.remotes = self.remote_host_ids # The remote host(s) on which to run the simulations
self.launcher.config.attached = self.config.attached # Run remote simulations in attached mode
self.launcher.config.group_simulations = self.config.group # Group multiple simulations into a single job (because a very large number of simulations will be scheduled) TODO: IMPLEMENT THIS
self.launcher.config.group_walltime = self.config.group_walltime # The preferred walltime for jobs of a group of simulations
self.launcher.config.cores_per_process = self.config.cores_per_process # The number of cores per process, for non-schedulers
self.launcher.config.dry = self.config.dry # Dry run (don't actually launch simulations, but allow them to be launched manually)
self.launcher.config.progress_bar = True # show progress bars for local execution
self.launcher.config.keep = self.config.keep # keep remote input and output
self.launcher.config.attached = self.config.attached # run SKIRT in attached mode
self.launcher.config.show_progress = self.config.show_progress
# Memory and timing table paths (for recording and for estimating)
self.launcher.config.timing_table_path = self.timing_table_path # The path to the timing table file
self.launcher.config.memory_table_path = self.memory_table_path # The path to the memory table file
# Record timing and memory?
self.launcher.config.add_timing = self.record_timing
self.launcher.config.add_memory = self.record_memory
# Set runtimes plot path
self.launcher.config.runtimes_plot_path = self.visualisation_path
# Advanced parallelization options
self.launcher.config.all_sockets = self.config.all_sockets
self.launcher.config.nsockets = self.config.nsockets
self.launcher.config.allow_multisocket_processes = self.config.allow_multisocket_processes
# -----------------------------------------------------------------
@property
def nremotes(self):
"""
This function ...
:return:
"""
return len(self.config.remotes)
# -----------------------------------------------------------------
@property
def has_single_remote(self):
"""
This function ...
:return:
"""
#return self.launcher.has_single_remote
return self.nremotes == 1
# -----------------------------------------------------------------
@property
def single_host_id(self):
"""
This function ...
:return:
"""
if not self.has_single_remote: raise RuntimeError("Not a single remote host")
return self.config.remotes[0]
# -----------------------------------------------------------------
def set_simulation_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting simulation options ...")
## General
self.launcher.config.relative = True
## Logging
self.launcher.config.logging.verbose = True
self.launcher.config.logging.memory = True
# Set number of local processes
self.launcher.set_nprocesses_local(self.config.nprocesses_local)
# Set number of remote processes
if self.config.nprocesses_remote is not None:
if not self.has_single_remote: raise ValueError("Cannot specify number of remote processes when using more than one remote host")
self.launcher.set_nprocesses_for_host(self.single_host_id, self.config.nprocesses_remote)
# Set data parallel flag for local execution
self.launcher.set_data_parallel_local(self.config.data_parallel_local)
# Set data parallel flag for remote execution
if self.config.data_parallel_remote is not None:
if not self.has_single_remote: raise ValueError("Cannot set data parallelization flag when using more than one remote host")
self.launcher.set_data_parallel_for_host(self.single_host_id, self.config.data_parallel_remote)
# -----------------------------------------------------------------
def set_analysis_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting analysis options ...")
# General analysis options
self.set_general_analysis_options()
# Extraction options
self.set_extraction_options()
# Plotting options
self.set_plotting_options()
# Misc options
self.set_misc_options()
# -----------------------------------------------------------------
def set_general_analysis_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting general analysis options ...")
# To create the extr, plot, misc directories relative in the simulation directory
self.launcher.config.relative_analysis_paths = True
# Set the path to the modeling directory
self.launcher.config.analysis.modeling_path = self.config.path
# Set analyser classes
if self.is_images_modeling: self.launcher.config.analysers = ["pts.modeling.fitting.modelanalyser.ImagesFitModelAnalyser"]
else: self.launcher.config.analysers = ["pts.modeling.fitting.modelanalyser.SEDFitModelAnalyser"]
# -----------------------------------------------------------------
def set_extraction_options(self):
"""
This function ....
:return:
"""
# Debugging
log.debug("Setting extraction options ...")
# Extraction
self.launcher.config.analysis.extraction.path = "extr" # name of the extraction directory
self.launcher.config.analysis.extraction.progress = self.config.extract_progress # extract progress information
self.launcher.config.analysis.extraction.timeline = self.extract_timeline # extract the simulation timeline
self.launcher.config.analysis.extraction.memory = self.extract_memory # extract memory information
# -----------------------------------------------------------------
@property
def truncated_sed_path(self):
"""
This function ...
:return:
"""
return self.environment.truncated_sed_path
# -----------------------------------------------------------------
@property
def observed_sed_paths(self):
"""
This function ...
:return:
"""
if self.is_galaxy_modeling: return [self.observed_sed_path, self.truncated_sed_path]
elif self.is_sed_modeling: return [self.observed_sed_path]
else: raise ValueError("Observed SED not defined for modeling types other than 'galaxy' or 'sed'")
# -----------------------------------------------------------------
def set_plotting_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting plotting options ...")
# Plotting
self.launcher.config.analysis.plotting.path = "plot" # name of the plot directory
self.launcher.config.analysis.plotting.seds = self.config.plot_seds # Plot the output SEDs
self.launcher.config.analysis.plotting.reference_seds = self.observed_sed_paths # the path to the reference SED (for plotting the simulated SED against the reference points)
self.launcher.config.analysis.plotting.format = self.config.plotting_format # plot format
self.launcher.config.analysis.plotting.progress = self.config.plot_progress
self.launcher.config.analysis.plotting.timeline = self.config.plot_timeline
self.launcher.config.analysis.plotting.memory = self.config.plot_memory
# -----------------------------------------------------------------
@lazyproperty
def photometry_image_paths_for_fitting_filter_names(self):
"""
This function ...
:return:
"""
# Create new dictionary
paths = OrderedDict()
# Loop over the paths
for filter_name in self.environment.photometry_image_paths_for_filter_names:
# Skip filters that are not in the fitting filters list
if filter_name not in self.fitting_filter_names: continue
# Add the path
path = self.environment.photometry_image_paths_for_filter_names[filter_name]
paths[filter_name] = path
# Return
return paths
# -----------------------------------------------------------------
@lazyproperty
def fit_sed_path(self):
"""
This function ...
:return:
"""
if self.config.fit_not_clipped: return self.truncated_sed_path
else: return self.observed_sed_path
# -----------------------------------------------------------------
@lazyproperty
def plot_sed_paths(self):
"""
This function ...
:return:
"""
# Initialize dictionary
paths = OrderedDict()
# Set the SED paths
paths["Observed clipped fluxes"] = self.observed_sed_path
paths["Observed truncated fluxes"] = self.truncated_sed_path
# Return the dictionary
return paths
# -----------------------------------------------------------------
def set_misc_options(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting miscellaneous analysis options ...")
# Miscellaneous
self.launcher.config.analysis.misc.path = "misc" # name of the misc output directory
# From images
if self.use_images:
self.launcher.config.analysis.misc.fluxes_from_images = True # calculate observed fluxes from images
self.launcher.config.analysis.misc.fluxes = False
self.launcher.config.analysis.misc.images = False
# Set instrument and coordinate system path
self.launcher.config.analysis.misc.fluxes_from_images_instrument = self.earth_instrument_name
self.launcher.config.analysis.misc.fluxes_from_images_wcs = self.reference_wcs_path
# Set mask paths
self.launcher.config.analysis.misc.fluxes_from_images_masks = self.photometry_image_paths_for_fitting_filter_names
self.launcher.config.analysis.misc.fluxes_from_images_mask_from_nans = True
# Write the fluxes images
self.launcher.config.analysis.misc.write_fluxes_images = True
# Plot fluxes
self.launcher.config.analysis.misc.plot_fluxes_from_images = True
self.launcher.config.analysis.misc.plot_fluxes_from_images_reference_seds = self.plot_sed_paths #self.fit_sed_path
# Set remote for creating images from datacubes
self.launcher.config.analysis.misc.fluxes_from_images_remote = self.other_host_id
self.launcher.config.analysis.misc.fluxes_from_images_remote_spectral_convolution = True
#self.launcher.config.analysis.misc.fluxes_from_images_remote_threshold =
#self.launcher.config.analysis.misc.fluxes_from_images_remote_npixels_threshold =
#self.launcher.config.analysis.misc.fluxes_from_images_rebin_remote_threshold =
# Make a plot of the images
self.launcher.config.analysis.misc.plot_fluxes_images = True
# From SEDs
else:
self.launcher.config.analysis.misc.fluxes_from_images = False
self.launcher.config.analysis.misc.fluxes = True # calculate observed fluxes from SEDs
self.launcher.config.analysis.misc.images = False
# Plot fluxes
self.launcher.config.analysis.misc.plot_fluxes = True
self.launcher.config.analysis.misc.plot_fluxes_reference_seds = self.plot_sed_paths #self.fit_sed_path
# Observation filters and observation instruments
self.launcher.config.analysis.misc.observation_filters = self.fitting_filter_names
self.launcher.config.analysis.misc.observation_instruments = [self.earth_instrument_name]
# Set spectral convolution flag
self.launcher.config.analysis.misc.fluxes_spectral_convolution = self.spectral_convolution
self.launcher.config.analysis.misc.fluxes_from_images_spectral_convolution = self.spectral_convolution
# -----------------------------------------------------------------
def get_initial_generation_name(self):
"""
This function ...
:return:
"""
return self.fitting_run.get_initial_generation_name()
# -----------------------------------------------------------------
def get_genetic_generation_name(self, index):
"""
This function ...
:param index:
:return:
"""
return self.fitting_run.get_genetic_generation_name(index)
# -----------------------------------------------------------------
def clear_for_restart(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Clearing things for restarting from generation '" + self.config.restart_from_generation + "' ...")
# Get the generation names
to_clear = self.get_to_clear_generation_names()
# Get the generations table
generations_table = self.fitting_run.generations_table
best_parameters_table = self.fitting_run.best_parameters_table
# Get the names of the original genetic generations
#original_genetic_generation_names = generations_table.genetic_generations_with_initial
original_genetic_generations_with_initial_names_and_indices = generations_table.genetic_generations_with_initial_names_and_indices
# Keep track of the lowest genetic generation index
lowest_genetic_generation_index = None
removed_initial = False
to_be_removed_paths = []
# Loop over the generations to be cleared
for generation_name in to_clear:
# Prompt to proceed
if prompt_proceed("Are you absolutely sure all output from generation '" + generation_name + "' can be removed?"):
# Update the lowest genetic generation index
if generation_name.startswith("Generation"):
index = self.fitting_run.index_for_generation(generation_name)
if lowest_genetic_generation_index is None or index < lowest_genetic_generation_index: lowest_genetic_generation_index = index
if generation_name == "initial": removed_initial = True
# Remove from generations table
generations_table.remove_entry(generation_name)
# Remove from best_parameters table
best_parameters_table.remove_entry(generation_name)
# Remove from prob/generations
prob_generations_path = fs.create_directory_in(self.fitting_run.prob_path, "generations")
prob_generation_path = fs.join(prob_generations_path, generation_name + ".dat")
#fs.remove_file(prob_generation_path)
to_be_removed_paths.append(prob_generation_path)
# Remove directory from generations/
generation_directory_path = self.fitting_run.get_generation_path(generation_name)
#fs.remove_directory(generation_directory_path)
to_be_removed_paths.append(generation_directory_path)
# User doesn't want to proceed
else:
# Exit with an error
log.error("Cannot proceed without confirmation")
exit()
# IF GENETIC GENERATIONS ARE CLEARED, REPLACE THE MAIN ENGINE, MAIN PRNG AND MAIN OPTIMIZER.CFG
if removed_initial:
# Remove
fs.remove_file(self.fitting_run.main_engine_path)
fs.remove_file(self.fitting_run.main_prng_path)
fs.remove_file(self.fitting_run.optimizer_config_path)
# Some genetic generations are cleared, starting with some lowest genetic generation index
elif lowest_genetic_generation_index is not None:
# Search for the last remaining generation
last_remaining_generation = None
# Determine name of generation just before this index
for other_name, other_index in original_genetic_generations_with_initial_names_and_indices:
if other_index == lowest_genetic_generation_index - 1:
last_remaining_generation = other_name
break
if last_remaining_generation is None: raise RuntimeError("Could not find the generation preceding the first cleared genetic generation")
# Determine the path of this generation
generation = self.fitting_run.get_generation(last_remaining_generation)
# Determine the paths of the engine, prng and optimizer config
engine_path = generation.engine_path
prng_path = generation.prng_path
optimizer_config_path = generation.optimizer_config_path
# Replace the main engine, prng and optimizer config
fs.replace_file(self.fitting_run.main_engine_path, engine_path)
fs.replace_file(self.fitting_run.main_prng_path, prng_path)
fs.replace_file(self.fitting_run.optimizer_config_path, optimizer_config_path)
# Remove everything belonging the cleared generations
fs.remove_directories_and_files(to_be_removed_paths)
# Save the generations table
generations_table.save()
# Save the best parameters table
best_parameters_table.save()
# -----------------------------------------------------------------
def get_to_clear_generation_names(self):
"""
This function ...
:return:
"""
generation_name = self.config.restart_from_generation
# Check whether the generation exists
if generation_name not in self.fitting_run.generation_names: raise ValueError("Generation '" + generation_name + "' does not exist")
# Generation names to clear
to_clear = []
# Grid-type generation
if "grid" in generation_name:
# Add to be cleared
to_clear.append(generation_name)
# Get the timestamp
generation_time = time.get_time_from_unique_name(generation_name)
# Loop over other 'grid' generations
for other_generation_name in self.fitting_run.grid_generations:
if other_generation_name == generation_name: continue
# Get time
other_generation_time = time.get_time_from_unique_name(other_generation_name)
# If the time is later, add to generation names to be cleared
if other_generation_time > generation_time: to_clear.append(other_generation_name)
# Initial genetic generation
elif generation_name == self.get_initial_generation_name():
# All genetic generations have to be cleared
to_clear = self.fitting_run.genetic_generations
# Other genetic generation
elif generation_name.startswith("Generation"):
# Add to be cleared
to_clear.append(generation_name)
# Get the index of the generation
index = self.fitting_run.index_for_generation(generation_name)
# Loop over the other genetic generations
for other_generation_name in self.fitting_run.genetic_generations:
if other_generation_name == generation_name: continue
# Get index of other
other_index = self.fitting_run.index_for_generation(other_generation_name)
# If the index is higher, add to be cleared
if other_index > index: to_clear.append(other_generation_name)
# Could not understand
else: raise ValueError("Could not understand the nature of generation '" + generation_name + "'")
# Return the list of generation names to clear
return to_clear
# -----------------------------------------------------------------
@property
def grid_fitting(self):
"""
This function ...
:return:
"""
return self.config.generation_method == "grid"
# -----------------------------------------------------------------
@property
def genetic_fitting(self):
"""
This function ...
:return:
"""
return self.config.generation_method == "genetic"
# -----------------------------------------------------------------
def set_generator(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the model generator ...")
# Generate new models based on a simple grid (linear or logarithmic) of parameter values
if self.grid_fitting: self.set_grid_generator()
# Generate new models using genetic algorithms
elif self.genetic_fitting: self.set_genetic_generator()
# Invalid generation method
else: raise ValueError("Invalid generation method: " + str(self.config.generation_method))
# Set general options for the model generator
self.set_generator_options()
# Debugging
log.debug("The name of the new generation of parameter exploration is '" + self.generation_name + "'")
# -----------------------------------------------------------------
def set_grid_generator(self):
"""
This function ...
:param self:
:return:
"""
# Inform the user
log.info("Setting grid model generator ...")
# Set a name for the generation
#self.generation_name = time.unique_name("grid")
# Determine grid generation index
highest_index = self.fitting_run.highest_grid_generation_index
if highest_index is None: generation_index = 0
else: generation_index = highest_index + 1
# Set generation name
self.generation_name = "grid_" + str(generation_index)
# Create the model generator
self.generator = GridModelGenerator()
# -----------------------------------------------------------------
def set_genetic_generator(self):
"""
This function ...
:param self:
:return:
"""
# Inform the user
log.info("Setting genetic model generator ...")
# Not the initial generation
if self.get_initial_generation_name() in self.fitting_run.generation_names:
# Set index and name
self.generation_index = self.fitting_run.last_genetic_generation_index + 1
self.generation_name = self.get_genetic_generation_name(self.generation_index)
# Initial generation
else: self.generation_name = self.get_initial_generation_name()
# Create the generator
self.generator = GeneticModelGenerator()
# Set recurrence settings
self.generator.config.check_recurrence = self.config.check_recurrence
self.generator.config.recurrence_rtol = self.config.recurrence_rtol
self.generator.config.recurrence_atol = self.config.recurrence_atol
# -----------------------------------------------------------------
def set_generator_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting general model generator options ...")
# Set the modeling path for the model generator
self.generator.config.path = self.config.path
# Set generator options
self.generator.config.ngenerations = self.config.ngenerations # only useful for genetic model generator (and then again, cannot be more then 1 yet)
self.generator.config.nmodels = self.config.nsimulations
# -----------------------------------------------------------------
def load_ski(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Loading the ski file template ...")
# Load the labeled ski template file
self.ski = self.fitting_run.ski_template
# -----------------------------------------------------------------
def set_ranges(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the parameter ranges ...")
# Automatic: based on previous generation(s)
if self.config.auto_ranges and not self.first_generation: self.determine_ranges()
# Prompt
elif self.config.prompt_ranges: self.prompt_ranges()
# Default
else: self.set_default_ranges()
# -----------------------------------------------------------------
def determine_ranges(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Determining the parameter ranges automatically ...")
# Loop over the free parameters
for label in self.parameter_labels:
# If range is already defined
if label in self.ranges: continue
# Get the parameter distribution
if self.fitting_run.has_distribution(label): distribution = self.fitting_run.get_parameter_distribution(label)
else:
log.warning("Global parameter distribution for the '" + label + "' parameter not found: using parameter distribution for generation '" + self.last_previous_generation_name + "' ...")
distribution = self.fitting_run.get_parameter_distribution_for_generation(label, self.last_previous_generation_name)
# Get the parameter unit
unit = self.fitting_run.parameter_units[label]
# Get leading values
values, min_value, max_value, total_fraction = distribution.get_leading_values_and_edges(self.config.range_probability, logscale=True, return_fraction=True)
# Add units if necessary
if not hasattr(min_value, "unit"): min_value = min_value * unit
if not hasattr(max_value, "unit"): max_value = max_value * unit
# Debugging
log.debug("")
log.debug("Parameter '" + label + "':")
log.debug(" - Most probable value: " + tostr(distribution.most_frequent))
log.debug(" - Least probable value: " + tostr(distribution.least_frequent))
log.debug(" - Number of unique values: " + tostr(distribution.nvalues))
log.debug(" - Value(s) with (combined) >= " + str(self.config.range_probability * 100) + "% of the probablity: " + tostr(values))
log.debug(" - Combined probability: " + tostr(total_fraction * 100, round=True, ndigits=5) + "%")
log.debug(" - Old minimum value: " + tostr(distribution.min_value))
log.debug(" - Old maximum value: " + tostr(distribution.max_value))
log.debug(" - New minimum value: " + tostr(min_value))
log.debug(" - New maximum value: " + tostr(max_value))
# Set the range for this parameter
self.ranges[label] = QuantityRange(min_value, max_value)
# One more line
log.debug("")
# -----------------------------------------------------------------
def prompt_ranges(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Prompting for the parameter ranges ...")
# Create definition
definition = self.create_parameter_ranges_definition()
# Get the ranges
if len(definition) > 0: config = create_configuration_interactive(definition, "ranges", "parameter ranges", add_cwd=False, add_logging=False, prompt_optional=True)
# No parameters for which the ranges still have to be specified interactively
else: config = None
# Set the ranges
for label in self.parameter_labels:
# If range is already defined
if label in self.ranges: continue
# Set the range
self.ranges[label] = config[label + "_range"]
# -----------------------------------------------------------------
def set_default_ranges(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Using the default parameter ranges for each parameter ...")
# Loop over the free parameters, add a setting slot for each parameter range
for label in self.parameter_labels:
# Skip if range is already defined for this label
if label in self.ranges: continue
# Get the default range
default_range = self.fitting_run.fitting_configuration[label + "_range"]
# Set the range
self.ranges[label] = default_range
# -----------------------------------------------------------------
def create_parameter_ranges_definition(self):
"""
This function ...
:return:
"""
# Create a definition
definition = ConfigurationDefinition(write_config=False)
# Create info
extra_info = self.create_parameter_ranges_info()
# Loop over the free parameters, add a setting slot for each parameter range
for label in self.parameter_labels:
# Skip if range is already defined for this label
if label in self.ranges: continue
# Get the default range
default_range = self.fitting_run.fitting_configuration[label + "_range"]
ptype, string = stringify_not_list(default_range)
# Determine description
description = "the range of " + label
description += " (" + extra_info[label] + ")"
# Add the optional range setting for this free parameter
definition.add_optional(label + "_range", ptype, description, default_range)
# Return the definition
return definition
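        # Hedged sketch of the resulting prompt slot for one free parameter
        # (label, ptype and default are illustrative only):
        #
        #   definition.add_optional("dust_mass_range", "quantity_range",
        #                           "the range of dust_mass (initial parameter value = ...)",
        #                           default_range)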
# -----------------------------------------------------------------
def create_parameter_ranges_info(self):
"""
This function ...
:return:
"""
extra_info = dict()
# Check if there are any models that have been evaluated
if self.fitting_run.has_evaluated_models:
# Inform the user
# log.info("Determining the parameter ranges based on the current best values and the specified relative ranges ...")
# Get the best model
model = self.fitting_run.best_model
# Debugging
# log.debug("Using the parameter values of simulation '" + model.simulation_name + "' of generation '" + model.generation_name + "' ...")
# Get the parameter values of the best model
parameter_values = model.parameter_values
# Set info
for label in parameter_values: extra_info[label] = "parameter value of current best model = " + stringify(parameter_values[label])[1]
else:
# Inform the user
#log.info("Determining the parameter ranges based on the first guess values and the specified relative ranges ...")
# Get the initial guess values
parameter_values = self.fitting_run.first_guess_parameter_values
# Set info
for label in parameter_values: extra_info[label] = "initial parameter value = " + stringify(parameter_values[label])[1]
# Return the info
return extra_info
# -----------------------------------------------------------------
def generate_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Generating the model parameters ...")
# Run the model generator
self.generator.run(fitting_run=self.fitting_run, parameter_ranges=self.ranges,
fixed_initial_parameters=self.fixed_initial_parameters, generation=self.generation,
scales=self.parameter_scales, most_sampled_parameters=self.most_sampled_parameters,
sampling_weights=self.sampling_weights, npoints=self.npoints)
# Set the actual number of simulations for this generation
self.generation_info.nsimulations = self.nmodels
# -----------------------------------------------------------------
@property
def parameter_scales(self):
"""
This function ...
:return:
"""
scales = dict()
# Loop over the free parameter
for label in self.fitting_run.free_parameter_labels:
# Check whether scales were given as input
if self.scales is not None and label in self.scales: scales[label] = self.scales[label]
#elif self.config.scales is not None and label in self.config.scales: scales[label] = self.config.scales[label]
#else: #raise ValueError("Scale was not set for '" + label + "'")
# # Take from grid fitting configuration
# scales[label] = self.fitting_run.grid_settings[label + "_scale"]
else: scales[label] = self.fitting_run.grid_settings[label + "_scale"]
# Return the scales
return scales
# -----------------------------------------------------------------
@lazyproperty
def selfabsorption(self):
"""
This function ...
:return:
"""
# Determine whether selfabsorption should be enabled
if self.config.selfabsorption is not None: return self.config.selfabsorption
else: return self.fitting_run.current_selfabsorption
# -----------------------------------------------------------------
@lazyproperty
def transient_heating(self):
"""
This function ...
:return:
"""
# Determine whether transient heating should be enabled
if self.config.transient_heating is not None: return self.config.transient_heating
else: return self.fitting_run.current_transient_heating
# -----------------------------------------------------------------
@lazyproperty
def spectral_convolution(self):
"""
This function ...
:return:
"""
if self.config.spectral_convolution is not None: return self.config.spectral_convolution
else: return self.fitting_run.current_spectral_convolution
# -----------------------------------------------------------------
@lazyproperty
def use_images(self):
"""
This function ...
:return:
"""
if self.config.use_images is not None: return self.config.use_images
else: return self.fitting_run.current_use_images
# -----------------------------------------------------------------
@lazyproperty
def npackages(self):
"""
This function ...
:return:
"""
# Defined?
if self.config.npackages is not None: npackages = self.config.npackages
else:
# Determine the number of photon packages from previous number
if self.config.increase_npackages: npackages = int(self.fitting_run.current_npackages * self.config.npackages_factor)
else: npackages = self.fitting_run.current_npackages
# Check
if npackages < self.ndust_cells:
if self.config.adjust_npackages:
log.debug("Adjusting the number of photon packages from " + str(npackages) + " to the number of dust cells (" + str(self.ndust_cells) + ") ...")
npackages = int(self.ndust_cells * self.config.ncells_npackages_factor)
else: log.warning("The number of photon packages (" + str(npackages) + ") is less than the number of dust cells (" + str(self.ndust_cells) + ")")
# Return the number of photon packages
return npackages
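# Worked example of the rule above (hypothetical numbers, not from a real
# run): previous npackages = 5e5, npackages_factor = 2, ndust_cells = 2e6,
# ncells_npackages_factor = 1.5. Increasing gives int(5e5 * 2) = 1 000 000,
# which is still below the 2 000 000 dust cells, so with adjust_npackages
# enabled the count becomes int(2e6 * 1.5) = 3 000 000; otherwise only the
# warning is emitted and 1 000 000 is kept.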
# -----------------------------------------------------------------
@lazyproperty
def representation(self):
"""
This function ...
:return:
"""
# DETERMINE THE REPRESENTATION
if self.config.refine_spatial: return self.fitting_run.next_model_representation # GET NEXT REPRESENTATION (THEY ARE NAMED IN ORDER OF SPATIAL RESOLUTION)
# Get the previous (i.e. the current, since this generation has not been added yet) representation
else: return self.fitting_run.current_model_representation # GET LAST REPRESENTATION #self.fitting_run.initial_representation
# -----------------------------------------------------------------
@property
def representation_name(self):
"""
This function ...
:return:
"""
return self.representation.name
# -----------------------------------------------------------------
@property
def ndust_cells(self):
"""
This function ...
:return:
"""
return self.representation.ndust_cells
# -----------------------------------------------------------------
@property
def wavelength_grids_path(self):
"""
This function ...
:return:
"""
return self.fitting_run.wavelength_grids_path
# -----------------------------------------------------------------
@property
def highres_wavelength_grid(self):
"""
This function ...
:return:
"""
if self.config.highres is not None: return self.config.highres
else: return self.fitting_run.is_highres_current_wavelength_grid
# -----------------------------------------------------------------
@lazyproperty
def wavelength_grid_name(self):
"""
This function ...
:return:
"""
# Use high-resolution grids
if self.highres_wavelength_grid:
# Target number of wavelengths is defined
if self.config.nwavelengths is not None: return "highres_" + str(self.config.nwavelengths)
# Refine from previous high-resolution grid?
elif self.fitting_run.is_highres_current_wavelength_grid:
if self.config.refine_spectral: return self.fitting_run.next_wavelength_grid_name
else: return self.fitting_run.current_wavelength_grid_name
# Get lowest npoints wavelength grid of high-resolution grids
else:
if self.config.refine_spectral: log.warning("Not refining more: previous generation did not use high-resolution wavelength grid")
return self.fitting_run.lowest_highres_wavelength_grid_name
# Spectral convolution: use refined grids
elif self.spectral_convolution:
# Target number of wavelengths is defined
if self.config.nwavelengths is not None: return "refined_" + str(self.config.nwavelengths)
# Refine from previous refined grid?
if self.fitting_run.is_refined_current_wavelength_grid:
if self.config.refine_spectral: return self.fitting_run.next_wavelength_grid_name
else: return self.fitting_run.current_wavelength_grid_name
# Get lowest npoints wavelength grid of refined grids
else:
if self.config.refine_spectral: log.warning("Not refining more: previous generation did not use refined wavelength grid")
return self.fitting_run.lowest_refined_wavelength_grid_name
# Basic grids
else:
# Target number of wavelengths is defined
if self.config.nwavelengths is not None: return "basic_" + str(self.config.nwavelengths)
# Refine from previous basic grid?
if self.fitting_run.is_basic_current_wavelength_grid:
if self.config.refine_spectral: return self.fitting_run.next_wavelength_grid_name
else: return self.fitting_run.current_wavelength_grid_name
# Get lowest npoints wavelength grid of basic grids
else:
if self.config.refine_spectral: raise ValueError("Cannot refine: use 'nwavelengths' to define a specific wavelength grid (and 'highres' to control whether to use high-resolution grid)")
return self.fitting_run.lowest_basic_wavelength_grid_name
# -----------------------------------------------------------------
@property
def wavelength_grid_filename(self):
"""
This function ...
:return:
"""
return self.wavelength_grid_name + ".dat"
# -----------------------------------------------------------------
@lazyproperty
def wavelength_grid_path(self):
"""
This function ...
:return:
"""
return fs.join(self.wavelength_grids_path, self.wavelength_grid_filename)
# -----------------------------------------------------------------
@lazyproperty
def wavelength_grid(self):
"""
This function ...
:return:
"""
return WavelengthGrid.from_skirt_input(self.wavelength_grid_path)
# -----------------------------------------------------------------
@property
def nwavelengths(self):
"""
This function ...
:return:
"""
return len(self.wavelength_grid)
# -----------------------------------------------------------------
@lazyproperty
def fit_not_clipped(self):
"""
This function ...
:return:
"""
if self.use_images and self.config.fit_not_clipped: raise ValueError("Cannot fit to non-clipped fluxes when using images for calculating observed fluxes (clip masks are used)")
else: return self.config.fit_not_clipped
# -----------------------------------------------------------------
def set_info(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the generation info ...")
# Set the generation info
self.generation_info.name = self.generation_name
self.generation_info.index = self.generation_index
self.generation_info.method = self.config.generation_method
# Wavelength grid and representation
self.generation_info.wavelength_grid_name = self.wavelength_grid_name
self.generation_info.model_representation_name = self.representation_name
# DON'T DO IT HERE YET: GET THE NUMBER OF ACTUAL MODELS SPIT OUT BY THE MODEL GENERATOR (RECURRENCE)
#self.generation.nsimulations = self.config.nsimulations
# Set number of photon packages
self.generation_info.npackages = self.npackages
# Simulation options
self.generation_info.selfabsorption = self.selfabsorption
self.generation_info.transient_heating = self.transient_heating
self.generation_info.spectral_convolution = self.spectral_convolution
self.generation_info.use_images = self.use_images
# Fit options
self.generation_info.fit_not_clipped = self.fit_not_clipped
# -----------------------------------------------------------------
@lazyproperty
def use_file_tree_dust_grid(self):
"""
This function ...
:return:
"""
smile = SKIRTSmileSchema()
return smile.supports_file_tree_grids and self.representation.has_dust_grid_tree
# -----------------------------------------------------------------
def set_input(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the input paths ...")
# Initialize the SimulationInput object
self.simulation_input = SimulationInput()
# Set the paths to the input maps
for name in self.fitting_run.input_map_paths:
path = self.fitting_run.input_map_paths[name]
self.simulation_input.add_file(path, name)
# DETERMINE AND SET THE PATH TO THE APPROPRIATE DUST GRID TREE FILE
if self.use_file_tree_dust_grid: self.simulation_input.add_file(self.representation.dust_grid_tree_path)
# Determine and set the path to the appropriate wavelength grid file
self.simulation_input.add_file(self.wavelength_grid_path)
# Debugging
log.debug("The wavelength grid for the simulations contains " + str(self.nwavelengths) + " wavelength points")
# -----------------------------------------------------------------
def create_generation(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the generation directory")
# Set the path to the generation directory
self.generation_info.path = fs.create_directory_in(self.fitting_run.generations_path, self.generation_name)
# Create the generation object
self.generation = Generation(self.generation_info)
# Set working directory for the batch launcher
self.launcher.config.path = self.generation_path
# -----------------------------------------------------------------
def initialize_generation_tables(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Initializing generation tables ...")
# Initialize the individuals table
self.individuals_table = IndividualsTable()
# Initialize the parameters table
self.parameters_table = ParametersTable(parameters=self.parameter_labels, units=self.fitting_run.parameter_units)
# Initialize the chi squared table
self.chi_squared_table = ChiSquaredTable()
# -----------------------------------------------------------------
@lazyproperty
def earth_instrument(self):
"""
This function ...
:return:
"""
if self.use_images: return self.representation.simple_instrument
else: return self.representation.sed_instrument
# -----------------------------------------------------------------
@lazyproperty
def instruments(self):
"""
This function ...
:return:
"""
instrs = dict()
instrs[self.earth_instrument_name] = self.earth_instrument
return instrs
# -----------------------------------------------------------------
def adjust_ski(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Adjusting the ski template for the properties of this generation ...")
# Set packages
self.set_npackages()
# Set self-absorption
self.set_selfabsorption()
# Set transient heating
self.set_transient_heating()
# Set wavelength grid
if self.fitting_run.has_wavelength_grids: self.set_wavelength_grid()
# Set model representation
self.set_representation()
# Set instruments
self.set_instruments()
# -----------------------------------------------------------------
def set_npackages(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting the number of photon packages to " + str(self.generation_info.npackages) + " ...")
# Set the number of photon packages per wavelength
self.ski.setpackages(self.generation_info.npackages)
# -----------------------------------------------------------------
def set_selfabsorption(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Enabling dust self-absorption ..." if self.generation_info.selfabsorption else "Disabling dust self-absorption ...")
# Set dust self-absorption
if self.generation_info.selfabsorption: self.ski.enable_selfabsorption()
else: self.ski.disable_selfabsorption()
# -----------------------------------------------------------------
def set_transient_heating(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Enabling transient heating ..." if self.generation_info.transient_heating else "Disabling transient heating ...")
# Set transient heating
if self.generation_info.transient_heating: self.ski.set_transient_dust_emissivity()
else: self.ski.set_grey_body_dust_emissivity()
# -----------------------------------------------------------------
def set_wavelength_grid(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting the name of the wavelengths file to '" + self.wavelength_grid_filename + "' ...")
# Set the name of the wavelength grid file
self.ski.set_file_wavelength_grid(self.wavelength_grid_filename)
# -----------------------------------------------------------------
def set_representation(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting the model representation ...")
# GET DUST GRID
if self.use_file_tree_dust_grid:
# Get the file tree dust grid object
dust_grid = self.representation.create_file_tree_dust_grid(write=False)
# Make sure it is only the file name, not a complete path
dust_grid.filename = fs.name(dust_grid.filename)
# REGULAR DUST GRID OBJECT
else: dust_grid = self.representation.dust_grid
# Set the dust grid
self.ski.set_dust_grid(dust_grid)
# -----------------------------------------------------------------
def set_instruments(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Setting the instruments ...")
# Remove the existing instruments
self.ski.remove_all_instruments()
# Add the instrument
self.ski.add_instrument(self.earth_instrument_name, self.earth_instrument)
# -----------------------------------------------------------------
def launch_or_finish(self):
"""
This function ...
:return:
"""
# Test whether simulations are required, because if the optimizer detects recurrence of earlier models,
# it is possible that no simulations have to be done
# Launch simulations
if self.needs_simulations: self.launch()
# No simulations need to be launched
else: self.set_finishing_time()
# -----------------------------------------------------------------
def launch(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Launching the simulations ...")
# Only if not testing, because otherwise generation path does not exist
if not self.testing:
# Set the paths to the directories to contain the launch scripts (job scripts) for the different remote hosts
# Just use the directory created for the generation
for host_id in self.launcher.host_ids: self.launcher.set_script_path(host_id, self.generation_info.path)
# Enable screen output logging for remotes without a scheduling system for jobs
for host_id in self.launcher.no_scheduler_host_ids: self.launcher.enable_screen_output(host_id)
# Loop over the simulations, add them to the queue
for simulation_name in self.simulation_names:
# Get the parameter values
parameter_values = self.parameters_table.parameter_values_for_simulation(simulation_name)
# Prepare simulation directories, ski file, and return the simulation definition
if not self.testing:
definition = prepare_simulation(simulation_name, self.ski, parameter_values, self.object_name,
self.simulation_input, self.generation_info.path, scientific=True, fancy=True,
ndigits=self.fitting_run.ndigits_dict)
else: definition = make_test_definition(simulation_name, self.ski, parameter_values, self.object_name,
self.simulation_input, scientific=True, fancy=True, ndigits=self.fitting_run.ndigits_dict)
# Put the parameters in the queue and get the simulation object
self.launcher.add_to_queue(definition, simulation_name)
# Set the TEST flag if testing
self.launcher.config.test = self.testing
# Run the launcher, launches the simulations and retrieves and analyses finished simulations
try: self.launcher.run(ncells=self.ndust_cells)
except Exception as e:
# Raise the exception again if we are just testing
if self.testing: raise e
# Something went wrong launching the simulations, show error message
log.error("No simulations could be launched: removing generation ...")
log.error(str(e))
if log.is_debug: traceback.print_exc()
log.error("Try again later")
log.error("Cleaning up generation and quitting ...")
# Remove this generation from the generations table
self.fitting_run.generations_table.remove_entry(self.generation_name)
self.fitting_run.generations_table.save()
# Remove the generation directory
fs.remove_directory(self.generation_path)
# Check the launched simulations
if not self.testing: self.check_simulations()
# Save the successful simulation files in their own directories
self.save_simulations()
# -----------------------------------------------------------------
def set_finishing_time(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Setting the generation finishing time (there were no simulations for this generation) ...")
# Set the time and save the table
self.fitting_run.generations_table.set_finishing_time(self.generation_name, time.timestamp())
self.fitting_run.generations_table.save()
# -----------------------------------------------------------------
@property
def simulations(self):
"""
This function ...
:return:
"""
return self.launcher.launched_simulations
# -----------------------------------------------------------------
def check_simulations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Checking the simulations ...")
# Check the number of simulations that were effectively launched
if self.nmodels == len(self.simulations):
log.success("All simulations were scheduled succesfully")
return
# No simulations were launched
if len(self.simulations) == 0:
# Show error message
log.error("No simulations could be launched: removing generation ...")
log.error("Try again later")
log.error("Cleaning up generation and quitting ...")
# Remove this generation from the generations table
self.fitting_run.generations_table.remove_entry(self.generation_name)
self.fitting_run.generations_table.save()
# Remove the generation directory
fs.remove_directory(self.generation_path)
# Quit
exit()
# Fewer simulations were launched
elif len(self.simulations) < self.nmodels:
# Get the names of simulations that were launched
launched_simulation_names = [simulation.name for simulation in self.simulations]
if None in launched_simulation_names: raise RuntimeError("Some or all simulation don't have a name defined")
# Show error message
log.error("Launching a simulation for the following models failed:")
log.error("")
# Loop over all simulations in the parameters table
failed_indices = []
for index, simulation_name in enumerate(self.parameters_table.simulation_names):
# This simulation is OK
if simulation_name in launched_simulation_names: continue
log.error("Model #" + str(index + 1))
log.error("")
parameter_values = self.parameters_table.parameter_values_for_simulation(simulation_name)
for label in parameter_values: log.error(" - " + label + ": " + stringify_not_list(parameter_values[label])[1])
log.error("")
failed_indices.append(index)
# Show error message
log.error("Removing corresponding entries from the model parameters table ...")
# Remove rows and save
self.parameters_table.remove_rows(failed_indices)
self.parameters_table.save()
# Unexpected
else: raise RuntimeError("Unexpected error where nsmulations > nmodels")
# -----------------------------------------------------------------
def save_simulations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Saving the simulation files in the generation's simulation directories ...")
# Loop over the simulations
for simulation in self.simulations:
# Check whether the simulation was successfully launched
if simulation.name not in self.parameters_table.simulation_names: continue
# Determine the filepath
filepath = fs.join(simulation.base_path, "initial.sim")
# Save the simulation file
simulation.saveto(filepath, update_path=False)
# -----------------------------------------------------------------
@property
def model_names(self):
"""
This function ...
:return:
"""
return self.generator.individual_names
# -----------------------------------------------------------------
@property
def nmodels(self):
"""
This function ...
:return:
"""
return self.generator.nmodels
# -----------------------------------------------------------------
@property
def model_parameters(self):
"""
This function ...
:return:
"""
return self.generator.parameters
# -----------------------------------------------------------------
@property
def uses_schedulers(self):
"""
This function ...
:return:
"""
return self.launcher.uses_schedulers
# -----------------------------------------------------------------
@property
def simulation_names(self):
"""
This function ...
:return:
"""
return self.individuals_table.simulation_names
# -----------------------------------------------------------------
@property
def needs_simulations(self):
"""
This function ...
:return:
"""
return len(self.simulation_names) > 0
# -----------------------------------------------------------------
@property
def generation_path(self):
"""
This function ...
:return:
"""
return self.generation_info.path
# -----------------------------------------------------------------
@property
def run_name(self):
"""
This function ...
:return:
"""
return self.fitting_run.name
# -----------------------------------------------------------------
def fill_tables(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Filling the tables for the current generation ...")
# Loop over the model names
counter = 0
for name in self.model_names:
# Generate the simulation name
simulation_name = self.object_name + "__" + self.run_name + "__" + self.generation_name + "__" + str(counter)
# Debugging
log.debug("Adding an entry to the individuals table with:")
log.debug("")
log.debug(" - Simulation name: " + simulation_name)
log.debug(" - Individual_name: " + name)
log.debug("")
# Add entry
self.individuals_table.add_entry(simulation_name, name)
# Get the parameter values
parameter_values = get_parameter_values_for_named_individual(self.model_parameters, name, self.fitting_run)
# Debugging
log.debug("Adding entry to the parameters table with:")
log.debug("")
log.debug(" - Simulation name: " + simulation_name)
for label in parameter_values: log.debug(" - " + label + ": " + tostr(parameter_values[label], scientific=True, fancy=True, ndigits=self.fitting_run.ndigits_dict[label]))
log.debug("")
# Add an entry to the parameters table
self.parameters_table.add_entry(simulation_name, parameter_values)
# Increment counter
counter += 1
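# Note: the simulation names generated above are what tie the individuals,
# parameters and chi squared tables together; they follow the pattern
# '<object>__<run>__<generation>__<counter>', e.g. (hypothetical names, not
# from an actual run) 'M81__run_1__generation_4__0', 'M81__run_1__generation_4__1', ...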
# -----------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# 1. Write the generation info
self.write_generation_info()
# 2. Write the generations table
self.write_generations_table()
# 3. Write the individuals table
self.write_individuals()
# 4. Write the parameters table
self.write_parameters()
# 5. Write the (empty) chi squared table
self.write_chi_squared()
# -----------------------------------------------------------------
def write_generation_info(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the generation info ...")
# Save as a data file
self.generation_info.saveto(self.generation.info_path)
# -----------------------------------------------------------------
def write_generations_table(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the generations table ...")
# Add an entry to the generations table
self.fitting_run.generations_table.add_entry(self.generation_info, self.ranges, self.parameter_scales)
# Save the table
self.fitting_run.generations_table.save()
# -----------------------------------------------------------------
def write_individuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the individuals table ...")
# Save the individuals table
self.individuals_table.saveto(self.generation.individuals_table_path)
# -----------------------------------------------------------------
def write_parameters(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the model parameters table ...")
# Save the parameters table
self.parameters_table.saveto(self.generation.parameters_table_path)
# -----------------------------------------------------------------
def write_chi_squared(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the chi squared table ...")
# Save the chi squared table
self.chi_squared_table.saveto(self.generation.chi_squared_table_path)
# -----------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# Show the ranges
self.show_ranges()
# Show the number of points
self.show_npoints()
# Show the simulation options
self.show_simulation_options()
# Show instruments
self.show_instruments()
# Show execution options
self.show_execution_options()
# Show analysis options
self.show_analysis_options()
# -----------------------------------------------------------------
def show_ranges(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing the parameter ranges ...")
print("")
print(fmt.green + fmt.underlined + "Parameter ranges:" + fmt.reset)
print("")
# Loop over the parameters
for label in self.ranges:
# Get the range
parameter_range = self.ranges[label]
# Show
print(" - " + fmt.bold + label + fmt.reset + ": " + tostr(parameter_range))
print("")
# -----------------------------------------------------------------
def show_npoints(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing the number of points ...")
print("")
print(fmt.green + fmt.underlined + "Number of grid points:" + fmt.reset)
print("")
# Loop over the parameters
for label in self.npoints:
# Get the npoints
npoints = self.npoints[label]
# Show
print(" - " + fmt.bold + label + fmt.reset + ": " + tostr(npoints))
print("")
# -----------------------------------------------------------------
def show_simulation_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing simulation options ...")
print("")
print(fmt.green + fmt.underlined + "Simulation options:" + fmt.reset)
print("")
print(" - number of wavelengths: " + tostr(self.nwavelengths) + " (" + self.wavelength_grid_name + ")")
print(" - number of dust cells: " + tostr(self.ndust_cells, scientific_int=False) + " (" + self.representation_name + ")")
print(" - number of photon packages per wavelength: " + tostr(self.npackages, scientific_int=False))
print(" - selfabsorption: " + tostr(self.selfabsorption))
print(" - transient heating: " + tostr(self.transient_heating))
print(" - dust grid type: " + tostr(self.representation.dust_grid_type))
if self.representation.has_dust_grid_tree_distribution:
print(" - dust grid minimum level: " + tostr(self.representation.dust_grid_min_level))
print(" - dust_grid_maximum_level: " + tostr(self.representation.dust_grid_max_level))
print("")
# -----------------------------------------------------------------
def show_instruments(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing the instruments ...")
print("")
print(fmt.green + fmt.underlined + "Instruments:" + fmt.reset)
print("")
# Loop over the instruments
for name in self.instruments:
instrument = self.instruments[name]
instr_class = str(type(instrument).__name__)
print(" - " + fmt.bold + name + fmt.reset + " (" + instr_class + "):")
print("")
print(instrument.to_string(line_prefix=" ", bullet="*", bold=False))
print("")
# -----------------------------------------------------------------
def show_execution_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing execution options ...")
print("")
print(fmt.green + fmt.underlined + "Execution:" + fmt.reset)
print("")
print(" - remote hosts: " + tostr(self.remote_host_ids))
if self.config.cores_per_process is not None: print(" - number of cores per process: " + tostr(self.config.cores_per_process))
else: print(" - number of cores per process determined automatically")
if self.uses_remotes:
if self.config.nprocesses_remote is not None and self.has_single_remote: nprocesses = self.config.nprocesses_remote
else: nprocesses = None
if self.config.data_parallel_remote is not None and self.has_single_remote: data_parallel = self.config.data_parallel_remote
else: data_parallel = None
else:
nprocesses = self.config.nprocesses_local
data_parallel = self.config.data_parallel_local
if nprocesses is not None: print(" - number of processes: " + tostr(nprocesses))
else: print(" - number of processes determined automatically")
if data_parallel is not None: print(" - data parallelization: " + tostr(data_parallel))
else: print(" - data parallelization enabled or disabled automatically")
print("")
# -----------------------------------------------------------------
def show_analysis_options(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing analysis options ...")
print("")
print(fmt.green + fmt.underlined + "Analysis options:" + fmt.reset)
print("")
print(" - spectral convolution: " + tostr(self.spectral_convolution))
print(" - use images: " + tostr(self.use_images))
print(" - extract progress: " + tostr(self.config.extract_progress))
print(" - extract timeline: " + tostr(self.extract_timeline))
print(" - extract memory: " + tostr(self.extract_memory))
print(" - plotting format: " + tostr(self.config.plotting_format))
# From images
if self.use_images:
print(" - instrument reference image: " + tostr(self.reference_component_name))
print(" - reference image xsize: " + tostr(self.reference_wcs.xsize))
print(" - reference image ysize: " + tostr(self.reference_wcs.ysize))
print(" - reference image pixelscale: " + tostr(self.reference_wcs.average_pixelscale.to("arcsec")))
# For plotting:
#print(" - fluxes_from_images_masks: " + tostr(self.environment.photometry_image_paths_for_filter_names))
# Observation filters
print(" - observation_filters: " + tostr(self.observed_filter_names))
print("")
# -----------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot reference images
self.plot_reference_images()
# -----------------------------------------------------------------
@lazyproperty
def reference_images_path(self):
"""
This function ...
:return:
"""
return fs.create_directory_in(self.fitting_run.path, "refimages__" + self.generation_name)
# -----------------------------------------------------------------
@property
def fitting_filters(self):
"""
This function ...
:return:
"""
return self.fitting_run.fitting_filters
# -----------------------------------------------------------------
@property
def fitting_filter_names(self):
"""
This function ...
:return:
"""
return [str(fltr) for fltr in self.fitting_filters]
# -----------------------------------------------------------------
def plot_reference_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the reference images ...")
# Create plotter
plotter = StandardImageGridPlotter()
# Set output directory
plotter.config.output = self.reference_images_path
# Extra
plotter.config.normalize = True
plotter.config.colormap = self.config.reference_images_colormap
# Write data
plotter.config.write = self.config.write_reference_images
# Rebin and crop
plotter.rebin_to = self.reference_wcs
plotter.crop_to = self.environment.truncation_box
# Loop over the filters
for fltr in self.environment.photometry_image_paths_for_filters:
# Check whether fitting filter
#if fltr not in self.fitting_filters: continue
# Get path
path = self.environment.photometry_image_paths_for_filters[fltr]
# Add to plot
plotter.add_image_from_file(path, masks=False, regions=False)
# Run the plotter
plotter.run()
# -----------------------------------------------------------------
| SKIRT/PTS | modeling/fitting/explorer.py | Python | agpl-3.0 | 94,935 | [
"Galaxy"
] | 353442ce9eeb4f4634a132e5ad50649ec4c42ccd1dbb6fd1951c35eef6e96d9a |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from . import *
from . fixtures import *
import os
import email.iterators
from talon import quotations
import six
from six.moves import range
from six import StringIO
@patch.object(quotations, 'MAX_LINES_COUNT', 1)
def test_too_many_lines():
msg_body = """Test reply
Hi
-----Original Message-----
Test"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote():
msg_body = """Test reply
On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> wrote:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_polymail():
msg_body = """Test reply
On Tue, Apr 11, 2017 at 10:07 PM John Smith
<
mailto:John Smith <johnsmith@gmail.com>
> wrote:
Test quoted data
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_sent_from_samsung_smb_wrote():
msg_body = """Test reply
Sent from Samsung MobileName <address@example.com> wrote:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_wrote_somebody():
eq_('Lorem', quotations.extract_from_plain(
"""Lorem
Op 13-02-2014 3:18 schreef Julius Caesar <pantheon@rome.com>:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
def test_pattern_on_date_somebody_wrote_date_with_slashes():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test.
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_date_time_email_splitter():
msg_body = """Test reply
2014-10-17 11:28 GMT+03:00 Postmaster <
postmaster@sandboxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mailgun.org>:
> First from site
>
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_wrote_allows_space_in_front():
msg_body = """Thanks Thanmai
On Mar 8, 2012 9:59 AM, "Example.com" <
r+7f1b094ceb90e18cca93d53d3703feae@example.com> wrote:
>**
> Blah-blah-blah"""
eq_("Thanks Thanmai", quotations.extract_from_plain(msg_body))
def test_pattern_on_date_somebody_sent():
msg_body = """Test reply
On 11-Apr-2011, at 6:54 PM, Roman Tkachenko <romant@example.com> sent:
>
> Test
>
> Roman"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_appointment():
msg_body = """Response
10/19/2017 @ 9:30 am for physical therapy
Bla
1517 4th Avenue Ste 300
London CA 19129, 555-421-6780
John Doe, FCLS
Mailgun Inc
555-941-0697
From: from@example.com [mailto:from@example.com]
Sent: Wednesday, October 18, 2017 2:05 PM
To: John Doer - SIU <jd@example.com>
Subject: RE: Claim # 5551188-1
Text"""
expected = """Response
10/19/2017 @ 9:30 am for physical therapy
Bla
1517 4th Avenue Ste 300
London CA 19129, 555-421-6780
John Doe, FCLS
Mailgun Inc
555-941-0697"""
eq_(expected, quotations.extract_from_plain(msg_body))
def test_line_starts_with_on():
msg_body = """Blah-blah-blah
On blah-blah-blah"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_reply_and_quotation_splitter_share_line():
# reply lines and 'On <date> <person> wrote:' splitter pattern
# are on the same line
msg_body = """reply On Wed, Apr 4, 2012 at 3:59 PM, bob@example.com wrote:
> Hi"""
eq_('reply', quotations.extract_from_plain(msg_body))
# test pattern '--- On <date> <person> wrote:' with reply text on
# the same line
msg_body = """reply--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote:
> Hi"""
eq_('reply', quotations.extract_from_plain(msg_body))
# test pattern '--- On <date> <person> wrote:' with reply text containing
# '-' symbol
msg_body = """reply
bla-bla - bla--- On Wed, Apr 4, 2012 at 3:59 PM, me@domain.com wrote:
> Hi"""
reply = """reply
bla-bla - bla"""
eq_(reply, quotations.extract_from_plain(msg_body))
def _check_pattern_original_message(original_message_indicator):
msg_body = u"""Test reply
-----{}-----
Test"""
eq_('Test reply', quotations.extract_from_plain(
msg_body.format(six.text_type(original_message_indicator))))
def test_english_original_message():
_check_pattern_original_message('Original Message')
_check_pattern_original_message('Reply Message')
def test_german_original_message():
_check_pattern_original_message(u'Ursprüngliche Nachricht')
_check_pattern_original_message('Antwort Nachricht')
def test_danish_original_message():
_check_pattern_original_message('Oprindelig meddelelse')
def test_reply_after_quotations():
msg_body = """On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test
Test reply"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_android_wrote():
msg_body = """Test reply
---- John Smith wrote ----
> quoted
> text
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_reply_wraps_quotations():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
> Test
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_reply_wraps_nested_quotations():
msg_body = """Test reply
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>Test test
>On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
>
>>
>> Test.
>>
>> Roman
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_quotation_separator_takes_2_lines():
msg_body = """Test reply
On Fri, May 6, 2011 at 6:03 PM, Roman Tkachenko from Hacker News
<roman@definebox.com> wrote:
> Test.
>
> Roman
Regards, Roman"""
reply = """Test reply
Regards, Roman"""
eq_(reply, quotations.extract_from_plain(msg_body))
def test_quotation_separator_takes_3_lines():
msg_body = """Test reply
On Nov 30, 2011, at 12:47 PM, Somebody <
416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4@somebody.domain.com>
wrote:
Test message
"""
eq_("Test reply", quotations.extract_from_plain(msg_body))
def test_short_quotation():
msg_body = """Hi
On 04/19/2011 07:10 AM, Roman Tkachenko wrote:
> Hello"""
eq_("Hi", quotations.extract_from_plain(msg_body))
def test_with_indent():
msg_body = """YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin.
------On 12/29/1987 17:32 PM, Julius Caesar wrote-----
Brunch mumblecore pug Marfa tofu, irure taxidermy hoodie readymade pariatur.
"""
eq_("YOLO salvia cillum kogi typewriter mumblecore cardigan skateboard Austin.", quotations.extract_from_plain(msg_body))
def test_short_quotation_with_newline():
msg_body = """Btw blah blah...
On Tue, Jan 27, 2015 at 12:42 PM -0800, "Company" <christine.XXX@XXX.com> wrote:
Hi Mark,
Blah blah?
Thanks,Christine
On Jan 27, 2015, at 11:55 AM, Mark XXX <mark@XXX.com> wrote:
Lorem ipsum?
Mark
Sent from Acompli"""
eq_("Btw blah blah...", quotations.extract_from_plain(msg_body))
def test_pattern_date_email_with_unicode():
msg_body = """Replying ok
2011/4/7 Nathan \xd0\xb8ova <support@example.com>
> Cool beans, scro"""
eq_("Replying ok", quotations.extract_from_plain(msg_body))
def test_english_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain("""Allo! Follow up MIME!
From: somebody@example.com
Sent: March-19-11 5:42 PM
To: Somebody
Subject: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_german_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
"""Allo! Follow up MIME!
Von: somebody@example.com
Gesendet: Dienstag, 25. November 2014 14:59
An: Somebody
Betreff: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_french_multiline_from_block():
eq_('Lorem ipsum', quotations.extract_from_plain(
u"""Lorem ipsum
De : Brendan xxx [mailto:brendan.xxx@xxx.com]
Envoyé : vendredi 23 janvier 2015 16:39
À : Camille XXX
Objet : Follow Up
Blah-blah-blah
"""))
def test_french_from_block():
eq_('Lorem ipsum', quotations.extract_from_plain(
u"""Lorem ipsum
Le 23 janv. 2015 à 22:03, Brendan xxx <brendan.xxx@xxx.com<mailto:brendan.xxx@xxx.com>> a écrit:
Bonjour!"""))
def test_polish_from_block():
eq_('Lorem ipsum', quotations.extract_from_plain(
u"""Lorem ipsum
W dniu 28 stycznia 2015 01:53 użytkownik Zoe xxx <zoe.xxx@xxx.com>
napisał:
Blah!
"""))
def test_danish_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
"""Allo! Follow up MIME!
Fra: somebody@example.com
Sendt: 19. march 2011 12:10
Til: Somebody
Emne: The manager has commented on your Loop
Blah-blah-blah
"""))
def test_swedish_from_block():
eq_('Allo! Follow up MIME!', quotations.extract_from_plain(
u"""Allo! Follow up MIME!
Från: Anno Sportel [mailto:anno.spoel@hsbcssad.com]
Skickat: den 26 augusti 2015 14:45
Till: Isacson Leiff
Ämne: RE: Week 36
Blah-blah-blah
"""))
def test_swedish_from_line():
eq_('Lorem', quotations.extract_from_plain(
"""Lorem
Den 14 september, 2015 02:23:18, Valentino Rudy (valentino@rudy.be) skrev:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
def test_norwegian_from_line():
eq_('Lorem', quotations.extract_from_plain(
u"""Lorem
På 14 september 2015 på 02:23:18, Valentino Rudy (valentino@rudy.be) skrev:
Veniam laborum mlkshk kale chips authentic. Normcore mumblecore laboris, fanny pack readymade eu blog chia pop-up freegan enim master cleanse.
"""))
def test_dutch_from_block():
eq_('Gluten-free culpa lo-fi et nesciunt nostrud.', quotations.extract_from_plain(
"""Gluten-free culpa lo-fi et nesciunt nostrud.
Op 17-feb.-2015, om 13:18 heeft Julius Caesar <pantheon@rome.com> het volgende geschreven:
Small batch beard laboris tempor, non listicle hella Tumblr heirloom.
"""))
def test_vietnamese_from_block():
eq_('Hello', quotations.extract_from_plain(
u"""Hello
Vào 14:24 8 tháng 6, 2017, Hùng Nguyễn <hungnguyen@xxx.com> đã viết:
> Xin chào
"""))
def test_quotation_marker_false_positive():
msg_body = """Visit us now for assistance...
>>> >>> http://www.domain.com <<<
Visit our site by clicking the link above"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_link_closed_with_quotation_marker_on_new_line():
msg_body = '''8.45am-1pm
From: somebody@example.com
Date: Wed, 16 May 2012 00:15:02 -0600
<http://email.example.com/c/dHJhY2tpbmdfY29kZT1mMDdjYzBmNzM1ZjYzMGIxNT
> <bob@example.com <mailto:bob@example.com> >
Requester: '''
eq_('8.45am-1pm', quotations.extract_from_plain(msg_body))
def test_link_breaks_quotation_markers_sequence():
# link starts and ends on the same line
msg_body = """Blah
On Thursday, October 25, 2012 at 3:03 PM, life is short. on Bob wrote:
>
> Post a response by replying to this email
>
(http://example.com/c/YzOTYzMmE) >
> life is short. (http://example.com/c/YzMmE)
>
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
# link starts after some text on one line and ends on another
msg_body = """Blah
On Monday, 24 September, 2012 at 3:46 PM, bob wrote:
> [Ticket #50] test from bob
>
> View ticket (http://example.com/action
_nonce=3dd518)
>
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
def test_from_block_starts_with_date():
msg_body = """Blah
Date: Wed, 16 May 2012 00:15:02 -0600
To: klizhentas@example.com
"""
eq_('Blah', quotations.extract_from_plain(msg_body))
def test_bold_from_block():
msg_body = """Hi
*From:* bob@example.com [mailto:
bob@example.com]
*Sent:* Wednesday, June 27, 2012 3:05 PM
*To:* travis@example.com
*Subject:* Hello
"""
eq_("Hi", quotations.extract_from_plain(msg_body))
def test_weird_date_format_in_date_block():
msg_body = """Blah
Date: Fri=2C 28 Sep 2012 10:55:48 +0000
From: tickets@example.com
To: bob@example.com
Subject: [Ticket #8] Test
"""
eq_('Blah', quotations.extract_from_plain(msg_body))
def test_dont_parse_quotations_for_forwarded_messages():
msg_body = """FYI
---------- Forwarded message ----------
From: bob@example.com
Date: Tue, Sep 4, 2012 at 1:35 PM
Subject: Two
line subject
To: rob@example.com
Text"""
eq_(msg_body, quotations.extract_from_plain(msg_body))
def test_forwarded_message_in_quotations():
msg_body = """Blah
-----Original Message-----
FYI
---------- Forwarded message ----------
From: bob@example.com
Date: Tue, Sep 4, 2012 at 1:35 PM
Subject: Two
line subject
To: rob@example.com
"""
eq_("Blah", quotations.extract_from_plain(msg_body))
def test_mark_message_lines():
# e - empty line
# s - splitter line
# m - line starting with quotation marker '>'
# t - the rest
lines = ['Hello', '',
# next line should be marked as splitter
'_____________',
'From: foo@bar.com',
'Date: Wed, 16 May 2012 00:15:02 -0600',
'',
'> Hi',
'',
'Signature']
eq_('tesssemet', quotations.mark_message_lines(lines))
lines = ['Just testing the email reply',
'',
'Robert J Samson',
'Sent from my iPhone',
'',
# all 3 next lines should be marked as splitters
'On Nov 30, 2011, at 12:47 PM, Skapture <',
('416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4'
'@skapture-staging.mailgun.org>'),
'wrote:',
'',
'Tarmo Lehtpuu has posted the following message on']
eq_('tettessset', quotations.mark_message_lines(lines))
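def test_extract_after_splitter_with_markers():
    # Extra illustration (an assumption-based companion to the tests above):
    # end to end, extract_from_plain() drops the splitter line together with
    # the marker-prefixed quotation that follows it.
    msg_body = """Reply text
On Nov 30, 2011, at 12:47 PM, Somebody <somebody@example.com> wrote:
> quoted text"""
    eq_("Reply text", quotations.extract_from_plain(msg_body))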
def test_process_marked_lines():
# quotations and last message lines are mixed
# consider all to be a last message
markers = 'tsemmtetm'
lines = [str(i) for i in range(len(markers))]
eq_(lines, quotations.process_marked_lines(lines, markers))
# no splitter => no markers
markers = 'tmm'
lines = ['1', '2', '3']
eq_(['1', '2', '3'], quotations.process_marked_lines(lines, markers))
# text after splitter without markers is quotation
markers = 'tst'
lines = ['1', '2', '3']
eq_(['1'], quotations.process_marked_lines(lines, markers))
# message + quotation + signature
markers = 'tsmt'
lines = ['1', '2', '3', '4']
eq_(['1', '4'], quotations.process_marked_lines(lines, markers))
# message + <quotation without markers> + nested quotation
markers = 'tstsmt'
lines = ['1', '2', '3', '4', '5', '6']
eq_(['1'], quotations.process_marked_lines(lines, markers))
# test links wrapped with parentheses
# link starts on the marker line
markers = 'tsmttem'
lines = ['text',
'splitter',
'>View (http://example.com',
'/abc',
')',
'',
'> quote']
eq_(lines[:1], quotations.process_marked_lines(lines, markers))
# link starts on the new line
markers = 'tmmmtm'
lines = ['text',
'>',
'>',
'>',
'(http://example.com) > ',
'> life is short. (http://example.com) '
]
eq_(lines[:1], quotations.process_marked_lines(lines, markers))
# check all "inline" replies
markers = 'tsmtmtm'
lines = ['text',
'splitter',
'>',
'(http://example.com)',
'>',
'inline reply',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
# inline reply with link not wrapped in parentheses
markers = 'tsmtm'
lines = ['text',
'splitter',
'>',
'inline reply with link http://example.com',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
# inline reply with link wrapped in parentheses
markers = 'tsmtm'
lines = ['text',
'splitter',
'>',
'inline reply (http://example.com)',
'>']
eq_(lines, quotations.process_marked_lines(lines, markers))
def test_preprocess():
msg = ('Hello\n'
'See <http://google.com\n'
'> for more\n'
'information On Nov 30, 2011, at 12:47 PM, Somebody <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.com>'
'wrote:\n'
'\n'
'> Hi')
# test the link is rewritten
# 'On <date> <person> wrote:' pattern starts from a new line
prepared_msg = ('Hello\n'
'See @@http://google.com\n'
'@@ for more\n'
'information\n'
' On Nov 30, 2011, at 12:47 PM, Somebody <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.com>'
'wrote:\n'
'\n'
'> Hi')
eq_(prepared_msg, quotations.preprocess(msg, '\n'))
msg = """
> <http://teemcl.mailgun.org/u/**aD1mZmZiNGU5ODQwMDNkZWZlMTExNm**
> MxNjQ4Y2RmOTNlMCZyPXNlcmdleS5v**YnlraG92JTQwbWFpbGd1bmhxLmNvbS**
> Z0PSUyQSZkPWUwY2U<http://example.org/u/aD1mZmZiNGU5ODQwMDNkZWZlMTExNmMxNjQ4Y>
"""
eq_(msg, quotations.preprocess(msg, '\n'))
# 'On <date> <person> wrote' shouldn't be spread across too many lines
msg = ('Hello\n'
'How are you? On Nov 30, 2011, at 12:47 PM,\n '
'Example <\n'
'416ffd3258d4d2fa4c85cfa4c44e1721d66e3e8f4\n'
'@example.org>'
'wrote:\n'
'\n'
'> Hi')
eq_(msg, quotations.preprocess(msg, '\n'))
msg = ('Hello On Nov 30, smb wrote:\n'
'Hi\n'
'On Nov 29, smb wrote:\n'
'hi')
prepared_msg = ('Hello\n'
' On Nov 30, smb wrote:\n'
'Hi\n'
'On Nov 29, smb wrote:\n'
'hi')
eq_(prepared_msg, quotations.preprocess(msg, '\n'))
def test_preprocess_postprocess_2_links():
msg_body = "<http://link1> <http://link2>"
eq_(msg_body, quotations.extract_from_plain(msg_body))
def body_iterator(msg, decode=False):
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, six.text_type):
yield payload
else:
yield payload.decode('utf8')
def test_standard_replies():
for filename in os.listdir(STANDARD_REPLIES):
filename = os.path.join(STANDARD_REPLIES, filename)
if not filename.endswith('.eml') or os.path.isdir(filename):
continue
with open(filename) as f:
message = email.message_from_file(f)
body = next(email.iterators.typed_subpart_iterator(message, subtype='plain'))
text = ''.join(body_iterator(body, True))
stripped_text = quotations.extract_from_plain(text)
reply_text_fn = filename[:-4] + '_reply_text'
if os.path.isfile(reply_text_fn):
with open(reply_text_fn) as f:
reply_text = f.read().strip()
else:
reply_text = 'Hello'
yield eq_, reply_text, stripped_text, \
"'%(reply)s' != %(stripped)s for %(fn)s" % \
{'reply': reply_text, 'stripped': stripped_text,
'fn': filename}
def test_split_email():
msg = """From: Mr. X
Date: 24 February 2016
To: Mr. Y
Subject: Hi
Attachments: none
Goodbye.
From: Mr. Y
To: Mr. X
Date: 24 February 2016
Subject: Hi
Attachments: none
Hello.
On 24th February 2016 at 09.32am, Conal wrote:
Hey!
On Mon, 2016-10-03 at 09:45 -0600, Stangel, Dan wrote:
> Mohan,
>
> We have not yet migrated the systems.
>
> Dan
>
> > -----Original Message-----
> > Date: Mon, 2 Apr 2012 17:44:22 +0400
> > Subject: Test
> > From: bob@xxx.mailgun.org
> > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
> >
> > Hi
> >
> > > From: bob@xxx.mailgun.org
> > > To: xxx@gmail.com; xxx@hotmail.com; xxx@yahoo.com; xxx@aol.com; xxx@comcast.net; xxx@nyc.rr.com
> > > Date: Mon, 2 Apr 2012 17:44:22 +0400
> > > Subject: Test
> > > Hi
> > >
> >
>
>
"""
expected_markers = "stttttsttttetesetesmmmmmmsmmmmmmmmmmmmmmmm"
markers = quotations.split_emails(msg)
eq_(markers, expected_markers)
def test_feedback_below_left_unparsed():
msg_body = """Please enter your feedback below. Thank you.
------------------------------------- Enter Feedback Below -------------------------------------
The user experience was unparallelled. Please continue production. I'm sending payment to ensure
that this line is intact."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed)
def test_appointment_2():
msg_body = """Invitation for an interview:
Date: Wednesday 3, October 2011
Time: 7 : 00am
Address: 130 Fox St
Please bring in your ID."""
parsed = quotations.extract_from_plain(msg_body)
eq_(msg_body, parsed)
| mailgun/talon | tests/text_quotations_test.py | Python | apache-2.0 | 21,510 | [
"VisIt"
] | 6ab0235063b66376a36995e93d18baef8d48a3b3861daffc8cc7c2d70ba9d756 |
import numpy as np
from upho.analysis.mappings_modifier import MappingsModifier
class ElementWeightsCalculator(object):
"""Extract weights on elements from eigenvectors"""
def __init__(self, unitcell, primitive):
"""
Parameters
----------
unitcell : Phonopy Atoms object
This may have a disordered atomic configuration.
primitive : Phonopy Primitive object
"""
self._extract_map_elements(unitcell)
self._extract_map_atoms_u2p(primitive)
def _extract_map_elements(self, unitcell):
natoms_u = unitcell.get_number_of_atoms()
elements = unitcell.get_chemical_symbols()
reduced_elements = sorted(set(elements), key=elements.index)
map_elements = []
for re in reduced_elements:
map_elements.append(
[i for i, v in enumerate(elements) if v == re])
if sum(len(v) for v in map_elements) != natoms_u:
raise ValueError("Mapping of elements is failed.")
self._map_elements = map_elements
self._reduced_elements = reduced_elements
def _extract_map_atoms_u2p(self, primitive):
p2s_map = primitive.get_primitive_to_supercell_map()
s2p_map = primitive.get_supercell_to_primitive_map()
natoms_u = len(s2p_map)
map_atoms_u2p = []
for iatom_s in p2s_map:
map_atoms_u2p.append(
[i for i, v in enumerate(s2p_map) if v == iatom_s])
if sum(len(v) for v in map_atoms_u2p) != natoms_u:
raise ValueError("Mapping of atoms_u2p is failed.")
self._map_atoms_u2p = map_atoms_u2p
def get_map_elements(self):
return self._map_elements
def get_map_atoms_u2p(self):
return self._map_atoms_u2p
def get_reduced_elements(self):
return self._reduced_elements
def get_number_of_elements(self):
return len(self._reduced_elements)
def run_star(self, vectors, ndims=3):
"""
Parameters
----------
vectors : (narms, natoms_u * ndims, nbands) array
ndims : Integer
number of dimensions of the space.
Returns
-------
weights : (narms, nelements, natoms_p, nbands) array
"""
weights = []
for vectors_arm in vectors:
weights_arm = self.run(vectors_arm, ndims)
weights.append(weights_arm)
return np.array(weights)
def run(self, vectors, ndims=3):
"""
Parameters
----------
vectors : (natoms_u * ndims, nbands) array
ndims : Integer
number of dimensions of the space.
Returns
-------
weights : (natoms_p, nelements, nbands) array
"""
map_atoms_u2p = self._map_atoms_u2p
map_elements = self._map_elements
shape = vectors.shape
nbands = shape[1]
tmp = vectors.reshape(shape[0] // ndims, ndims, nbands)
weights_atoms = np.linalg.norm(tmp, axis=1) ** 2
shape_weights = (len(map_atoms_u2p), len(map_elements), nbands)
weights = np.full(shape_weights, np.nan) # Initialization
for ip, lp in enumerate(map_atoms_u2p):
for ie, le in enumerate(map_elements):
indices = sorted(set(lp) & set(le))
weights[ip, ie] = np.sum(weights_atoms[indices], axis=0)
return weights
def project_vectors(self, vectors, ndims=3):
map_atoms_u2p = self._map_atoms_u2p
map_elements = self._map_elements
natoms_p = len(map_atoms_u2p)
num_elements = len(map_elements)
tmp = np.zeros_like(vectors[None, None]) # Add two dimensions
projected_vectors = (
np.repeat(np.repeat(tmp, natoms_p, axis=0), num_elements, axis=1))
for ip, lp in enumerate(map_atoms_u2p):
for ie, le in enumerate(map_elements):
indices_tmp = sorted(set(lp) & set(le))
indices = MappingsModifier(indices_tmp).expand_mappings(ndims)
if len(indices) > 0: # The element "le" exists on the sublattice.
projected_vectors[ip, ie, indices] = vectors[indices]
return projected_vectors
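# Minimal illustration of run() (assumptions: two atoms in the unit cell,
# one band, ndims = 3; not part of the module):
#
#     import numpy as np
#     # eigenvector of shape (natoms_u * ndims, nbands) = (6, 1):
#     # atom 0 displaced along x, atom 1 at rest
#     vectors = np.array([[1.0], [0.0], [0.0],
#                         [0.0], [0.0], [0.0]])
#     # run() first folds this to per-atom squared norms (here [1.0, 0.0])
#     # and then sums them per (primitive atom, element) pair.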
| yuzie007/ph_unfolder | upho/phonon/element_weights_calculator.py | Python | mit | 4,245 | [
"phonopy"
] | 36c41ba3889bab6b0016254da32f13870339389a44ed4ef5b2841437e85bdffe |
################################################################################
# Copyright (C) 2015 Hannu Hartikainen, Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Tests for the module bayespy.plot.
This file mostly contains functional tests. Since testing the plotting
capabilities relies on image comparisons, it's difficult to create
strict unit tests.
"""
import numpy as np
from matplotlib.testing.decorators import image_comparison
import bayespy.plot as bpplt
from bayespy.nodes import Bernoulli, Beta, Categorical, Dirichlet, \
Gamma, Gaussian, GaussianARD, Mixture, SumMultiply, Wishart
from bayespy.inference import VB
from bayespy.utils import random
@image_comparison(baseline_images=['gaussian_mixture'], extensions=['png'], remove_text=True)
def test_gaussian_mixture_plot():
"""
Test the gaussian_mixture plotting function.
The code is from http://www.bayespy.org/examples/gmm.html
"""
np.random.seed(1)
y0 = np.random.multivariate_normal([0, 0], [[1, 0], [0, 0.02]], size=50)
y1 = np.random.multivariate_normal([0, 0], [[0.02, 0], [0, 1]], size=50)
y2 = np.random.multivariate_normal([2, 2], [[1, -0.9], [-0.9, 1]], size=50)
y3 = np.random.multivariate_normal([-2, -2], [[0.1, 0], [0, 0.1]], size=50)
y = np.vstack([y0, y1, y2, y3])
bpplt.pyplot.plot(y[:,0], y[:,1], 'rx')
N = 200
D = 2
K = 10
alpha = Dirichlet(1e-5*np.ones(K),
name='alpha')
Z = Categorical(alpha,
plates=(N,),
name='z')
mu = Gaussian(np.zeros(D), 1e-5*np.identity(D),
plates=(K,),
name='mu')
Lambda = Wishart(D, 1e-5*np.identity(D),
plates=(K,),
name='Lambda')
Y = Mixture(Z, Gaussian, mu, Lambda,
name='Y')
Z.initialize_from_random()
Q = VB(Y, mu, Lambda, Z, alpha)
Y.observe(y)
Q.update(repeat=1000)
bpplt.gaussian_mixture_2d(Y, scale=2)
    # Have to define these limits because in some environments they may
    # otherwise differ and thus result in an image comparison failure
bpplt.pyplot.xlim([-3, 6])
bpplt.pyplot.ylim([-3, 5])
@image_comparison(baseline_images=['hinton_r'], extensions=['png'], remove_text=True)
def test_hinton_plot_dirichlet():
(R,P,Z) = _setup_bernoulli_mixture()
bpplt.hinton(R)
@image_comparison(baseline_images=['hinton_p'], extensions=['png'], remove_text=True)
def test_hinton_plot_beta():
(R,P,Z) = _setup_bernoulli_mixture()
bpplt.hinton(P)
@image_comparison(baseline_images=['hinton_z'], extensions=['png'], remove_text=True)
def test_hinton_plot_categorical():
(R,P,Z) = _setup_bernoulli_mixture()
bpplt.hinton(Z)
@image_comparison(baseline_images=['pdf'], extensions=['png'], remove_text=True)
def test_pdf_plot():
data = _setup_linear_regression()
bpplt.pdf(data['tau'], np.linspace(1e-6,1,100), color='k')
bpplt.pyplot.axvline(data['s']**(-2), color='r')
@image_comparison(baseline_images=['contour'], extensions=['png'], remove_text=True)
def test_contour_plot():
data = _setup_linear_regression()
bpplt.contour(data['B'], np.linspace(1,3,1000), np.linspace(1,9,1000),
n=10, colors='k')
bpplt.plot(data['c'], x=data['k'], color='r', marker='x', linestyle='None',
markersize=10, markeredgewidth=2)
def _setup_bernoulli_mixture():
"""
Setup code for the hinton tests.
This code is from http://www.bayespy.org/examples/bmm.html
"""
np.random.seed(1)
p0 = [0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9, 0.1, 0.9]
p1 = [0.1, 0.1, 0.1, 0.1, 0.1, 0.9, 0.9, 0.9, 0.9, 0.9]
p2 = [0.9, 0.9, 0.9, 0.9, 0.9, 0.1, 0.1, 0.1, 0.1, 0.1]
p = np.array([p0, p1, p2])
z = random.categorical([1/3, 1/3, 1/3], size=100)
x = random.bernoulli(p[z])
N = 100
D = 10
K = 10
R = Dirichlet(K*[1e-5],
name='R')
Z = Categorical(R,
plates=(N,1),
name='Z')
P = Beta([0.5, 0.5],
plates=(D,K),
name='P')
X = Mixture(Z, Bernoulli, P)
Q = VB(Z, R, X, P)
P.initialize_from_random()
X.observe(x)
Q.update(repeat=1000)
return (R,P,Z)
def _setup_linear_regression():
"""
Setup code for the pdf and contour tests.
This code is from http://www.bayespy.org/examples/regression.html
"""
np.random.seed(1)
k = 2 # slope
c = 5 # bias
s = 2 # noise standard deviation
x = np.arange(10)
y = k*x + c + s*np.random.randn(10)
X = np.vstack([x, np.ones(len(x))]).T
B = GaussianARD(0, 1e-6, shape=(2,))
F = SumMultiply('i,i', B, X)
tau = Gamma(1e-3, 1e-3)
Y = GaussianARD(F, tau)
Y.observe(y)
Q = VB(Y, B, tau)
Q.update(repeat=1000)
xh = np.linspace(-5, 15, 100)
Xh = np.vstack([xh, np.ones(len(xh))]).T
Fh = SumMultiply('i,i', B, Xh)
return locals()
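# A small interactive sketch (not part of the test suite) reusing the setup
# above; it mirrors test_pdf_plot without the image-comparison harness:
#
#     data = _setup_linear_regression()
#     bpplt.pdf(data['tau'], np.linspace(1e-6, 1, 100), color='k')
#     bpplt.pyplot.show()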
| dungvtdev/upsbayescpm | bayespy/tests/test_plot.py | Python | mit | 5,085 | [
"Gaussian"
] | 701d5ffa6e6ce6359d1a913959dfaaa8f8bb454129bddafc2caa848edb440a34 |
# -*- coding: utf-8 -*-
#
# Tax Calculator documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 9 17:06:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('../..'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.pngmath',
'sphinx.ext.viewcode',
]
# The napoleon extension supports both numpy and google style docstrings.
# For more information, including additional settings, visit:
# http://sphinxcontrib-napoleon.readthedocs.org/en/latest/
napoleon_include_private_with_doc = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tax Calculator'
copyright = u'2015, Open Source Policy Center'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
if sys.version_info[0] == 3:
from unittest.mock import MagicMock
elif sys.version_info[0] == 2:
from mock import Mock as MagicMock
else:
print("Please install or update python to at least version 2.x")
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numba', 'numba.jit', 'numba.vectorize', 'numba.guvectorize']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
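# With these mocks registered, autodoc can import project modules that do
# e.g. ``import numba`` or ``from numba import jit`` even when numba is not
# installed; any attribute lookup on the mock simply yields another Mock.
# A quick sanity check (an illustration, assuming the block above has run):
#
#     import numba
#     assert isinstance(numba.jit, Mock)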
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TaxCalculatordoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TaxCalculator.tex', u'Tax Calculator Documentation',
u'Open Source Policy Center', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'taxcalculator', u'Tax Calculator Documentation',
[u'Open Source Policy Center'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TaxCalculator', u'Tax Calculator Documentation',
u'Open Source Policy Center', 'TaxCalculator',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Tax Calculator'
epub_author = u'Open Source Policy Center'
epub_publisher = u'Open Source Policy Center'
epub_copyright = u'2015, Open Source Policy Center'
# The basename for the epub file. It defaults to the project name.
# epub_basename = u'Tax Calculator'
# The HTML theme for the epub output.
# Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
| mcdeaton13/Tax-Calculator | docs/source/conf.py | Python | mit | 11,308 | [
"VisIt"
] | bd05eb33234bc1e965e49b9731d197374c0c8a5e6165e6a92918f36f70f4a91c |
# -*- coding: utf-8 -*-
# Copyright 2004-2018 Davide Alberani <da@erlug.linux.it>
# 2008-2018 H. Turgut Uyar <uyar@tekir.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides the classes (and the instances) that are used to parse
the IMDb pages on the www.imdb.com server about a movie.
For example, for Brian De Palma's "The Untouchables", the referred pages
would be:
combined details
http://www.imdb.com/title/tt0094226/reference
plot summary
http://www.imdb.com/title/tt0094226/plotsummary
...and so on.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import re
from imdb import PY2
from imdb import imdbURL_base
from imdb.Company import Company
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.utils import _Container, KIND_MAP
from .piculet import Path, Rule, Rules, preprocessors, transformers
from .utils import DOMParserBase, analyze_imdbid, build_person
if PY2:
from urllib import unquote
else:
from urllib.parse import unquote
# Dictionary used to convert some section's names.
_SECT_CONV = {
'directed': 'director',
'directed by': 'director',
'directors': 'director',
'editors': 'editor',
'writing credits': 'writer',
'writers': 'writer',
'produced': 'producer',
'cinematography': 'cinematographer',
'film editing': 'editor',
'casting': 'casting director',
'costume design': 'costume designer',
'makeup department': 'make up',
'production management': 'production manager',
'second unit director or assistant director': 'assistant director',
'costume and wardrobe department': 'costume department',
'costume departmen': 'costume department',
'sound department': 'sound crew',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'also known as': 'akas',
'country': 'countries',
'runtime': 'runtimes',
'language': 'languages',
'certification': 'certificates',
'genre': 'genres',
'created': 'creator',
'creators': 'creator',
'color': 'color info',
'plot': 'plot outline',
'art directors': 'art direction',
'assistant directors': 'assistant director',
'set decorators': 'set decoration',
'visual effects department': 'visual effects',
'miscellaneous': 'miscellaneous crew',
'make up department': 'make up',
'plot summary': 'plot outline',
'cinematographers': 'cinematographer',
'camera department': 'camera and electrical department',
'costume designers': 'costume designer',
'production designers': 'production design',
'production managers': 'production manager',
'music original': 'original music',
'casting directors': 'casting director',
'other companies': 'miscellaneous companies',
'producers': 'producer',
'special effects by': 'special effects department',
'special effects': 'special effects companies'
}
def _manageRoles(mo):
"""Perform some transformation on the html, so that roleIDs can
be easily retrieved."""
firstHalf = mo.group(1)
secondHalf = mo.group(2)
newRoles = []
roles = secondHalf.split(' / ')
for role in roles:
role = role.strip()
if not role:
continue
roleID = analyze_imdbid(role)
if roleID is None:
roleID = '/'
else:
roleID += '/'
newRoles.append('<div class="_imdbpyrole" roleid="%s">%s</div>' % (
roleID, role.strip()
))
return firstHalf + ' / '.join(newRoles) + mo.group(3)
_reRolesMovie = re.compile(r'(<td class="character">)(.*?)(</td>)', re.I | re.M | re.S)
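# For instance, a cast cell such as
#     <td class="character"><a href="/character/ch0003157/">Al Capone</a></td>
# is rewritten (illustratively) to
#     <td class="character"><div class="_imdbpyrole" roleid="<imdbID>/">...</div></td>
# where roleid carries the imdbID that analyze_imdbid extracts from the href,
# or a bare '/' when no link is present.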
def _replaceBR(mo):
"""Replaces <br> tags with '::' (useful for some akas)"""
txt = mo.group(0)
return txt.replace('<br>', '::')
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
origNotesSep=' (', newNotesSep='::(', strip=None):
"""Return a splitter function suitable for a given set of data."""
def splitter(x):
if not x:
return x
x = x.strip()
if not x:
return x
if lstrip is not None:
x = x.lstrip(lstrip).lstrip()
lx = x.split(sep)
lx[:] = [_f for _f in [j.strip() for j in lx] if _f]
if comments:
lx[:] = [j.replace(origNotesSep, newNotesSep, 1) for j in lx]
if strip:
lx[:] = [j.strip(strip) for j in lx]
return lx
return splitter
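# For instance (illustrative input, not taken from a real page):
#
#     split_cert = makeSplitter(origNotesSep=' (', newNotesSep='::(')
#     split_cert('UK:15 (DVD) | USA:R')   ->   ['UK:15::(DVD)', 'USA:R']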
def _toInt(val, replace=()):
"""Return the value, converted to integer, or None; if present, 'replace'
must be a list of tuples of values to replace."""
for before, after in replace:
val = val.replace(before, after)
try:
return int(val)
except (TypeError, ValueError):
return None
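# e.g. _toInt('top rated movies: #42', [('top rated movies: #', '')]) -> 42,
# while _toInt('n/a') -> None.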
_re_og_title = re.compile(
r'(.*) \((?:(?:(.+)(?= ))? ?(\d{4})(?:(–)(\d{4}| ))?|(.+))\)',
re.UNICODE
)
def analyze_og_title(og_title):
data = {}
match = _re_og_title.match(og_title)
if og_title and not match:
# assume it's a title in production, missing release date information
return {'title': og_title}
data['title'] = match.group(1)
if match.group(3):
data['year'] = int(match.group(3))
kind = match.group(2) or match.group(6)
if kind is None:
kind = 'movie'
else:
kind = kind.lower()
kind = KIND_MAP.get(kind, kind)
data['kind'] = kind
year_separator = match.group(4)
# There is a year separator so assume an ongoing or ended series
if year_separator is not None:
end_year = match.group(5)
if end_year is not None:
data['series years'] = '%(year)d-%(end_year)s' % {
'year': data['year'],
'end_year': end_year.strip(),
}
elif kind.endswith('series'):
data['series years'] = '%(year)d-' % {'year': data['year']}
# No year separator and series, so assume that it ended the same year
elif kind.endswith('series') and 'year' in data:
data['series years'] = '%(year)d-%(year)d' % {'year': data['year']}
if data['kind'] == 'episode' and data['title'][0] == '"':
quote_end = data['title'].find('"', 1)
data['tv series title'] = data['title'][1:quote_end]
data['title'] = data['title'][quote_end + 1:].strip()
return data
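# For example (hypothetical og:title values):
#
#     analyze_og_title('The Untouchables (1987)')
#         -> {'title': 'The Untouchables', 'year': 1987, 'kind': 'movie'}
#
# Titles still in production, which carry no parenthesized year, fall back
# to a bare {'title': ...} dictionary.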
def analyze_certificates(certificates):
def reducer(acc, el):
cert_re = re.compile(r'^(.+):(.+)$', re.UNICODE)
if cert_re.match(el):
acc.append(el)
elif acc:
acc[-1] = u'{}::{}'.format(
acc[-1],
el,
)
return acc
certificates = [el.strip() for el in certificates.split('\n') if el.strip()]
return functools.reduce(reducer, certificates, [])
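# For example, a certificates cell rendered as separate lines is folded into
# 'country:rating' entries, with continuation lines appended after '::'
# (illustrative input):
#
#     analyze_certificates('UK:15\nre-rating\nUSA:R')
#         -> ['UK:15::re-rating', 'USA:R']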
class DOMHTMLMovieParser(DOMParserBase):
"""Parser for the "reference" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
mparser = DOMHTMLMovieParser()
result = mparser.parse(reference_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='title',
extractor=Path('//meta[@property="og:title"]/@content',
transform=analyze_og_title)
),
# parser for misc sections like 'casting department', 'stunts', ...
Rule(
key='misc sections',
extractor=Rules(
foreach='//h4[contains(@class, "ipl-header__content")]',
rules=[
Rule(
key=Path('./@name', transform=lambda x: x.replace('_', ' ').strip()),
extractor=Rules(
foreach='../../following-sibling::table[1]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[1]/a[@href]/@href')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link'))
)
)
)
]
)
),
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/')
)
)
),
Rule(
key='myrating',
extractor=Path('//span[@id="voteuser"]//text()')
),
Rule(
key='plot summary',
extractor=Path('//td[starts-with(text(), "Plot")]/..//p/text()',
transform=lambda x: x.strip().rstrip('|').rstrip())
),
Rule(
key='genres',
extractor=Path(
foreach='//td[starts-with(text(), "Genre")]/..//li/a',
path='./text()'
)
),
Rule(
key='runtimes',
extractor=Path(
foreach='//td[starts-with(text(), "Runtime")]/..//li',
path='./text()',
transform=lambda x: x.strip().replace(' min', '')
)
),
Rule(
key='countries',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./text()'
)
),
Rule(
key='country codes',
extractor=Path(
foreach='//td[starts-with(text(), "Countr")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip().lower()
)
),
Rule(
key='language',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./text()'
)
),
Rule(
key='language codes',
extractor=Path(
foreach='//td[starts-with(text(), "Language")]/..//li/a',
path='./@href',
transform=lambda x: x.split('/')[2].strip()
)
),
Rule(
key='color info',
extractor=Path(
foreach='//td[starts-with(text(), "Color")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='aspect ratio',
extractor=Path(
'//td[starts-with(text(), "Aspect")]/..//li/text()',
transform=transformers.strip
)
),
Rule(
key='sound mix',
extractor=Path(
foreach='//td[starts-with(text(), "Sound Mix")]/..//li/a',
path='./text()',
transform=lambda x: x.replace(' (', '::(')
)
),
Rule(
key='certificates',
extractor=Path(
'//td[starts-with(text(), "Certificat")]/..//text()',
transform=analyze_certificates
)
),
        # Collects akas not enclosed in <i> tags.
Rule(
key='other akas',
extractor=Path(
'//section[contains(@class, "listo")]'
'//td[starts-with(text(), "Also Known As")]/..//ul//text()',
transform=makeSplitter(
sep='::', origNotesSep='" - ', newNotesSep='::', strip='"'
)
)
),
Rule(
key='creator',
extractor=Rules(
foreach='//td[starts-with(text(), "Creator")]/..//a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin writer',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Writer")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='thin director',
extractor=Rules(
foreach='//div[starts-with(normalize-space(text()), "Director")]/ul/li[1]/a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
)
],
transform=lambda x: build_person(
x.get('name') or '',
personID=analyze_imdbid(x.get('link'))
)
)
),
Rule(
key='top/bottom rank',
extractor=Path(
'//li[@class="ipl-inline-list__item"]//a[starts-with(@href, "/chart/")]/text()'
)
),
Rule(
key='original air date',
extractor=Path('//span[@imdbpy="airdate"]/text()')
),
Rule(
key='series years',
extractor=Path(
'//div[@id="tn15title"]//span[starts-with(text(), "TV series")]/text()',
transform=lambda x: x.replace('TV series', '').strip()
)
),
Rule(
key='season/episode',
extractor=Path(
'//div[@class="titlereference-overview-season-episode-section"]/ul//text()',
transform=transformers.strip
)
),
Rule(
key='number of episodes',
extractor=Path(
'//a[starts-with(text(), "All Episodes")]/text()',
transform=lambda x: int(x.replace('All Episodes', '').strip()[1:-1])
)
),
Rule(
key='episode number',
extractor=Path(
'//div[@id="tn15epnav"]/text()',
transform=lambda x: int(re.sub(r'[^a-z0-9 ]', '',
x.lower()).strip().split()[0]))
),
Rule(
key='previous episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Previous")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='next episode',
extractor=Path(
'//span[@class="titlereference-overview-episodes-links"]'
'//a[contains(text(), "Next")]/@href',
transform=analyze_imdbid
)
),
Rule(
key='number of seasons',
extractor=Path(
'//span[@class="titlereference-overview-years-links"]/../a[1]/text()',
transform=int
)
),
Rule(
key='tv series link',
extractor=Path('//a[starts-with(text(), "All Episodes")]/@href')
),
Rule(
key='akas',
extractor=Path(
foreach='//i[@class="transl"]',
path='./text()',
transform=lambda x: x
.replace(' ', ' ')
.rstrip('-')
.replace('" - ', '"::', 1)
.strip('"')
.replace(' ', ' ')
)
),
Rule(
key='production status',
extractor=Path(
'//td[starts-with(text(), "Status:")]/..//div[@class="info-content"]//text()',
transform=lambda x: x.strip().split('|')[0].strip().lower()
)
),
Rule(
key='production status updated',
extractor=Path(
'//td[starts-with(text(), "Status Updated:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='production comments',
extractor=Path(
'//td[starts-with(text(), "Comments:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='production note',
extractor=Path(
'//td[starts-with(text(), "Note:")]/'
'..//div[@class="info-content"]//text()',
transform=transformers.strip
)
),
Rule(
key='companies',
extractor=Rules(
foreach="//ul[@class='simpleList']",
rules=[
Rule(
key=Path('preceding-sibling::header[1]/div/h4/text()', transform=transformers.lower),
extractor=Rules(
foreach='./li',
rules=[
Rule(
key='name',
extractor=Path('./a//text()')
),
Rule(
key='comp-link',
extractor=Path('./a/@href')
),
Rule(
key='notes',
extractor=Path('./text()')
)
],
transform=lambda x: Company(
name=x.get('name') or '',
accessSystem='http',
companyID=analyze_imdbid(x.get('comp-link')),
notes=(x.get('notes') or '').strip()
)
)
)
]
)
),
Rule(
key='rating',
extractor=Path('(//span[@class="ipl-rating-star__rating"])[1]/text()')
),
Rule(
key='votes',
extractor=Path('//span[@class="ipl-rating-star__total-votes"][1]/text()')
),
Rule(
key='cover url',
extractor=Path('//img[@alt="Poster"]/@src')
)
]
preprocessors = [
('/releaseinfo">', '"><span imdbpy="airdate">'),
(re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I), r'</div><div>\1'),
('<small>Full cast and crew for<br>', ''),
('<td> </td>', '<td>...</td>'),
(re.compile(r'<span class="tv-extra">TV mini-series(\s+.*?)</span>', re.I),
r'<span class="tv-extra">TV series\1</span> (mini)'),
(_reRolesMovie, _manageRoles),
(_reAkas, _replaceBR)
]
def preprocess_dom(self, dom):
# Handle series information.
xpath = self.xpath(dom, "//b[text()='Series Crew']")
if xpath:
b = xpath[-1] # In doubt, take the last one.
for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
name = a.get('name')
if name:
a.set('name', 'series %s' % name)
# Remove links to IMDbPro.
preprocessors.remove(dom, '//span[@class="pro-link"]')
# Remove some 'more' links (keep others, like the one around
# the number of votes).
preprocessors.remove(dom, '//a[@class="tn15more"][starts-with(@href, "/title/")]')
# Remove the "rest of list" in cast.
preprocessors.remove(dom, '//td[@colspan="4"]/..')
return dom
re_space = re.compile(r'\s+')
re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
def postprocess_data(self, data):
# Convert section names.
for sect in list(data.keys()):
if sect in _SECT_CONV:
data[_SECT_CONV[sect]] = data[sect]
del data[sect]
sect = _SECT_CONV[sect]
# Filter out fake values.
for key in data:
value = data[key]
if isinstance(value, list) and value:
if isinstance(value[0], Person):
data[key] = [x for x in value if x.personID is not None]
if isinstance(value[0], _Container):
for obj in data[key]:
obj.accessSystem = self._as
obj.modFunct = self._modFunct
for key in ['title']:
if (key in data) and isinstance(data[key], dict):
subdata = data[key]
del data[key]
data.update(subdata)
misc_sections = data.get('misc sections')
if misc_sections is not None:
for section in misc_sections:
data.update(section)
del data['misc sections']
if 'akas' in data or 'other akas' in data:
akas = data.get('akas') or []
other_akas = data.get('other akas') or []
akas += other_akas
nakas = []
for aka in akas:
aka = aka.strip()
if aka.endswith('" -'):
aka = aka[:-3].rstrip()
nakas.append(aka)
if 'akas' in data:
del data['akas']
if 'other akas' in data:
del data['other akas']
if nakas:
data['akas'] = nakas
if 'runtimes' in data:
data['runtimes'] = [x.replace(' min', '')
for x in data['runtimes']]
if 'number of seasons' in data:
data['seasons'] = [str(i) for i in range(1, data['number of seasons'] + 1)]
if 'season/episode' in data:
tokens = data['season/episode'].split('Episode')
try:
data['season'] = int(tokens[0].split('Season')[1])
        except Exception:
data['season'] = 'unknown'
try:
data['episode'] = int(tokens[1])
        except Exception:
data['episode'] = 'unknown'
del data['season/episode']
for k in ('writer', 'director'):
t_k = 'thin %s' % k
if t_k not in data:
continue
if k not in data:
data[k] = data[t_k]
del data[t_k]
if 'top/bottom rank' in data:
tbVal = data['top/bottom rank'].lower()
if tbVal.startswith('top'):
tbKey = 'top 250 rank'
tbVal = _toInt(tbVal, [('top rated movies: #', '')])
else:
tbKey = 'bottom 100 rank'
tbVal = _toInt(tbVal, [('bottom rated movies: #', '')])
if tbVal:
data[tbKey] = tbVal
del data['top/bottom rank']
if 'year' in data and data['year'] == '????':
del data['year']
if 'tv series link' in data:
if 'tv series title' in data:
data['episode of'] = Movie(title=data['tv series title'],
movieID=analyze_imdbid(data['tv series link']),
accessSystem=self._as,
modFunct=self._modFunct)
data['episode of']['kind'] = 'tv series'
del data['tv series title']
del data['tv series link']
if 'rating' in data:
try:
data['rating'] = float(data['rating'].replace('/10', ''))
except (TypeError, ValueError):
pass
if data['rating'] == 0:
del data['rating']
if 'votes' in data:
try:
votes = data['votes'].replace('(', '').replace(')', '').replace(',', '').replace('votes', '')
data['votes'] = int(votes)
except (TypeError, ValueError):
pass
companies = data.get('companies')
if companies:
for section in companies:
for key, value in section.items():
if key in data:
key = '%s companies' % key
data.update({key: value})
del data['companies']
return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
xplot = x.get('plot', '').strip()
if xauthor:
xplot += '::%s' % xauthor
return xplot
class DOMHTMLPlotParser(DOMParserBase):
"""Parser for the "plot summary" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a 'plot' key, containing a list
of string with the structure: 'summary::summary_author <author@email>'.
Example::
pparser = HTMLPlotParser()
result = pparser.parse(plot_summary_html_string)
"""
_defGetRefs = True
    # Note that IMDb recently started putting the author's email
    # only in the link, which we're not collecting here.
rules = [
Rule(
key='plot',
extractor=Rules(
foreach='//ul[@id="plot-summaries-content"]/li',
rules=[
Rule(
key='plot',
extractor=Path('./p//text()')
),
Rule(
key='author',
extractor=Path('.//div[@class="author-container"]//a/text()')
)
],
transform=_process_plotsummary
)
),
Rule(
key='synopsis',
extractor=Path(
foreach='//ul[@id="plot-synopsis-content"]',
path='.//li//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//li[@id="no-summary-content"]')
return dom
def postprocess_data(self, data):
if 'synopsis' in data and data['synopsis'][0] and 'a Synopsis for this title' in data['synopsis'][0]:
del data['synopsis']
return data
def _process_award(x):
award = {}
_award = x.get('award')
if _award is not None:
_award = _award.strip()
award['award'] = _award
if not award['award']:
return {}
award['year'] = x.get('year').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = x.get('result').strip()
category = x.get('category').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip()
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
class DOMHTMLAwardsParser(DOMParserBase):
"""Parser for the "awards" page of a given person or movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
awparser = HTMLAwardsParser()
result = awparser.parse(awards_html_string)
"""
subject = 'title'
_containsObjects = True
rules = [
Rule(
key='awards',
extractor=Rules(
foreach='//table//big',
rules=[
Rule(
key=Path('./a'),
extractor=Rules(
foreach='./ancestor::tr[1]/following-sibling::tr/td[last()][not(@colspan)]',
rules=[
Rule(
key='year',
extractor=Path('./td[1]/a/text()')
),
Rule(
key='result',
extractor=Path('../td[2]/b/text()')
),
Rule(
key='award',
extractor=Path('./td[3]/text()')
),
Rule(
key='category',
extractor=Path('./text()[1]')
),
Rule(
key='with',
extractor=Path(
'./small[starts-with(text(), "Shared with:")]/'
'following-sibling::a[1]/text()'
)
),
Rule(
key='notes',
extractor=Path('./small[last()]//text()')
),
Rule(
key='anchor',
extractor=Path('.//text()')
)
],
transform=_process_award
)
)
]
)
),
Rule(
key='recipients',
extractor=Rules(
foreach='//table//big',
rules=[
Rule(
key=Path('./a'),
extractor=Rules(
foreach='./ancestor::tr[1]/following-sibling::tr'
'/td[last()]/small[1]/preceding-sibling::a',
rules=[
Rule(
key='name',
extractor=Path('./text()')
),
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='anchor',
extractor=Path('..//text()')
)
]
)
)
]
)
)
]
preprocessors = [
(re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
r'\1</table>'),
(re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
r'</table><table class="_imdbpy">\1'),
(re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
(re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
(re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
]
def preprocess_dom(self, dom):
"""Repeat td elements according to their rowspan attributes
in subsequent tr elements.
"""
cols = self.xpath(dom, "//td[@rowspan]")
for col in cols:
span = int(col.get('rowspan'))
del col.attrib['rowspan']
position = len(self.xpath(col, "./preceding-sibling::td"))
row = col.getparent()
for tr in self.xpath(row, "./following-sibling::tr")[:span - 1]:
# if not cloned, child will be moved to new parent
clone = self.clone(col)
tr.insert(position, clone)
return dom
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for key in list(data.keys()):
dom = self.get_dom(key)
assigner = self.xpath(dom, "//a/text()")[0]
for entry in data[key]:
if 'name' not in entry:
if not entry:
continue
# this is an award, not a recipient
entry['assigner'] = assigner.strip()
# find the recipients
matches = [p for p in data[key]
if 'name' in p and (entry['anchor'] == p['anchor'])]
if self.subject == 'title':
recipients = [
Person(name=recipient['name'],
personID=analyze_imdbid(recipient['link']))
for recipient in matches
]
entry['to'] = recipients
elif self.subject == 'name':
recipients = [
Movie(title=recipient['name'],
movieID=analyze_imdbid(recipient['link']))
for recipient in matches
]
entry['for'] = recipients
nd.append(entry)
del entry['anchor']
return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
"""Parser for the "taglines" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTaglinesParser()
result = tparser.parse(taglines_html_string)
"""
rules = [
Rule(
key='taglines',
extractor=Path(
foreach='//div[@id="taglines_content"]/div',
path='.//text()'
)
)
]
def preprocess_dom(self, dom):
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@class="header"]')
preprocessors.remove(dom, '//div[@id="taglines_content"]/div[@id="no_content"]')
return dom
def postprocess_data(self, data):
if 'taglines' in data:
data['taglines'] = [tagline.strip() for tagline in data['taglines']]
return data
class DOMHTMLKeywordsParser(DOMParserBase):
"""Parser for the "keywords" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
kwparser = DOMHTMLKeywordsParser()
result = kwparser.parse(keywords_html_string)
"""
rules = [
Rule(
key='keywords',
extractor=Path(
foreach='//a[starts-with(@href, "/keyword/")]',
path='./text()',
transform=lambda x: x.lower().replace(' ', '-')
)
)
]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
"""Parser for the "alternate versions" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
avparser = DOMHTMLAlternateVersionsParser()
result = avparser.parse(alternateversions_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='alternate versions',
extractor=Path(
foreach='//ul[@class="trivia"]/li',
path='.//text()',
transform=transformers.strip
)
)
]
class DOMHTMLTriviaParser(DOMParserBase):
"""Parser for the "trivia" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTriviaParser()
result = tparser.parse(trivia_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='trivia',
extractor=Path(
foreach='//div[@class="sodatext"]',
path='.//text()',
transform=transformers.strip
)
)
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
preprocessors.remove(dom, '//span[@class="linksoda"]')
return dom
class DOMHTMLSoundtrackParser(DOMParserBase):
"""Parser for the "soundtrack" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
stparser = DOMHTMLSoundtrackParser()
result = stparser.parse(soundtrack_html_string)
"""
_defGetRefs = True
preprocessors = [('<br />', '\n'), ('<br>', '\n')]
rules = [
Rule(
key='soundtrack',
extractor=Path(
foreach='//div[@class="list"]//div',
path='.//text()',
transform=transformers.strip
)
)
]
def postprocess_data(self, data):
if 'soundtrack' in data:
nd = []
for x in data['soundtrack']:
ds = x.split('\n')
title = ds[0]
if title[0] == '"' and title[-1] == '"':
title = title[1:-1]
nds = []
newData = {}
for l in ds[1:]:
if ' with ' in l or ' by ' in l or ' from ' in l \
or ' of ' in l or l.startswith('From '):
nds.append(l)
else:
if nds:
nds[-1] += l
else:
nds.append(l)
newData[title] = {}
for l in nds:
skip = False
for sep in ('From ',):
if l.startswith(sep):
fdix = len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
skip = True
if not skip:
for sep in ' with ', ' by ', ' from ', ' of ':
fdix = l.find(sep)
if fdix != -1:
fdix = fdix + len(sep)
kind = l[:fdix].rstrip().lower()
info = l[fdix:].lstrip()
newData[title][kind] = info
break
nd.append(newData)
data['soundtrack'] = nd
return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
"""Parser for the "crazy credits" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
ccparser = DOMHTMLCrazyCreditsParser()
result = ccparser.parse(crazycredits_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='crazy credits',
extractor=Path(
foreach='//ul/li/tt',
path='.//text()',
                transform=lambda x: x.replace('\n', ' ').replace('  ', ' ')
)
)
]
def _process_goof(x):
if x['spoiler_category']:
return x['spoiler_category'].strip() + ': SPOILER: ' + x['text'].strip()
else:
return x['category'].strip() + ': ' + x['text'].strip()
class DOMHTMLGoofsParser(DOMParserBase):
"""Parser for the "goofs" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
gparser = DOMHTMLGoofsParser()
result = gparser.parse(goofs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='goofs',
extractor=Rules(
foreach='//div[@class="soda odd"]',
rules=[
Rule(
key='text',
extractor=Path('./text()')
),
Rule(
key='category',
extractor=Path('./preceding-sibling::h4[1]/text()')
),
Rule(
key='spoiler_category',
extractor=Path('./h4/text()')
)
],
transform=_process_goof
)
)
]
class DOMHTMLQuotesParser(DOMParserBase):
"""Parser for the "memorable quotes" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
qparser = DOMHTMLQuotesParser()
result = qparser.parse(quotes_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='quotes_odd',
extractor=Path(
foreach='//div[@class="quote soda odd"]',
path='.//text()',
transform=lambda x: x
.strip()
.replace(' \n', '::')
.replace('::\n', '::')
.replace('\n', ' ')
)
),
Rule(
key='quotes_even',
extractor=Path(
foreach='//div[@class="quote soda even"]',
path='.//text()',
transform=lambda x: x
.strip()
.replace(' \n', '::')
.replace('::\n', '::')
.replace('\n', ' ')
)
)
]
preprocessors = [
(re.compile('<a href="#" class="hidesoda hidden">Hide options</a><br>', re.I), '')
]
def preprocess_dom(self, dom):
# Remove "link this quote" links.
preprocessors.remove(dom, '//span[@class="linksoda"]')
preprocessors.remove(dom, '//div[@class="sharesoda_pre"]')
return dom
def postprocess_data(self, data):
quotes = data.get('quotes_odd', []) + data.get('quotes_even', [])
if not quotes:
return {}
quotes = [q.split('::') for q in quotes]
return {'quotes': quotes}
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
rules = [
Rule(
key='release dates',
extractor=Rules(
foreach='//table[@id="release_dates"]//tr',
rules=[
Rule(
key='country',
extractor=Path('.//td[1]//text()')
),
Rule(
key='date',
extractor=Path('.//td[2]//text()')
),
Rule(
key='notes',
extractor=Path('.//td[3]//text()')
)
]
)
),
Rule(
key='akas',
extractor=Rules(
foreach='//table[@id="akas"]//tr',
rules=[
Rule(
key='countries',
extractor=Path('./td[1]/text()')
),
Rule(
key='title',
extractor=Path('./td[2]/text()')
)
]
)
)
]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')
]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data):
return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date):
continue
country = country.strip()
date = date.strip()
if not (country and date):
continue
notes = i.get('notes')
info = '%s::%s' % (country, date)
if notes:
notes = notes.replace('\n', '')
i['notes'] = notes
info += notes
rl.append(info)
if releases:
data['raw release dates'] = data['release dates']
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = (aka.get('title') or '').strip()
if not title:
continue
countries = (aka.get('countries') or '').split(',')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s::%s' % (title, country.strip()))
if akas:
data['raw akas'] = data['akas']
del data['akas']
if nakas:
data['akas'] = data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
    re_means = re.compile(r'mean\s*=\s*([0-9]\.[0-9])\s*median\s*=\s*([0-9])', re.I)
rules = [
Rule(
key='votes',
extractor=Rules(
foreach='//th[@class="firstTableCoulmn"]/../../tr',
rules=[
Rule(
key='ordinal',
extractor=Path('./td[1]/div//text()')
),
Rule(
key='votes',
extractor=Path('./td[3]/div/div//text()')
)
]
)
),
Rule(
key='mean and median',
extractor=Path(
'//div[starts-with(normalize-space(text()), "Arithmetic mean")]/text()'
)
),
Rule(
key='demographics',
extractor=Rules(
foreach='//div[@class="smallcell"]',
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='rating',
extractor=Path('..//div[@class="bigcell"]//text()')
),
Rule(
key='votes',
extractor=Path('./a/text()')
)
]
)
)
]
def postprocess_data(self, data):
nd = {}
demographics = data.get('demographics')
if demographics:
dem = {}
for dem_data in demographics:
link = (dem_data.get('link') or '').strip()
votes = (dem_data.get('votes') or '').strip()
rating = (dem_data.get('rating') or '').strip()
if not (link and votes and rating):
continue
eq_idx = link.rfind('=')
if eq_idx == -1:
continue
info = link[eq_idx + 1:].replace('_', ' ')
try:
votes = int(votes.replace(',', ''))
except Exception:
continue
try:
rating = float(rating)
except Exception:
continue
dem[info] = {'votes': votes, 'rating': rating}
nd['demographics'] = dem
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for v_info in votes:
ordinal = v_info.get('ordinal')
nr_votes = v_info.get('votes')
if not (ordinal and nr_votes):
continue
try:
ordinal = int(ordinal)
except Exception:
continue
try:
nr_votes = int(nr_votes.replace(',', ''))
except Exception:
continue
nd['number of votes'][ordinal] = nr_votes
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try:
am = float(am)
except (ValueError, OverflowError):
pass
if isinstance(am, float):
nd['arithmetic mean'] = am
try:
med = int(med)
except (ValueError, OverflowError):
pass
if isinstance(med, int):
nd['median'] = med
return nd
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'):
href = href[1:]
# TODO: imdbURL_base may be set by the user!
href = '%s%s' % (imdbURL_base, href)
return href
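# e.g. _normalize_href('/title/tt0094226/') yields
# imdbURL_base + 'title/tt0094226/', i.e. an absolute URL on the IMDb site.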
class DOMHTMLCriticReviewsParser(DOMParserBase):
"""Parser for the "critic reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
crparser = DOMHTMLCriticReviewsParser()
result = crparser.parse(criticreviews_html_string)
"""
kind = 'critic reviews'
rules = [
Rule(
key='metascore',
extractor=Path('//div[@class="metascore_wrap"]/div/span//text()')
),
Rule(
key='metacritic url',
extractor=Path('//div[@class="article"]/div[@class="see-more"]/a/@href')
)
]
class DOMHTMLReviewsParser(DOMParserBase):
"""Parser for the "reviews" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
rparser = DOMHTMLReviewsParser()
result = rparser.parse(reviews_html_string)
"""
rules = [
Rule(
key='reviews',
extractor=Rules(
foreach='//div[@class="review-container"]',
rules=[
Rule(
key='text',
extractor=Path('.//div[@class="text"]//text()')
),
Rule(
key='helpful',
extractor=Path('.//div[@class="text-muted"]/text()[1]')
),
Rule(
key='title',
extractor=Path('.//div[@class="title"]//text()')
),
Rule(
key='author',
extractor=Path('.//span[@class="display-name-link"]/a/@href')
),
Rule(
key='date',
extractor=Path('.//span[@class="review-date"]//text()')
),
Rule(
key='rating',
extractor=Path('.//span[@class="point-scale"]/preceding-sibling::span[1]/text()')
)
],
transform=lambda x: ({
                    'content': x.get('text', '').replace('\n', ' ').replace('  ', ' ').strip(),
'helpful': [int(s) for s in x.get('helpful', '').split() if s.isdigit()],
'title': x.get('title', '').strip(),
'author': analyze_imdbid(x.get('author')),
'date': x.get('date', '').strip(),
'rating': x.get('rating', '').strip()
})
)
)
]
preprocessors = [('<br>', '<br>\n')]
def postprocess_data(self, data):
for review in data.get('reviews', []):
if review.get('rating') and len(review['rating']) == 2:
review['rating'] = int(review['rating'][0])
else:
review['rating'] = None
if review.get('helpful') and len(review['helpful']) == 2:
review['not_helpful'] = review['helpful'][1] - review['helpful'][0]
review['helpful'] = review['helpful'][0]
else:
review['helpful'] = 0
review['not_helpful'] = 0
review['author'] = "ur%s" % review['author']
return data
class DOMHTMLFullCreditsParser(DOMParserBase):
"""Parser for the "full credits" (series cast section) page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
fcparser = DOMHTMLFullCreditsParser()
result = fcparser.parse(fullcredits_html_string)
"""
kind = 'full credits'
rules = [
Rule(
key='cast',
extractor=Rules(
foreach='//table[@class="cast_list"]//tr[@class="odd" or @class="even"]',
rules=[
Rule(
key='person',
extractor=Path('.//text()')
),
Rule(
key='link',
extractor=Path('./td[2]/a/@href')
),
Rule(
key='roleID',
extractor=Path('./td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/')
)
)
)
]
preprocessors = [
(_reRolesMovie, _manageRoles)
]
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews"
"miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
rules = [
Rule(
foreach='//h4[@class="li_group"]',
key=Path(
'./text()',
transform=lambda x: x.strip().lower()
),
extractor=Rules(
foreach='./following::ul[1]/li/a',
rules=[
Rule(
key='link',
extractor=Path('./@href')
),
Rule(
key='info',
extractor=Path('./text()')
)
],
transform=lambda x: (
x.get('info').strip(),
unquote(_normalize_href(x.get('link')))
)
)
)
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='connection',
extractor=Rules(
foreach='//div[@class="_imdbpy"]',
rules=[
Rule(
key=Path('./h5/text()', transform=transformers.lower),
extractor=Rules(
foreach='./a',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='movieID',
extractor=Path('./@href')
)
]
)
)
]
)
)
]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
def postprocess_data(self, data):
for key in list(data.keys()):
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = ''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title, movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes, modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data:
return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
rules = [
Rule(
key='locations',
extractor=Rules(
foreach='//dt',
rules=[
Rule(
key='place',
extractor=Path('.//text()')
),
Rule(
key='note',
extractor=Path('./following-sibling::dd[1]//text()')
)
],
transform=lambda x: ('%s::%s' % (x['place'].strip(),
(x['note'] or '').strip())).strip(':')
)
)
]
class DOMHTMLTechParser(DOMParserBase):
"""Parser for the "technical", "publicity" (for people) and "contacts" (for people)
pages of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
tparser = DOMHTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
re_space = re.compile(r'\s+')
rules = [
Rule(
key='tech',
extractor=Rules(
foreach='//table//tr/td[@class="label"]',
rules=[
Rule(
key=Path(
'./text()',
transform=lambda x: x.lower().strip()),
extractor=Path(
'..//td[2]//text()',
transform=lambda x: [t.strip()
for t in x.split(':::') if t.strip()]
)
)
]
)
)
]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'</div>\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I), r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
(re.compile('<span class="ghost">\|</span>', re.I), r':::'),
(re.compile('<br/?>', re.I), r':::')
# this is for splitting individual entries
]
def postprocess_data(self, data):
info = {}
for section in data.get('tech', []):
info.update(section)
for key, value in info.items():
if isinstance(value, list):
info[key] = [self.re_space.sub(' ', x).strip() for x in value]
else:
info[key] = self.re_space.sub(' ', value).strip()
return {self.kind: info}
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='news',
extractor=Rules(
foreach='//h2',
rules=[
Rule(
key='title',
extractor=Path('./text()')
),
Rule(
key='fromdate',
extractor=Path('./following-sibling::p[1]/small//text()')
),
Rule(
key='body',
extractor=Path('../following-sibling::p[2]//text()')
),
Rule(
key='link',
extractor=Path('../..//a[text()="Permalink"]/@href')
),
Rule(
key='fulllink',
extractor=Path('../..//a[starts-with(text(), "See full article at")]/@href')
)
],
transform=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ', '').strip(),
'body': (x.get('body') or '').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}
)
)
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if 'news' not in data:
return {}
for news in data['news']:
if 'full article link' in news:
if news['full article link'] is None:
del news['full article link']
return data
def _parse_review(x):
result = {}
title = x.get('title').strip()
if title[-1] == ':':
title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
if kind[-1] == ':':
kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
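# _parse_review returns a dict with keys 'title', 'link', 'review kind',
# 'review' and, when an author was matched, 'review author'; its x argument
# is the intermediate dict produced by a review-page extractor.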
class DOMHTMLSeasonEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
sparser = DOMHTMLSeasonEpisodesParser()
result = sparser.parse(episodes_html_string)
"""
rules = [
Rule(
key='series link',
extractor=Path('//div[@class="parent"]//a/@href')
),
Rule(
key='series title',
extractor=Path('//head/meta[@property="og:title"]/@content')
),
Rule(
key='_seasons',
extractor=Path(
foreach='//select[@id="bySeason"]//option',
path='./@value'
)
),
Rule(
key='_current_season',
extractor=Path('//select[@id="bySeason"]//option[@selected]/@value')
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="info"]',
rules=[
Rule(
key=Path('.//meta/@content',
transform=lambda x: 'episode %s' % x),
extractor=Rules(
rules=[
Rule(
key='link',
extractor=Path('.//strong//a[@href][1]/@href')
),
Rule(
key='original air date',
extractor=Path('.//div[@class="airdate"]/text()')
),
Rule(
key='title',
extractor=Path('.//strong//text()')
),
Rule(
key='rating',
extractor=Path(
'.//div[@class="ipl-rating-star "][1]'
'/span[@class="ipl-rating-star__rating"][1]/text()'
)
),
Rule(
key='votes',
extractor=Path(
'.//div[contains(@class, "ipl-rating-star")][1]'
'/span[@class="ipl-rating-star__total-votes"][1]/text()'
)
),
Rule(
key='plot',
extractor=Path('.//div[@class="item_description"]//text()')
)
]
)
)
]
)
)
]
def postprocess_data(self, data):
series_id = analyze_imdbid(data.get('series link'))
series_title = data.get('series title', '').strip()
selected_season = data.get('_current_season', 'unknown season').strip()
if not (series_id and series_title):
return {}
series = Movie(title=series_title, movieID=str(series_id),
accessSystem=self._as, modFunct=self._modFunct)
if series.get('kind') == 'movie':
series['kind'] = 'tv series'
try:
selected_season = int(selected_season)
except ValueError:
pass
nd = {selected_season: {}}
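        # Episodes whose number could not be parsed were collected under
        # the placeholder key 'episode -1'; move each of them into the
        # first free 'episode N' slot before building Movie objects.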
if 'episode -1' in data:
counter = 1
for episode in data['episode -1']:
while 'episode %d' % counter in data:
counter += 1
k = 'episode %d' % counter
data[k] = [episode]
del data['episode -1']
episodes = data.get('episodes', [])
for ep in episodes:
if not ep:
continue
episode_nr, episode = list(ep.items())[0]
if not episode_nr.startswith('episode '):
continue
episode_nr = episode_nr[8:].rstrip()
try:
episode_nr = int(episode_nr)
except ValueError:
pass
            episode_id = analyze_imdbid(episode.get('link', ''))
episode_air_date = episode.get('original air date', '').strip()
episode_title = episode.get('title', '').strip()
episode_plot = episode.get('plot', '')
episode_rating = episode.get('rating', '')
episode_votes = episode.get('votes', '')
if not (episode_nr is not None and episode_id and episode_title):
continue
ep_obj = Movie(movieID=episode_id, title=episode_title,
accessSystem=self._as, modFunct=self._modFunct)
ep_obj['kind'] = 'episode'
ep_obj['episode of'] = series
ep_obj['season'] = selected_season
ep_obj['episode'] = episode_nr
if episode_rating:
try:
ep_obj['rating'] = float(episode_rating)
                except (TypeError, ValueError):
pass
if episode_votes:
try:
ep_obj['votes'] = int(episode_votes.replace(',', '')
.replace('.', '').replace('(', '').replace(')', ''))
                except (TypeError, ValueError):
pass
if episode_air_date:
ep_obj['original air date'] = episode_air_date
if episode_air_date[-4:].isdigit():
ep_obj['year'] = episode_air_date[-4:]
if episode_plot:
ep_obj['plot'] = episode_plot
nd[selected_season][episode_nr] = ep_obj
_seasons = data.get('_seasons') or []
for idx, season in enumerate(_seasons):
try:
_seasons[idx] = int(season)
except ValueError:
pass
return {'episodes': nd, '_seasons': _seasons, '_current_season': selected_season}
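# The parser thus yields {'episodes': {season: {episode: Movie}}} plus the
# '_seasons' list and '_current_season' read from the season selector.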
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = 'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown':
year = '????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
    # XXX: no longer used for the list-of-episodes parser,
    # only for the episodes cast parser (see below).
_containsObjects = True
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.rules = [
Rule(
key='series title',
extractor=Path('//title/text()')
),
Rule(
key='series movieID',
extractor=Path(
'.//h1/a[@class="main"]/@href',
transform=analyze_imdbid
)
),
Rule(
key='episodes',
extractor=Rules(
foreach='//div[@class="_imdbpy"]/h3',
rules=[
Rule(
key='./a/@name',
extractor=Rules(
foreach=self._episodes_path,
rules=[
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='title',
extractor=Path('./a/text()')
),
Rule(
key='year',
extractor=Path('./preceding-sibling::a[1]/@name')
),
Rule(
key='episode',
extractor=Path('./text()[1]')
),
Rule(
key='oad',
extractor=Path(self._oad_path)
),
Rule(
key='plot',
extractor=Path('./following-sibling::text()[1]')
)
],
transform=_build_episode
)
)
]
)
)
]
if self.kind == 'episodes cast':
self.rules += [
Rule(
key='cast',
extractor=Rules(
foreach='//h4',
rules=[
Rule(
key=Path('./text()[1]', transform=transformers.strip),
extractor=Rules(
foreach='./following-sibling::table[1]//td[@class="nm"]',
rules=[
Rule(
key='person',
extractor=Path('..//text()')
),
Rule(
key='link',
extractor=Path('./a/@href')
),
Rule(
key='roleID',
extractor=Path('../td[4]//div[@class="_imdbpyrole"]/@roleid')
)
],
transform=lambda x: build_person(
x.get('person') or '',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or '').split('/'),
accessSystem=self._as,
modFunct=self._modFunct
)
)
)
]
)
)
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I), r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if 'series title' not in data:
return {}
if 'series movieID' not in data:
return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle:
return {}
seriesID = data['series movieID']
if seriesID is None:
return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in list(data.keys()):
if key.startswith('filter-season-') or key.startswith('season-'):
season_key = key.replace('filter-season-', '').replace('season-', '')
try:
season_key = int(season_key)
except ValueError:
pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode:
continue
episode_key = episode.get('episode')
if episode_key is None:
continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key, episode_key)
if cast_key in data:
cast = data[cast_key]
for i in range(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
rules = [
Rule(
key='faqs',
extractor=Rules(
foreach='//div[@class="section"]',
rules=[
Rule(
key='question',
extractor=Path('./h3/a/span/text()')
),
Rule(
key='answer',
extractor=Path('../following-sibling::div[1]//text()')
)
],
transform=lambda x: '%s::%s' % (
x.get('question').strip(),
'\n\n'.join(x.get('answer').replace('\n\n', '\n').strip().split('||'))
)
)
)
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
rules = [
Rule(
key='series title',
extractor=Path(
'//title/text()',
transform=lambda x: x.replace(' - TV schedule', '')
)
),
Rule(
key='series id',
extractor=Path('//h1/a[@href]/@href')
),
Rule(
            key='airing',  # renamed from 'tv airings' so postprocess_data below finds it
extractor=Rules(
foreach='//tr[@class]',
rules=[
Rule(
key='date',
extractor=Path('./td[1]//text()')
),
Rule(
key='time',
extractor=Path('./td[2]//text()')
),
Rule(
key='channel',
extractor=Path('./td[3]//text()')
),
Rule(
key='link',
extractor=Path('./td[4]/a[1]/@href')
),
Rule(
key='title',
extractor=Path('./td[4]//text()')
),
Rule(
key='season',
extractor=Path('./td[5]//text()')
)
],
transform=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
)
)
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data.get('series title') or ''
seriesID = analyze_imdbid(data.get('series id'))
if seriesID and 'airing' in data:
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = [_f for _f in data['airing'] if _f]
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the www.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example::
        pgparser = DOMHTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
rules = [
Rule(
key='parents guide',
extractor=Rules(
foreach='//div[@class="section"]',
rules=[
Rule(
key=Path(
'./h3/a/span/text()',
transform=transformers.lower
),
extractor=Path(
foreach='../following-sibling::div[1]/p',
path='.//text()',
transform=lambda x: [
t.strip().replace('\n', ' ')
for t in x.split('||') if t.strip()
]
)
)
]
)
)
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
def postprocess_data(self, data):
data2 = {}
for key in data:
if data[key]:
data2[key] = data[key]
if not data2:
return {}
return {'parents guide': data2}
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'full_credits_parser': ((DOMHTMLFullCreditsParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), None),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'criticrev_parser': ((DOMHTMLCriticReviewsParser,), {'kind': 'critic reviews'}),
'reviews_parser': ((DOMHTMLReviewsParser,), {'kind': 'reviews'}),
'externalsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,), None),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,), None),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,), None),
'photosites_parser': ((DOMHTMLOfficialsitesParser,), None),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'season_episodes_parser': ((DOMHTMLSeasonEpisodesParser,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
| archerja/ilxr.py | imdb/parser/http/movieParser.py | Python | mit | 90,582 | [ "Brian" ] | 80c71c7aaed37370e5ba829d3ae287a15f442a7c140203b885245cbce4732417 |
"""@namespace IMP.pmi.metadata
Classes for attaching metadata to PMI objects.
"""
from __future__ import print_function, division
from IMP.pmi.tools import OrderedDict
import os
class Metadata(object):
"""Base class for all metadata"""
pass
class RootMetadata(Metadata):
"""Metadata that only makes sense for the top-level PMI object."""
pass
class Software(RootMetadata):
"""Software (other than IMP) used as part of the modeling protocol."""
def __init__(self, name, classification, description, url, type='program',
version=None):
self.name = name
self.classification = classification
self.description = description
self.url = url
self.type = type
self.version = version
class Citation(RootMetadata):
"""A publication that describes the modeling."""
def __init__(self, pmid, title, journal, volume, page_range, year, authors,
doi):
self.title, self.journal, self.volume = title, journal, volume
self.page_range, self.year = page_range, year
self.pmid, self.authors, self.doi = pmid, authors, doi
class PythonScript(RootMetadata):
"""A Python script used as part of the modeling."""
def __init__(self, location):
self.location = location
class Dataset(Metadata):
"""A set of input data, for example, a crystal structure or EM map."""
_eq_keys = ['location']
# Datasets compare equal iff they are the same class and have the
# same attributes
def _eq_vals(self):
return tuple([self.__class__]
+ [getattr(self, x) for x in self._eq_keys])
def __eq__(self, other):
return self._eq_vals() == other._eq_vals()
def __hash__(self):
return hash(self._eq_vals())
_data_type = 'unspecified'
def __init__(self, location):
self.location = location
self._parents = OrderedDict()
def add_parent(self, dataset):
"""Add another Dataset from which this one was derived.
For example, a 3D EM map may be derived from a set of 2D images."""
self._parents[dataset] = None
def add_primary(self, dataset):
"""Add another Dataset from which the ultimate parent of this one
was derived."""
if len(self._parents) == 0:
self.add_parent(dataset)
elif len(self._parents) == 1:
list(self._parents.keys())[0].add_parent(dataset)
else:
raise ValueError("This dataset has multiple parents - don't "
"know which one to add to")
class CXMSDataset(Dataset):
"""Processed crosslinks from a CX-MS experiment"""
_data_type = 'CX-MS data'
class MassSpecDataset(Dataset):
"""Raw mass spectrometry files such as peaklists"""
_data_type = 'Mass Spectrometry data'
class EMDensityDataset(Dataset):
"""A 3D electron microscopy dataset"""
_data_type = '3DEM volume'
class PDBDataset(Dataset):
"""An experimentally-determined 3D structure as a set of a coordinates,
usually in a PDB file"""
_data_type = 'Experimental model'
class ComparativeModelDataset(Dataset):
"""A 3D structure determined by comparative modeling"""
_data_type = 'Comparative model'
class EMMicrographsDataset(Dataset):
"""Raw 2D electron micrographs"""
_eq_keys = Dataset._eq_keys + ['number']
_data_type = 'EM raw micrographs'
def __init__(self, location, number):
super(EMMicrographsDataset, self).__init__(location)
self.number = number
class EM2DClassDataset(Dataset):
"""2DEM class average"""
_data_type = '2DEM class average'
class Location(Metadata):
"""Identifies the location where a resource can be found."""
# 'details' can differ without affecting dataset equality
_eq_keys = []
_allow_duplicates = False
def __init__(self, details=None):
self.details = details
# Locations compare equal iff they are the same class, have the
# same attributes, and allow_duplicates=False
def _eq_vals(self):
if self._allow_duplicates:
return id(self)
else:
return tuple([self.__class__]
+ [getattr(self, x) for x in self._eq_keys])
def __eq__(self, other):
return self._eq_vals() == other._eq_vals()
def __hash__(self):
return hash(self._eq_vals())
class DatabaseLocation(Location):
"""A dataset stored in an official database (PDB, EMDB, PRIDE, etc.)"""
_eq_keys = Location._eq_keys + ['db_name', 'access_code', 'version']
def __init__(self, db_name, db_code, version=None, details=None):
super(DatabaseLocation, self).__init__(details)
self.db_name = db_name
self.access_code = db_code
self.version = version
class EMDBLocation(DatabaseLocation):
"""Something stored in the EMDB database."""
def __init__(self, db_code, version=None, details=None):
DatabaseLocation.__init__(self, 'EMDB', db_code, version, details)
class PDBLocation(DatabaseLocation):
"""Something stored in the PDB database."""
def __init__(self, db_code, version=None, details=None):
DatabaseLocation.__init__(self, 'PDB', db_code, version, details)
class MassIVELocation(DatabaseLocation):
"""Something stored in the MassIVE database."""
def __init__(self, db_code, version=None, details=None):
DatabaseLocation.__init__(self, 'MASSIVE', db_code, version, details)
class FileLocation(Location):
"""An individual file or directory.
This may be in a repository (if `repo` is not None) or only on the
local disk (if `repo` is None)."""
_eq_keys = Location._eq_keys + ['repo', 'path']
def __init__(self, path, repo=None, details=None):
"""Constructor.
@param path the location of the file or directory.
@param repo a Repository object that describes the repository
containing the file (if any).
"""
super(FileLocation, self).__init__(details)
self.repo = repo
if repo:
self.path = path
# Cannot determine file size if non-local
self.file_size = None
else:
if not os.path.exists(path):
raise ValueError("%s does not exist" % path)
self.file_size = os.stat(path).st_size
# Store absolute path in case the working directory changes later
self.path = os.path.abspath(path)
class Repository(Metadata):
"""A repository containing modeling files.
This can be used if the PMI script plus input files are part of a
repository, which has been archived somewhere with a DOI.
This will be used to construct permanent references to files
used in this modeling, even if they haven't been uploaded to
a database such as PDB or EMDB.
@see FileLocation."""
# Two repositories compare equal if their DOIs and URLs are the same
def __eq__(self, other):
return self.doi == other.doi and self.url == other.url
def __hash__(self):
return hash((self.doi, self.url))
def __init__(self, doi, root=None, url=None,
top_directory=None):
"""Constructor.
@param doi the Digital Object Identifier for the repository.
@param root the relative path to the top-level directory
of the repository from the working directory of the script,
or None if files in this repository aren't checked out.
@param url If given, a location that this repository can be
downloaded from.
@param top_directory If given, prefix all paths for files in this
repository with this value. This is useful when the archived
version of the repository is found in a subdirectory at the
URL or DOI (for example, GitHub repositories archived at
Zenodo get placed in a subdirectory named for the repository
and git hash).
"""
# todo: DOI should be optional (could also use URL, local path)
self.doi = doi
self.url, self.top_directory = url, top_directory
if root:
# Store absolute path in case the working directory changes later
self._root = os.path.abspath(root)
@staticmethod
def update_in_repos(fileloc, repos):
"""If the given FileLocation maps to somewhere within one of the
passed repositories, update it to reflect that."""
if fileloc.repo:
return
orig_path = fileloc.path
for repo in repos:
relpath = os.path.relpath(orig_path, repo._root)
if not relpath.startswith('..'):
# Prefer the shortest paths if multiple repositories can match
if fileloc.repo is None or len(fileloc.path) > len(relpath):
fileloc.repo = repo
fileloc.path = relpath
def _get_full_path(self, path):
"""Prefix the given path with our top-level directory"""
return os.path.join(self.top_directory or "", path)
| shanot/imp | modules/pmi/pyext/src/metadata.py | Python | gpl-3.0 | 9,210 | [ "CRYSTAL" ] | 19f9e7bd298ba38b24161a5be91112d549c6e7b8558e1c2ae01a11da74382f12 |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import itertools
import numpy as np
import espressomd
import espressomd.lb
"""
Tests for the streaming of populations of the LB algorithm.
"""
AGRID = 0.5
TAU = 0.1
VISC = 1e18
BULK_VISC = VISC
VELOCITY_VECTORS = np.array([
[0, 0, 0],
[1, 0, 0],
[-1, 0, 0],
[0, 1, 0],
[0, -1, 0],
[0, 0, 1],
[0, 0, -1],
[1, 1, 0],
[-1, -1, 0],
[1, -1, 0],
[-1, 1, 0],
[1, 0, 1],
[-1, 0, -1],
[1, 0, -1],
[-1, 0, 1],
[0, 1, 1],
[0, -1, -1],
[0, 1, -1],
[0, -1, 1]])
LB_PARAMETERS = {
'agrid': AGRID,
'visc': VISC,
'bulk_visc': BULK_VISC,
'tau': TAU,
'dens': 1.0,
'gamma_odd': 1.0,
'gamma_even': 1.0
}
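# NOTE: the huge viscosity together with unit gamma_odd/gamma_even
# suppresses relaxation, so a single LB step reduces to pure streaming
# of the populations along VELOCITY_VECTORS.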
class LBStreamingCommon:
"""
Check the streaming step of the LB fluid implementation by setting all populations
to zero except one. Relaxation is suppressed by choosing appropriate parameters.
"""
system = espressomd.System(box_l=[3.0] * 3)
system.cell_system.skin = 0.4 * AGRID
system.time_step = TAU
grid = np.array(system.box_l / AGRID, dtype=int)
def setUp(self):
self.lbf = self.lb_class(**LB_PARAMETERS)
self.system.actors.add(self.lbf)
def tearDown(self):
self.system.actors.clear()
def reset_fluid_populations(self):
"""Set all populations to 0.0.
"""
for i in itertools.product(range(self.grid[0]), range(
self.grid[1]), range(self.grid[2])):
self.lbf[i].population = np.zeros(19)
def set_fluid_populations(self, grid_index):
"""Set the population of direction n_v of grid_index to n_v+1.
"""
pop = np.arange(1, 20)
self.lbf[grid_index].population = pop
def test_population_streaming(self):
self.reset_fluid_populations()
for grid_index in itertools.product(
range(self.grid[0]), range(self.grid[1]), range(self.grid[2])):
self.set_fluid_populations(grid_index)
self.system.integrator.run(1)
for n_v in range(19):
target_node_index = np.mod(
grid_index + VELOCITY_VECTORS[n_v], self.grid)
np.testing.assert_almost_equal(
self.lbf[target_node_index].population[n_v], float(n_v + 1))
self.lbf[target_node_index].population = np.zeros(19)
class LBCPU(LBStreamingCommon, ut.TestCase):
"""Test for the CPU implementation of the LB."""
lb_class = espressomd.lb.LBFluid
@utx.skipIfMissingGPU()
class LBGPU(LBStreamingCommon, ut.TestCase):
"""Test for the GPU implementation of the LB."""
lb_class = espressomd.lb.LBFluidGPU
if __name__ == "__main__":
ut.main()
| espressomd/espresso | testsuite/python/lb_streaming.py | Python | gpl-3.0 | 3,478 | [ "ESPResSo" ] | 60230cedcf7624b8c602dd65707577eebaeb268c73ae0535a9be7b69b3b6a185 |
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""left navigation pane"""
import logging
logger = logging.getLogger('camelot.view.controls.navpane2')
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QIcon
from PyQt4.QtGui import QMenu
from PyQt4.QtGui import QFrame
from PyQt4.QtGui import QWidget
from PyQt4.QtGui import QToolBox
from PyQt4.QtGui import QDockWidget
from PyQt4.QtGui import QVBoxLayout
from camelot.view.action import ActionFactory
from camelot.view.model_thread import post
from camelot.view.controls.modeltree import ModelItem
from camelot.view.controls.modeltree import ModelTree
class PlainWidgetWithNoMargins(QWidget):
def __init__(self, layout=None, parent=None):
super(PlainWidgetWithNoMargins, self).__init__(parent)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
class NavigationPane(QDockWidget):
def __init__(self, app_admin, workspace, parent):
super(QDockWidget, self).__init__(parent)
self.workspace = workspace
self.app_admin = app_admin
self._tree_items = None
self._title_widget = QWidget()
self._toolbox = self.get_toolbox()
self._toolbox.setMouseTracking(True)
self._shared_tree_widget = None
self._tree_widgets = []
# hack for removing the dock title bar
self.setTitleBarWidget(self._title_widget)
self.setWidget(self._toolbox)
self.setFeatures(QDockWidget.NoDockWidgetFeatures)
# should happen at the top level
#self.setStyleSheet(art.read('stylesheet/navpane2_office2007_blue.qss'))
self.app_admin.sections_changed_signal.connect(self.update_sections)
self.update_sections()
@QtCore.pyqtSlot()
def update_sections(self):
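        # post() runs app_admin.get_sections in the model thread and
        # hands the result to set_sections back on the GUI thread.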
post(self.app_admin.get_sections, self.set_sections)
def get_toolbox(self):
tb = QToolBox()
tb.setFrameShape(QFrame.NoFrame)
tb.layout().setContentsMargins(0,0,0,0)
tb.layout().setSpacing(1)
return tb
def get_tree_widget(self):
tw = ModelTree(parent=self)
# i hate the sunken frame style
tw.setFrameShape(QFrame.NoFrame)
tw.setFrameShadow(QFrame.Plain)
tw.contextmenu = QMenu(self)
act = ActionFactory.new_tab(self, self.open_in_new_view)
tw.contextmenu.addAction( act )
tw.setContextMenuPolicy(Qt.CustomContextMenu)
tw.customContextMenuRequested.connect(self.create_context_menu)
return tw
def get_sections(self):
return self._sections
@QtCore.pyqtSlot(list)
def set_sections(self, sections):
logger.debug('setting navpane sections')
animation = QtCore.QPropertyAnimation(self._toolbox, 'minimumWidth', self)
animation.setDuration( 500 )
animation.setStartValue( 0 )
animation.setEndValue( 220 )
animation.start()
self._sections = sections
self._buttons = [(
index,
section.get_verbose_name(),
section.get_icon().getQPixmap(),
) for index, section in enumerate(sections)]
        # perform QToolBox cleanup:
        # QToolBox won't delete its items, so we have to do it explicitly
count = self._toolbox.count()
while count:
item = self._toolbox.widget(count-1)
self._toolbox.removeItem(count-1)
item.deleteLater()
count -= 1
self._shared_tree_widget = self.get_tree_widget()
self._shared_tree_widget.itemClicked.connect(self.open_in_current_view)
self._toolbox_widgets = []
for _i, name, pixmap in self._buttons:
# TODO: old navpane used translation here
name = unicode(name)
icon = QIcon(pixmap)
pwdg = PlainWidgetWithNoMargins(QVBoxLayout())
self._toolbox_widgets.append(pwdg)
self._toolbox.addItem(pwdg, icon, name)
self._toolbox.currentChanged.connect(self.change_current)
self._toolbox.setCurrentIndex(0)
# setCurrentIndex does not emit currentChanged
self.change_current(0)
# WARNING: hardcoded width
#self._toolbox.setMinimumWidth(220)
@QtCore.pyqtSlot(int)
def change_current(self, index):
logger.debug('setting current navpane index to %s' % index)
def get_models_for_tree():
"""returns pairs of (Admin, query) classes for items in the tree"""
if index < len(self._sections):
section = self._sections[index]
return section.get_items()
return []
post(get_models_for_tree, self.set_items_in_tree)
@QtCore.pyqtSlot(list)
def set_items_in_tree(self, items):
logger.debug('setting items for current navpane section')
self._shared_tree_widget.clear()
self._shared_tree_widget.clear_model_items()
self._toolbox.currentWidget().layout().addWidget(self._shared_tree_widget)
self._tree_items = items
if not items: return
for item in items:
label = item.get_verbose_name()
model_item = ModelItem(self._shared_tree_widget, [label])
self._shared_tree_widget.modelitems.append(model_item)
def get_section_item(self, item):
index = self._shared_tree_widget.indexFromItem(item)
return self._tree_items[index.row()]
def create_context_menu(self, point):
logger.debug('creating context menu')
item = self._shared_tree_widget.itemAt(point)
if item:
self._shared_tree_widget.setCurrentItem(item)
self._shared_tree_widget.contextmenu.popup(
self._shared_tree_widget.mapToGlobal(point)
)
@QtCore.pyqtSlot(QtGui.QTreeWidgetItem, int)
def open_in_current_view(self, item, _column):
"""pops a model window in parent's workspace"""
        logger.debug('popping a window in parent')
item = self._shared_tree_widget.currentItem()
index = self._shared_tree_widget.indexFromItem(item)
section_item = self._tree_items[index.row()]
new_view = section_item.get_action().run(self.workspace)
if new_view:
self.workspace.set_view(new_view)
@QtCore.pyqtSlot()
def open_in_new_view(self):
"""pops a model window in parent's workspace"""
        logger.debug('popping a window in parent')
item = self._shared_tree_widget.currentItem()
index = self._shared_tree_widget.indexFromItem(item)
section_item = self._tree_items[index.row()]
new_view = section_item.get_action().run(self.workspace)
if new_view:
self.workspace.add_view(new_view)
| kurtraschke/camelot | camelot/view/controls/navpane2.py | Python | gpl-2.0 | 7,756 | [ "VisIt" ] | 956d861615df70c072165afe72a90b4ec0ab3d3813b3a0fb1ba0de9afbcd39df |
"""
This module implements the functionality to take any Python expression as a
string and fix all numbers and other things before evaluating it,
thus
1/2
returns
Integer(1)/Integer(2)
We use the Python ast module for that, which is in python2.6 and later. It is
well documented at docs.python.org.
Some tips to understand how this works: use dump() to get a nice representation
of any node. Then write a string of what you want to get, e.g.
"Integer(1)", parse it, dump it and you'll see that you need to do
"Call(Name('Integer', Load()), [node], [], None, None)". You don't need to
bother with lineno and col_offset, just call fix_missing_locations() before
returning the node.
If the ast module is not available (python2.4 and 2.5), we use the old compiler
module.
"""
from sympy.core.basic import Basic
from sympy.core.sympify import SympifyError
try:
from ast import parse, NodeTransformer, Call, Name, Load, \
fix_missing_locations, Str
ast_enabled = True
except ImportError:
ast_enabled = False
if ast_enabled:
class Transform(NodeTransformer):
def __init__(self, local_dict, global_dict):
NodeTransformer.__init__(self)
self.local_dict = local_dict
self.global_dict = global_dict
def visit_Num(self, node):
if isinstance(node.n, int):
return fix_missing_locations(Call(Name('Integer', Load()),
[node], [], None, None))
elif isinstance(node.n, float):
return fix_missing_locations(Call(Name('Real', Load()),
[node], [], None, None))
return node
def visit_Name(self, node):
if node.id in self.local_dict:
return node
elif node.id in self.global_dict:
name_obj = self.global_dict[node.id]
if isinstance(name_obj, (Basic, type)) or callable(name_obj):
return node
elif node.id in ['True', 'False']:
return node
return fix_missing_locations(Call(Name('Symbol', Load()),
[Str(node.id)], [], None, None))
def visit_Lambda(self, node):
if len(node.args.args) == 0:
args = [Str("x")]
else:
args = node.args.args
args = [self.visit(arg) for arg in args]
body = self.visit(node.body)
n = Call(Name('Lambda', Load()), args + [body], [], None, None)
return fix_missing_locations(n)
def parse_expr(s, local_dict):
"""
Converts the string "s" to a SymPy expression, in local_dict.
It converts all numbers to Integers before feeding it to Python and
automatically creates Symbols.
"""
if ast_enabled:
global_dict = {}
exec 'from sympy import *' in global_dict
try:
a = parse(s.strip(), mode="eval")
except SyntaxError:
raise SympifyError("Cannot parse %s." %repr(s))
a = Transform(local_dict, global_dict).visit(a)
e = compile(a, "<string>", "eval")
return eval(e, global_dict, local_dict)
else:
# in python2.4 and 2.5, the "ast" module is not available, so we need
# to use our old implementation:
from ast_parser_python24 import SymPyParser
try:
return SymPyParser(local_dict=local_dict).parse_expr(s)
except SyntaxError:
raise SympifyError("Cannot parse %s." %repr(s))
| pernici/sympy | sympy/parsing/ast_parser.py | Python | bsd-3-clause | 3,517 | [ "VisIt" ] | 25eb57e4589d779546f21b932f25b52b9e31a0c7cf35396d87f92821071535b7 |
#!/usr/bin/env python3
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
import cairo
import sys
import re
import gtk
from functools import cmp_to_key  # list.sort() lost its cmp= argument in Python 3
## DataRange class
class DataRange:
## @var start
# start
## @var end
# end
## @var value
# value
def __init__(self, start = 0, end = 0, value = ''):
"""! Initializer
@param self this object
@param start start
@param end end
@param value value
@return none
"""
self.start = start
self.end = end
self.value = value
## EventString class
class EventString:
## @var at
# at
## @var value
# value
def __init__(self, at = 0, value = ''):
"""! Initializer
@param self this object
        @param at the time of the event
@param value value
@return none
"""
self.at = at
self.value = value
## EventFloat class
class EventFloat:
## @var at
# at
## @var value
# value
def __init__(self, at = 0, value = 0.0):
"""! Initializer
@param self this object
@param at you
@param value value
@return none
"""
self.at = at
self.value = value
## EventInt class
class EventInt:
## @var at
# at
## @var value
# value
def __init__(self, at = 0, value = 0.0):
"""! Initializer
@param self this object
@param at you
@param value value
@return none
"""
self.at = at
self.value = value
def ranges_cmp(a, b):
diff = a.start - b.start
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
def events_cmp(a, b):
diff = a.at - b.at
if diff < 0:
return -1
elif diff > 0:
return +1
else:
return 0
## TimelineDataRange
class TimelineDataRange:
## @var name
# name
## @var ranges
# ranges
def __init__(self, name = ''):
"""! Initializer
@param self this object
@param name name
@return none
"""
self.name = name
self.ranges = []
return
def __search(self, key):
"""! Search
@param self this object
@param key key
@return index if found or -1 if not found
"""
l = 0
u = len(self.ranges)-1
while l <= u:
i = int((l + u) / 2)
if key >= self.ranges[i].start and key <= self.ranges[i].end:
return i
elif key < self.ranges[i].start:
u = i - 1
else:
# key > self.ranges[i].end
l = i + 1
return - 1
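    # Example (hypothetical data): with ranges [0,5] and [6,9] stored in
    # order, __search(7) returns index 1 while __search(12) returns -1.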
def add_range(self, range):
"""! Add range
@param self this object
@param range range
@return none
"""
self.ranges.append(range)
def get_all(self):
"""! Get all ranges
@param self this object
@return the ranges
"""
return self.ranges
def get_ranges(self, start, end):
"""! Get selected ranges
@param self this object
@param start range start
@param end range end
@return the range or and empty list
"""
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return []
elif s == -1:
return self.ranges[0:e + 1]
elif e == -1:
return self.ranges[s:len(self.ranges)]
else:
return self.ranges[s:e + 1]
def get_ranges_bounds(self, start, end):
"""! Get ranges bounds
@param self this object
@param start range start
@param end range end
@return range
"""
s = self.__search(start)
e = self.__search(end)
if s == -1 and e == -1:
return(0, 0)
elif s == -1:
return(0, e + 1)
elif e == -1:
return(s, len(self.ranges))
else:
return(s, e + 1)
def sort(self):
"""! Sort ranges
@param self this object
@return none
"""
        self.ranges.sort(key=cmp_to_key(ranges_cmp))
def get_bounds(self):
"""! Get bounds
@param self this object
@return the bounds
"""
if len(self.ranges) > 0:
lo = self.ranges[0].start
hi = self.ranges[len(self.ranges)-1].end
return(lo, hi)
else:
return(0, 0)
## TimelineEvent class
class TimelineEvent:
## @var name
# name
## @var events
# events
def __init__(self, name = ''):
"""! Get ranges bounds
@param self this object
@param name name
@return none
"""
self.name = name
self.events = []
def __search(self, key):
"""! Search function
@param self this object
@param key the key
@return event index
"""
l = 0
u = len(self.events)-1
while l <= u:
i = int((l + u) / 2)
if key == self.events[i].at:
return i
elif key < self.events[i].at:
u = i - 1
else:
# key > self.events[i].at
l = i + 1
return l
def add_event(self, event):
"""! Add Event
@param self this object
@param event event to add
@return none
"""
self.events.append(event)
def get_events(self, start, end):
"""! Get Events
@param self this object
@param start starting event
@param end ending event
@return the events
"""
s = self.__search(start)
e = self.__search(end)
return self.events[s:e + 1]
def get_events_bounds(self, start, end):
"""! Get Events Bounds
@param self this object
@param start starting event
@param end ending event
@return event bounds
"""
s = self.__search(start)
e = self.__search(end)
return(s, e + 1)
def sort(self):
"""! Sort function
@param self this object
@return none
"""
        self.events.sort(key=cmp_to_key(events_cmp))
def get_bounds(self):
"""! Get Bounds
@param self this object
@return the bounds
"""
if len(self.events) > 0:
lo = self.events[0].at
hi = self.events[-1].at
return(lo, hi)
else:
return(0, 0)
## Timeline class
class Timeline:
## @var name
# name
## @var ranges
# ranges
## @var event_str
# event string
## @var event_int
# event int
def __init__(self, name = ''):
"""! Initializer
@param self this object
@param name name
@return none
"""
self.ranges = []
self.event_str = []
self.event_int = []
self.name = name
def get_range(self, name):
"""! Get range
@param self this object
@param name name
@return the range
"""
for range in self.ranges:
if range.name == name:
return range
timeline = TimelineDataRange(name)
self.ranges.append(timeline)
return timeline
def get_event_str(self, name):
"""! Get Event String
@param self this object
@param name name
@return the event string
"""
for event_str in self.event_str:
if event_str.name == name:
return event_str
timeline = TimelineEvent(name)
self.event_str.append(timeline)
return timeline
def get_event_int(self, name):
"""! Get Event Int
@param self this object
@param name name
        @return event int
"""
for event_int in self.event_int:
if event_int.name == name:
return event_int
timeline = TimelineEvent(name)
self.event_int.append(timeline)
return timeline
def get_ranges(self):
"""! Get Ranges
@param self this object
@return the ranges
"""
return self.ranges
def get_events_str(self):
"""! Get Events string
@param self this object
@return event string
"""
return self.event_str
def get_events_int(self):
"""! Get Events int
@param self this object
        @return events int
"""
return self.event_int
def sort(self):
"""! Sort the ranges and events
@param self this object
@return none
"""
for range in self.ranges:
range.sort()
for event in self.event_int:
event.sort()
for event in self.event_str:
event.sort()
def get_bounds(self):
"""! Get Bounds
@param self this object
@return the bounds
"""
lo = 0
hi = 0
for range in self.ranges:
(range_lo, range_hi) = range.get_bounds()
if range_lo < lo:
lo = range_lo
if range_hi > hi:
hi = range_hi
for event_str in self.event_str:
(ev_lo, ev_hi) = event_str.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
for event_int in self.event_int:
(ev_lo, ev_hi) = event_int.get_bounds()
if ev_lo < lo:
lo = ev_lo
if ev_hi > hi:
hi = ev_hi
return(lo, hi)
## Timelines class
class Timelines:
## @var timelines
# timelines
def __init__(self):
""" Initializer
@param self: this object
"""
self.timelines = []
def get(self, name):
"""! Get Timeline
@param self this object
@param name name
@return the timeline for the name
"""
for timeline in self.timelines:
if timeline.name == name:
return timeline
timeline = Timeline(name)
self.timelines.append(timeline)
return timeline
def get_all(self):
"""! Get All Timeline
@param self this object
@return all timelines
"""
return self.timelines
def sort(self):
"""! Sort the timelines
@param self this object
@return none
"""
for timeline in self.timelines:
timeline.sort()
def get_bounds(self):
"""! Get Bounds
@param self this object
@return the bounds for all timelines
"""
lo = 0
hi = 0
for timeline in self.timelines:
(t_lo, t_hi) = timeline.get_bounds()
if t_lo < lo:
lo = t_lo
if t_hi > hi:
hi = t_hi
return(lo, hi)
def get_all_range_values(self):
"""! Get All Ranges
@param self this object
@return the keys for all ranges
"""
range_values = {}
for timeline in self.timelines:
for ranges in timeline.get_ranges():
for ran in ranges.get_all():
range_values[ran.value] = 1
return range_values.keys()
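# Minimal usage sketch (hypothetical data): build one timeline with a
# named range and query the global bounds.
#
#   timelines = Timelines()
#   state = timelines.get('node-0').get_range('state')
#   state.add_range(DataRange(0, 10, 'busy'))
#   timelines.sort()
#   timelines.get_bounds()              # -> (0, 10)
#   timelines.get_all_range_values()    # -> ['busy']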
## Color class
class Color:
## @var r
# red
## @var g
# green
## @var b
# blue
def __init__(self, r = 0.0, g = 0.0, b = 0.0):
"""! Initializer
@param self: this object
@param r: red
@param g: green
@param b: blue
@return none
"""
self.r = r
self.g = g
self.b = b
def set(self, r, g, b):
"""! Set color
@param self: this object
@param r: red
@param g: green
@param b: blue
@return none
"""
self.r = r
self.g = g
self.b = b
## Colors class
class Colors:
## @var __colors
# colors
## @var default_colors
# default colors
## XXX add more
default_colors = [Color(1, 0, 0), Color(0, 1, 0), Color(0, 0, 1), Color(1, 1, 0), Color(1, 0, 1), Color(0, 1, 1)]
def __init__(self):
"""! Initializer
@param self this object
@return none
"""
self.__colors = {}
def add(self, name, color):
"""! Add
@param self this object
@param name name of the color
@param color color value
@return none
"""
self.__colors[name] = color
def lookup(self, name):
"""! Lookup name
@param self this object
@param name name
@return named color
"""
        if name not in self.__colors:
self.add(name, self.default_colors.pop())
return self.__colors.get(name)
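    # Note that default_colors is a class-level list shared by all Colors
    # instances: the first lookup of each new name permanently consumes
    # one of the six predefined colors via pop().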
## TopLegendRenderer class
class TopLegendRenderer:
## @var __padding
# padding
## @var __legends
# legends
## @var __colors
# colors
## @var __width
# width
## @var __height
# height
def __init__(self):
"""! Initializer
@param self this object
@return none
"""
self.__padding = 10
def set_padding(self, padding):
"""! Set padding
@param self this object
@param padding padding
@return none
"""
self.__padding = padding
def set_legends(self, legends, colors):
"""! Set padding
@param self this object
@param legends legends
@param colors colors
@return none
"""
self.__legends = legends
self.__colors = colors
def layout(self, width):
"""! Set padding
@param self this object
@param width width
@return none
"""
self.__width = width
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
total_height += line_height
self.__height = total_height
def get_height(self):
"""! Set padding
@param self this object
@return height
"""
return self.__height
def draw(self, ctx):
"""! Set padding
@param self this object
@param ctx ctx
@return none
"""
i = 0
line_height = 0
total_height = self.__padding
line_used = self.__padding
for legend in self.__legends:
(t_width, t_height) = ctx.text_extents(legend)[2:4]
item_width = self.__padding + self.__padding + t_width + self.__padding
item_height = t_height + self.__padding
if item_height > line_height:
line_height = item_height
if line_used + item_width > self.__width:
line_used = self.__padding + item_width
total_height += line_height
else:
line_used += item_width
x = line_used - item_width
ctx.rectangle(x, total_height, self.__padding, self.__padding)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke_preserve()
ctx.set_source_rgb(self.__colors[i].r,
self.__colors[i].g,
self.__colors[i].b)
ctx.fill()
ctx.move_to(x + self.__padding*2, total_height + t_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(legend)
i += 1
return
## TimelinesRenderer class
class TimelinesRenderer:
## @var padding
# padding
## @var timelines
# timelines
## @var colors
# colors
## @var start
# start
## @var end
# end
## @var left_width
# left width
## @var right_width
# right width
## @var max_text_height
# maximum text height
## @var width
# width
## @var height
# height
## @var grey_background
# grey background
def __init__(self):
"""! Initializer
@param self this object
@return none
"""
self.padding = 10
return
def get_height(self):
"""! Get Height
@param self this object
@return height
"""
return self.height
def set_timelines(self, timelines, colors):
"""! Set Timelines
@param self this object
@param timelines timelines
@param colors colors
@return none
"""
self.timelines = timelines
self.colors = colors
def set_render_range(self, start, end):
"""! Set Render Range
@param self this object
@param start start
@param end end
@return none
"""
self.start = start
self.end = end
def get_data_x_start(self):
"""! Get Data X Start
@param self: this object
@return X start
"""
return self.padding / 2 + self.left_width + self.padding + self.right_width + self.padding / 2
def layout(self, width):
"""! Get Data X Start
@param self this object
@param width width
@return none
"""
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
left_width = 0
right_width = 0
left_n_lines = 0
range_n = 0
eventint_n = 0
eventstr_n = 0
for timeline in self.timelines.get_all():
left_n_lines += 1
t_width = ctx.text_extents(timeline.name)[2]
left_width = max(left_width, t_width)
for rang in timeline.get_ranges():
t_width = ctx.text_extents(rang.name)[2]
right_width = max(right_width, t_width)
range_n += 1
for events_int in timeline.get_events_int():
t_width = ctx.text_extents(events_int.name)[2]
right_width = max(right_width, t_width)
eventint_n += 1
for events_str in timeline.get_events_str():
t_width = ctx.text_extents(events_str.name)[2]
right_width = max(right_width, t_width)
eventstr_n += 1
left_height = left_n_lines * max_text_height + (left_n_lines - 1) * self.padding
right_n_lines = range_n + eventint_n + eventstr_n
right_height = (right_n_lines - 1) * self.padding + right_n_lines * max_text_height
right_data_height = (eventint_n + eventstr_n) * (max_text_height + 5) + range_n * 10
right_data_height += (right_n_lines - 1) * self.padding
height = max(left_height, right_height)
height = max(height, right_data_height)
self.left_width = left_width
self.right_width = right_width
self.max_text_height = max_text_height
self.width = width
self.height = height + self.padding
def draw_line(self, ctx, x, y, width, height):
"""! Draw Line
@param self this object
@param ctx ctx
@param x x
@param y y
@param width width
@param height height
@return none
"""
ctx.move_to(x, y)
ctx.rel_line_to(width, height)
ctx.close_path()
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.set_line_width(1.0)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
def draw_events(self, ctx, events, x, y, width, height):
"""! Draw Event
@param self this object
@param ctx ctx
@param events events
@param x x
@param y y
@param width width
@param height height
@return none
"""
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x)
(lo, hi) = events.get_events_bounds(self.start, self.end)
for event in events.events[lo:hi]:
real_x = int(x + (event.at - self.start) * width / (self.end - self.start))
if real_x > last_x_drawn + 2:
ctx.rectangle(real_x, y, 1, 1)
ctx.set_source_rgb(1, 0, 0)
ctx.stroke()
ctx.move_to(real_x, y + self.max_text_height)
ctx.set_source_rgb(0, 0, 0)
ctx.show_text(str(event.value))
last_x_drawn = real_x
self.grey_background += 1
def draw_ranges(self, ctx, ranges, x, y, width, height):
"""! Draw Ranges
@param self this object
@param ctx ctx
@param ranges ranges
@param x x
@param y y
@param width width
@param height height
@return none
"""
if (self.grey_background % 2) == 0:
ctx.rectangle(x, y - self.padding / 2,
width, height + self.padding)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
last_x_drawn = int(x - 1)
(lo, hi) = ranges.get_ranges_bounds(self.start, self.end)
for data_range in ranges.ranges[lo:hi]:
s = max(data_range.start, self.start)
e = min(data_range.end, self.end)
x_start = int(x + (s - self.start) * width / (self.end - self.start))
x_end = int(x + (e - self.start) * width / (self.end - self.start))
if x_end > last_x_drawn:
ctx.rectangle(x_start, y, x_end - x_start, 10)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke_preserve()
color = self.colors.lookup(data_range.value)
ctx.set_source_rgb(color.r, color.g, color.b)
ctx.fill()
last_x_drawn = x_end
self.grey_background += 1
def draw(self, ctx):
"""! Draw
@param self this object
@param ctx ctx
@return none
"""
timeline_top = 0
top_y = self.padding / 2
left_x_start = self.padding / 2
left_x_end = left_x_start + self.left_width
right_x_start = left_x_end + self.padding
right_x_end = right_x_start + self.right_width
data_x_start = right_x_end + self.padding / 2
data_x_end = self.width
data_width = data_x_end - data_x_start
cur_y = top_y
self.draw_line(ctx, 0, 0, self.width, 0)
self.grey_background = 1
for timeline in self.timelines.get_all():
(y_bearing, t_width, t_height) = ctx.text_extents(timeline.name)[1:4]
ctx.move_to(left_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(timeline.name);
for events_int in timeline.get_events_int():
(y_bearing, t_width, t_height) = ctx.text_extents(events_int.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_int.name)
self.draw_events(ctx, events_int, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for events_str in timeline.get_events_str():
(y_bearing, t_width, t_height) = ctx.text_extents(events_str.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(events_str.name)
self.draw_events(ctx, events_str, data_x_start, cur_y, data_width, self.max_text_height + 5)
cur_y += self.max_text_height + 5 + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
for ranges in timeline.get_ranges():
(y_bearing, t_width, t_height) = ctx.text_extents(ranges.name)[1:4]
ctx.move_to(right_x_start, cur_y + self.max_text_height - (t_height + y_bearing))
ctx.show_text(ranges.name)
self.draw_ranges(ctx, ranges, data_x_start, cur_y, data_width, 10)
cur_y += self.max_text_height + self.padding
self.draw_line(ctx, right_x_start - self.padding / 2, cur_y - self.padding / 2,
self.right_width + self.padding, 0)
self.draw_line(ctx, 0, cur_y - self.padding / 2,
self.width, 0)
bot_y = cur_y - self.padding / 2
self.draw_line(ctx, left_x_end + self.padding / 2, 0,
0, bot_y)
self.draw_line(ctx, right_x_end + self.padding / 2, 0,
0, bot_y)
return
## ScaleRenderer class
class ScaleRenderer:
## @var __top
# top
## @var __lo
# lo
## @var __hi
# hi
## @var __delta
# delta
## @var __width
# width
## @var __height
# height
## @var max_text_height
# maximum text height
def __init__(self):
"""! Initializer
@param self this object
@return none
"""
self.__top = 0
return
def set_bounds(self, lo, hi):
"""! Set Bounds
@param self this object
@param lo lo
@param hi hi
@return none
"""
self.__lo = lo
self.__hi = hi
def get_position(self, x):
"""! Get Position
@param self this object
@param x x
@return real x
"""
        real_x = (x - self.__lo) * self.__width / (self.__hi - self.__lo)
return real_x
def set_top(self):
"""! Set Top
@param self this object
@return none
"""
self.__top = 1
def set_bot(self):
"""! Set Bottom
@param self this object
@return none
"""
self.__top = 0
def layout(self, width):
"""! Layout
@param self this object
@param width width
@return none
"""
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)
ctx = cairo.Context(surface)
# calculate scale delta
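        # Heuristic: take the largest power of ten that still fits the
        # data span and use it (or a tenth of it) as the major tick
        # spacing; draw() then places minor ticks at a tenth of that.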
data_delta = self.__hi - self.__lo
closest = 1
while (closest*10) < data_delta:
closest *= 10
if (data_delta / closest) == 0:
delta = closest
        elif (data_delta / closest) == 1:
delta = closest / 10
else:
delta = closest
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
self.__delta = delta
self.__width = width
# calculate text height
max_text_height = ctx.text_extents("ABCDEFGHIJKLMNOPQRSTUVWXYZabcedefghijklmnopqrstuvwxyz0123456789")[3]
self.max_text_height = max_text_height
height = max_text_height + 10
self.__height = height
def get_height(self):
"""! Get Height
        @param self this object
@return height
"""
return self.__height
def draw(self, ctx):
"""! Draw
@param self this object
@param ctx ctx
@return none
"""
delta = self.__delta
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
if self.__top == 1:
s = -1
else:
s = 1
# print scale points
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1.0)
ticks = range(int(start), int(end + delta), int(delta))
for x in ticks:
            real_x = (x - self.__lo) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 5*s)
ctx.close_path()
ctx.stroke()
(t_y_bearing, t_width, t_height) = ctx.text_extents(str(x))[1:4]
if self.__top:
text_delta = t_height + t_y_bearing
else:
text_delta = -t_y_bearing
ctx.move_to(real_x - t_width / 2, (5 + 5 + text_delta)*s)
ctx.show_text(str(x))
# draw subticks
delta /= 10
if delta > 0:
start = self.__lo - (self.__lo % delta) + delta
end = self.__hi - (self.__hi % delta)
for x in range(int(start), int(end + delta), int(delta)):
                real_x = (x - self.__lo) * self.__width / (self.__hi - self.__lo)
ctx.move_to(real_x, 0)
ctx.line_to(real_x, 3*s)
ctx.close_path()
ctx.stroke()
## GraphicRenderer class
class GraphicRenderer:
## @var __start
# start
## @var __end
# end
## @var __mid_scale
# mid scale
## @var __bot_scale
# bottom scale
## @var __width
# width
## @var __height
# height
## @var __r_start
# start
## @var __r_end
# end
## @var __data
# data
## @var __mid_scale
# mid scale
## @var __top_legend
# top legend
def __init__(self, start, end):
"""! Initializer
@param self this object
@param start start
@param end end
@return none
"""
self.__start = float(start)
self.__end = float(end)
self.__mid_scale = ScaleRenderer()
self.__mid_scale.set_top()
self.__bot_scale = ScaleRenderer()
self.__bot_scale.set_bounds(start, end)
self.__bot_scale.set_bot()
self.__width = 1
self.__height = 1
def get_width(self):
"""! Get Width
        @param self this object
@return width
"""
return self.__width
def get_height(self):
"""! Get Height
@param self this object
@return height
"""
return self.__height
# return x, y, width, height
def get_data_rectangle(self):
"""! Get Data Rectangle
@param self this object
@return rectangle
"""
y_start = self.__top_legend.get_height()
x_start = self.__data.get_data_x_start()
return(x_start, y_start, self.__width - x_start, self.__data.get_height())
def scale_data(self, x):
"""! Get Data Rectangle
@param self this object
@param x x
@return scaled x
"""
x_start = self.__data.get_data_x_start()
x_scaled = x / (self.__width - x_start) * (self.__r_end - self.__r_start)
return x_scaled
# return x, y, width, height
def get_selection_rectangle(self):
"""! Get Selection Rectangle
@param self this object
@return rectangle
"""
y_start = self.__top_legend.get_height() + self.__data.get_height() + self.__mid_scale.get_height() + 20
y_height = self.__bot_scale.get_height() + 20
x_start = self.__bot_scale.get_position(self.__r_start)
x_end = self.__bot_scale.get_position(self.__r_end)
return(x_start, y_start, x_end - x_start, y_height)
def scale_selection(self, x):
"""! Scale Selection
@param self this object
@param x the X
@return scaled X
"""
x_scaled = x / self.__width * (self.__end - self.__start)
return x_scaled
def set_range(self, start, end):
"""! Set Range
@param self this object
@param start start
@param end end
@return none
"""
s = min(start, end)
e = max(start, end)
start = max(self.__start, s)
end = min(self.__end, e)
self.__r_start = start
self.__r_end = end
self.__data.set_render_range(start, end)
self.__mid_scale.set_bounds(start, end)
self.layout(self.__width, self.__height)
def get_range(self):
"""! Get Range
@param self this object
@return range
"""
return(self.__r_start, self.__r_end)
def set_data(self, data):
"""! Set Date
@param self this object
@param data data
@return none
"""
self.__data = data
def set_top_legend(self, top_legend):
"""! Set Top Legend
@param self this object
@param top_legend The legend
@return none
"""
self.__top_legend = top_legend
def layout(self, width, height):
"""! Set Layout
@param self this object
@param width width
@param height height
@return none
"""
self.__width = width
self.__height = height
self.__top_legend.layout(width)
top_legend_height = self.__top_legend.get_height()
self.__data.layout(width)
self.__mid_scale.layout(width - self.__data.get_data_x_start())
self.__bot_scale.layout(width)
return
def __x_pixel(self, x, width):
"""! X Pixel
@param self this object
@param x x
@param width width
@return x pixel
"""
new_x = (x - self.__start) * width / (self.__end - self.__start)
return new_x
def draw(self, ctx):
"""! Draw
@param self this object
@param ctx ctx
@return none
"""
# default background is white
ctx.save()
ctx.set_source_rgb(1, 1, 1)
ctx.set_operator(cairo.OPERATOR_SOURCE)
ctx.rectangle(0, 0, self.__width, self.__height)
ctx.fill()
# top legend
ctx.save()
self.__top_legend.draw(ctx)
top_legend_height = self.__top_legend.get_height()
ctx.restore()
# separation line
ctx.move_to(0, top_legend_height)
ctx.line_to(self.__width, top_legend_height)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
# data
ctx.save()
ctx.translate(0,
top_legend_height)
self.__data.draw(ctx)
ctx.restore()
# scale below data
ctx.save()
ctx.translate(self.__data.get_data_x_start(),
top_legend_height + self.__data.get_height() + self.__mid_scale.get_height())
self.__mid_scale.draw(ctx)
ctx.restore()
height_used = top_legend_height + self.__data.get_height() + self.__mid_scale.get_height()
# separation between scale and left pane
ctx.move_to(self.__data.get_data_x_start(), height_used)
ctx.rel_line_to(0, -self.__mid_scale.get_height())
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(2)
ctx.stroke()
# separation below scale
ctx.move_to(0, height_used)
ctx.line_to(self.__width, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
select_start = self.__bot_scale.get_position(self.__r_start)
select_end = self.__bot_scale.get_position(self.__r_end)
# left connection between top scale and bottom scale
        ctx.move_to(0, height_used)
ctx.line_to(self.__data.get_data_x_start(), height_used)
ctx.line_to(select_start, height_used + 20)
ctx.line_to(0, height_used + 20)
ctx.line_to(0, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
# right connection between top scale and bottom scale
ctx.move_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + 20)
ctx.line_to(select_end, height_used + 20)
ctx.line_to(self.__width, height_used)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke_preserve()
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
height_used += 20
# unused area background
unused_start = self.__bot_scale.get_position(self.__r_start)
unused_end = self.__bot_scale.get_position(self.__r_end)
unused_height = self.__bot_scale.get_height() + 20
ctx.rectangle(0, height_used,
unused_start,
unused_height)
ctx.rectangle(unused_end,
height_used,
self.__width - unused_end,
unused_height)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.fill()
# border line around bottom scale
ctx.move_to(unused_end, height_used)
ctx.line_to(self.__width, height_used)
ctx.line_to(self.__width, height_used + unused_height)
ctx.line_to(0, height_used + unused_height)
ctx.line_to(0, height_used)
ctx.line_to(unused_start, height_used)
ctx.close_path()
ctx.set_line_width(2)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
ctx.move_to(unused_start, height_used)
ctx.line_to(unused_end, height_used)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0.9, 0.9, 0.9)
ctx.stroke()
# unused area dot borders
ctx.save()
ctx.move_to(max(unused_start, 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.move_to(min(unused_end, self.__width - 2), height_used)
ctx.rel_line_to(0, unused_height)
ctx.set_dash([5], 0)
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
ctx.restore()
# bottom scale
ctx.save()
ctx.translate(0, height_used)
self.__bot_scale.draw(ctx)
ctx.restore()
## GtkGraphicRenderer class
class GtkGraphicRenderer(gtk.DrawingArea):
## @var __data
# data
## @var __moving_left
# moving left
## @var __moving_right
# moving right
## @var __moving_both
# moving both
## @var __moving_top
# moving top
## @var __force_full_redraw
# full redraw
## @var __moving_left_cur
# moving left cur
## @var __moving_right_cur
# moving right cur
## @var __moving_both_start
# moving both start
## @var __moving_both_cur
# moving both cur
## @var __moving_top_cur
# moving top cur
## @var __moving_top_start
# moving top start
## @var __width
# width
## @var __height
# height
## @var __buffer_surface
# buffer surface
def __init__(self, data):
"""! Initializer
@param self this object
@param data data
@return none
"""
super(GtkGraphicRenderer, self).__init__()
self.__data = data
self.__moving_left = False
self.__moving_right = False
self.__moving_both = False
self.__moving_top = False
self.__force_full_redraw = True
self.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("expose_event", self.expose)
self.connect('size-allocate', self.size_allocate)
self.connect('motion-notify-event', self.motion_notify)
self.connect('button-press-event', self.button_press)
self.connect('button-release-event', self.button_release)
def set_smaller_zoom(self):
"""! Set Smaller Zoom
@param self this object
@return none
"""
(start, end) = self.__data.get_range()
self.__data.set_range(start, start + (end - start)*2)
self.__force_full_redraw = True
self.queue_draw()
def set_bigger_zoom(self):
"""! Set Bigger Zoom
@param self this object
@return none
"""
(start, end) = self.__data.get_range()
self.__data.set_range(start, start + (end - start) / 2)
self.__force_full_redraw = True
self.queue_draw()
def output_png(self, filename):
"""! Output PNG
@param self this object
@param filename file name
@return none
"""
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.__data.get_width(),
self.__data.get_height())
        # Render into the surface that is written to disk below.
        ctx = cairo.Context(surface)
self.__data.draw(ctx)
surface.write_to_png(filename)
def button_press(self, widget, event):
"""! Button Press
@param self this object
@param widget widget
@param event event
@return true if button has been pressed otherwise false
"""
(x, y, width, height) = self.__data.get_selection_rectangle()
(d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
if abs(event.x - x) < 5:
self.__moving_left = True
return True
if abs(event.x - (x + width)) < 5:
self.__moving_right = True
return True
if event.x > x and event.x < x + width:
self.__moving_both = True
self.__moving_both_start = event.x
self.__moving_both_cur = event.x
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
self.__moving_top = True
self.__moving_top_start = event.x
self.__moving_top_cur = event.x
return True
return False
def button_release(self, widget, event):
"""! Button Release
@param self this object
@param widget widget
@param event event
@return true if button was released otherwise false
"""
if self.__moving_left:
self.__moving_left = False
left = self.__data.scale_selection(self.__moving_left_cur)
right = self.__data.get_range()[1]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_right:
self.__moving_right = False
right = self.__data.scale_selection(self.__moving_right_cur)
left = self.__data.get_range()[0]
self.__data.set_range(left, right)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_both:
self.__moving_both = False
delta = self.__data.scale_selection(self.__moving_both_cur - self.__moving_both_start)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.queue_draw()
return True
if self.__moving_top:
self.__moving_top = False
return False
def motion_notify(self, widget, event):
"""! Motion Notify
@param self this object
@param widget widget
@param event event
@return true if moving otherwise false
"""
(x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
if event.x <= 0:
self.__moving_left_cur = 0
elif event.x >= x + width:
self.__moving_left_cur = x + width
else:
self.__moving_left_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_right:
if event.x >= self.__width:
                self.__moving_right_cur = self.__width
elif event.x < x:
self.__moving_right_cur = x
else:
self.__moving_right_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_both:
cur_e = self.__width - (x + width - self.__moving_both_start)
cur_s = (self.__moving_both_start - x)
if event.x < cur_s:
self.__moving_both_cur = cur_s
elif event.x > cur_e:
self.__moving_both_cur = cur_e
else:
self.__moving_both_cur = event.x
self.queue_draw_area(0, int(y), int(self.__width), int(height))
return True
if self.__moving_top:
self.__moving_top_cur = event.x
delta = self.__data.scale_data(self.__moving_top_start - self.__moving_top_cur)
(left, right) = self.__data.get_range()
self.__data.set_range(left + delta, right + delta)
self.__force_full_redraw = True
self.__moving_top_start = event.x
self.queue_draw()
return True
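        # Not dragging anything: update the mouse cursor to hint at the
        # interaction available under the pointer (horizontal resize
        # arrows over the selection edges, a move cursor over the
        # selection body and over the data area).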
(d_x, d_y, d_width, d_height) = self.__data.get_data_rectangle()
if event.y > y and event.y < y + height:
if abs(event.x - x) < 5 or abs(event.x - (x + width)) < 5:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.SB_H_DOUBLE_ARROW))
return True
if event.x > x and event.x < x + width:
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
if event.y > d_y and event.y < (d_y + d_height):
if event.x > d_x and event.x < (d_x + d_width):
widget.window.set_cursor(gtk.gdk.Cursor(gtk.gdk.FLEUR))
return True
widget.window.set_cursor(None)
return False
def size_allocate(self, widget, allocation):
"""! Size Allocate
@param self this object
@param widget widget
@param allocation allocation
@return none
"""
self.__width = allocation.width
self.__height = allocation.height
self.__data.layout(allocation.width, allocation.height)
self.__force_full_redraw = True
self.queue_draw()
def expose(self, widget, event):
"""! Expose
@param self this object
@param widget widget
@param event event
@return false
"""
if self.__force_full_redraw:
self.__buffer_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
self.__data.get_width(),
self.__data.get_height())
ctx = cairo.Context(self.__buffer_surface)
self.__data.draw(ctx)
self.__force_full_redraw = False
ctx = widget.window.cairo_create()
ctx.rectangle(event.area.x, event.area.y,
event.area.width, event.area.height)
ctx.clip()
ctx.set_source_surface(self.__buffer_surface)
ctx.paint()
(x, y, width, height) = self.__data.get_selection_rectangle()
if self.__moving_left:
ctx.move_to(max(self.__moving_left_cur, 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_right:
ctx.move_to(min(self.__moving_right_cur, self.__width - 2), y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_line_width(1)
ctx.set_source_rgb(0, 0, 0)
ctx.stroke()
if self.__moving_both:
delta_x = self.__moving_both_cur - self.__moving_both_start
ctx.move_to(x + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.move_to(x + width + delta_x, y)
ctx.rel_line_to(0, height)
ctx.close_path()
ctx.set_source_rgb(0, 0, 0)
ctx.set_line_width(1)
ctx.stroke()
return False
## MainWindow class
class MainWindow:
## @var __window
# window
## @var __render
# render
## @var __dialog
# dialog
def __init__(self):
"""! Initializer
@param self this object
@return none
"""
return
def run(self, graphic):
"""! Run function
@param self this object
@param graphic graphic
@return none
"""
window = gtk.Window()
self.__window = window
window.set_default_size(200, 200)
vbox = gtk.VBox()
window.add(vbox)
render = GtkGraphicRenderer(graphic)
self.__render = render
vbox.pack_end(render, True, True, 0)
hbox = gtk.HBox()
vbox.pack_start(hbox, False, False, 0)
smaller_zoom = gtk.Button("Zoom Out")
smaller_zoom.connect("clicked", self.__set_smaller_cb)
hbox.pack_start(smaller_zoom)
bigger_zoom = gtk.Button("Zoom In")
bigger_zoom.connect("clicked", self.__set_bigger_cb)
hbox.pack_start(bigger_zoom)
output_png = gtk.Button("Output Png")
output_png.connect("clicked", self.__output_png_cb)
hbox.pack_start(output_png)
window.connect('destroy', gtk.main_quit)
window.show_all()
#gtk.bindings_activate(gtk.main_quit, 'q', 0)
gtk.main()
def __set_smaller_cb(self, widget):
"""! Set Smaller Callback
@param self this object
@param widget widget
@return none
"""
self.__render.set_smaller_zoom()
def __set_bigger_cb(self, widget):
"""! Set Bigger Callback
@param self this object
@param widget widget
@return none
"""
self.__render.set_bigger_zoom()
def __output_png_cb(self, widget):
"""! Output PNG Callback
@param self this object
@param widget widget
@return none
"""
dialog = gtk.FileChooserDialog("Output Png", self.__window,
gtk.FILE_CHOOSER_ACTION_SAVE, ("Save", 1))
self.__dialog = dialog
dialog.set_default_response(1)
dialog.connect("response", self.__dialog_response_cb)
dialog.show()
return
def __dialog_response_cb(self, widget, response):
"""! Dialog Response Callback
@param self this object
@param widget widget
@param response response
@return none
"""
if response == 1:
filename = self.__dialog.get_filename()
self.__render.output_png(filename)
widget.hide()
return
## read_data function
def read_data(filename):
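    """! Read Data
    @param filename name of the trace file to parse
    @return a (colors, timelines) tuple built from the file contents

    Accepted line formats (inferred from the regular expressions below);
    unrecognised lines are silently ignored:
      range <timeline> <range-name> <value> <start> <end>
      event-str <timeline> <event-name> <value> <at>
      event-int <timeline> <event-name> <value> <at>
      color <value> #rrggbb
    """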
timelines = Timelines()
colors = Colors()
m1 = re.compile('range ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
m2 = re.compile('event-str ([^ ]+) ([^ ]+) ([^ ]+) ([0-9]+)')
m3 = re.compile('event-int ([^ ]+) ([^ ]+) ([0-9]+) ([0-9]+)')
m4 = re.compile('color ([^ ]+) #([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})([a-fA-F0-9]{2,2})')
with open(filename) as fh:
for line in fh.readlines():
m = m1.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
rang = timeline.get_range(m.group(2))
data_range = DataRange()
data_range.value = m.group(3)
data_range.start = int(m.group(4))
data_range.end = int(m.group(5))
rang.add_range(data_range)
continue
m = m2.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
ev = timeline.get_event_str(m.group(2))
event = EventString()
event.value = m.group(3)
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m3.match(line)
if m:
line_name = m.group(1)
timeline = timelines.get(m.group(1))
ev = timeline.get_event_int(m.group(2))
event = EventInt()
event.value = int(m.group(3))
event.at = int(m.group(4))
ev.add_event(event)
continue
m = m4.match(line)
if m:
r = int(m.group(2), 16)
g = int(m.group(3), 16)
b = int(m.group(4), 16)
                # Use float division so each channel maps into [0, 1];
                # integer division would floor the values to 0 or 1.
                color = Color(r / 255.0, g / 255.0, b / 255.0)
colors.add(m.group(1), color)
continue
timelines.sort()
return (colors, timelines)
def main():
(colors, timelines) = read_data(sys.argv[1])
(lower_bound, upper_bound) = timelines.get_bounds()
graphic = GraphicRenderer(lower_bound, upper_bound)
top_legend = TopLegendRenderer()
range_values = timelines.get_all_range_values()
range_colors = []
for range_value in range_values:
range_colors.append(colors.lookup(range_value))
top_legend.set_legends(range_values,
range_colors)
graphic.set_top_legend(top_legend)
data = TimelinesRenderer()
data.set_timelines(timelines, colors)
graphic.set_data(data)
# default range
    # centre the initial window on the data, so it also works when the
    # trace does not start at time 0
    range_mid = lower_bound + (upper_bound - lower_bound) / 2
range_width = (upper_bound - lower_bound) / 10
range_lo = range_mid - range_width / 2
range_hi = range_mid + range_width / 2
graphic.set_range(range_lo, range_hi)
main_window = MainWindow()
main_window.run(graphic)
if __name__ == '__main__':
    main()
| Gabrielcarvfer/NS3 | utils/grid.py | Python | gpl-2.0 | 54,401 | [
"FLEUR"
] | f07b6790a90f5c893a6cec5353aae07b6d074241ebbf1a4c58839ea9c62831e8 |
#!/usr/bin/env python
#
# Copyright (c) 2000 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
#
from DataTypes import UNIQUE_ID, ANY, ASCII_ARMORED_DATA, NON_NEGATIVE_INTEGER, MOD_VAL, INTEGER, ListMarker, OptionMarker
from OurMessagesPublicKey import *
BASE_COMM_STRAT_TEMPL = {
'broker id': OptionMarker([UNIQUE_ID, None]), # The "or `None'" is for backwards-compatibility -- we have accidentally been storing "Nones" under 'broker id' in the metatracker pickle... --Zooko 2001-09-02
'comm strat sequence num': OptionMarker(NON_NEGATIVE_INTEGER),
}
TCP_COMM_STRAT_TEMPL = {}
TCP_COMM_STRAT_TEMPL.update(BASE_COMM_STRAT_TEMPL)
RELAY_COMM_STRAT_TEMPL = {}
RELAY_COMM_STRAT_TEMPL.update(BASE_COMM_STRAT_TEMPL)
PICKUP_COMM_STRAT_TEMPL = {}
PICKUP_COMM_STRAT_TEMPL.update(BASE_COMM_STRAT_TEMPL)
TCP_COMM_STRAT_TEMPL.update({
'comm strategy type': "TCP",
'IP address': ANY,
'port number': NON_NEGATIVE_INTEGER
})
RELAY_COMM_STRAT_TEMPL.update({
'comm strategy type': "relay",
'relayer id': UNIQUE_ID,
})
PICKUP_COMM_STRAT_TEMPL.update({
'comm strategy type': "pickup",
})
CRYPTO_COMM_STRAT_TEMPL = {
'comm strategy type': "crypto",
'pubkey': PKFC_TEMPL,
'lowerstrategy': [
TCP_COMM_STRAT_TEMPL,
RELAY_COMM_STRAT_TEMPL,
PICKUP_COMM_STRAT_TEMPL,
]
}
COMM_STRAT_TEMPL = [
CRYPTO_COMM_STRAT_TEMPL,
TCP_COMM_STRAT_TEMPL,
RELAY_COMM_STRAT_TEMPL,
PICKUP_COMM_STRAT_TEMPL,
]
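# Illustrative example of a dictionary matching COMM_STRAT_TEMPL; the
# concrete values here are made up for documentation purposes only:
#
#     {'comm strategy type': "TCP",
#      'IP address': "10.0.0.1",
#      'port number': 14893,
#      'broker id': some_unique_id,
#      'comm strat sequence num': 3}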
# !!! Zooko: make "IP ADDRESS" template item type. --Zooko 2000/05/12
| zooko/egtp | common/OurMessagesCommStrat.py | Python | agpl-3.0 | 1,686 | [
"VisIt"
] | 1078bbf4f9e9280825c046f5a1a93d458f416424a911ce64fc1cbab95583a313 |
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides testing capabilities and customisations specific to Iris.
.. note:: This module needs to control the matplotlib backend, so it
**must** be imported before ``matplotlib.pyplot``.
The primary class for this module is :class:`IrisTest`.
By default, this module sets the matplotlib backend to "agg". But when
this module is imported it checks ``sys.argv`` for the flag "-d". If
found, it is removed from ``sys.argv`` and the matplotlib backend is
switched to "tkagg" to allow the interactive visual inspection of
graphical test results.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import codecs
import collections
import contextlib
import datetime
import difflib
import filecmp
import functools
import gzip
import inspect
import json
import io
import logging
import math
import os
import os.path
import shutil
import subprocess
import sys
import unittest
import threading
import warnings
import xml.dom.minidom
import zlib
try:
from unittest import mock
except ImportError:
import mock
import filelock
import numpy as np
import numpy.ma as ma
import requests
import iris.cube
import iris.config
import iris.util
# Test for availability of matplotlib.
# (And remove matplotlib as an iris.tests dependency.)
try:
import matplotlib
matplotlib.use('agg')
import matplotlib.testing.compare as mcompare
import matplotlib.pyplot as plt
except ImportError:
MPL_AVAILABLE = False
else:
MPL_AVAILABLE = True
try:
from osgeo import gdal
except ImportError:
GDAL_AVAILABLE = False
else:
GDAL_AVAILABLE = True
try:
import iris_grib
GRIB_AVAILABLE = True
from iris_grib.message import GribMessage
except ImportError:
try:
import gribapi
GRIB_AVAILABLE = True
from iris.fileformats.grib.message import GribMessage
except ImportError:
GRIB_AVAILABLE = False
try:
import iris_sample_data
except ImportError:
SAMPLE_DATA_AVAILABLE = False
else:
SAMPLE_DATA_AVAILABLE = True
try:
import nc_time_axis
NC_TIME_AXIS_AVAILABLE = True
except ImportError:
NC_TIME_AXIS_AVAILABLE = False
try:
requests.get('https://github.com/SciTools/iris')
INET_AVAILABLE = True
except requests.exceptions.ConnectionError:
INET_AVAILABLE = False
try:
import stratify
STRATIFY_AVAILABLE = True
except ImportError:
STRATIFY_AVAILABLE = False
#: Basepath for test results.
_RESULT_PATH = os.path.join(os.path.dirname(__file__), 'results')
#: Default perceptual hash size.
_HASH_SIZE = 16
#: Default maximum perceptual hash hamming distance.
_HAMMING_DISTANCE = 2
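# A graphics test passes when at least one stored reference hash lies
# within _HAMMING_DISTANCE bits of the result image's perceptual hash
# (see IrisTest.check_graphic below).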
if '--data-files-used' in sys.argv:
sys.argv.remove('--data-files-used')
fname = '/var/tmp/all_iris_test_resource_paths.txt'
print('saving list of files used by tests to %s' % fname)
_EXPORT_DATAPATHS_FILE = open(fname, 'w')
else:
_EXPORT_DATAPATHS_FILE = None
if '--create-missing' in sys.argv:
sys.argv.remove('--create-missing')
print('Allowing creation of missing test results.')
os.environ['IRIS_TEST_CREATE_MISSING'] = 'true'
# A shared logger for use by unit tests
logger = logging.getLogger('tests')
# Whether to display matplotlib output to the screen.
_DISPLAY_FIGURES = False
if (MPL_AVAILABLE and '-d' in sys.argv):
sys.argv.remove('-d')
plt.switch_backend('tkagg')
_DISPLAY_FIGURES = True
# Threading non re-entrant blocking lock to ensure thread-safe plotting.
_lock = threading.Lock()
def main():
"""A wrapper for unittest.main() which adds iris.test specific options to the help (-h) output."""
if '-h' in sys.argv or '--help' in sys.argv:
stdout = sys.stdout
buff = io.StringIO()
# NB. unittest.main() raises an exception after it's shown the help text
try:
sys.stdout = buff
unittest.main()
finally:
sys.stdout = stdout
lines = buff.getvalue().split('\n')
lines.insert(9, 'Iris-specific options:')
lines.insert(10, ' -d Display matplotlib figures (uses tkagg).')
lines.insert(11, ' NOTE: To compare results of failing tests, ')
lines.insert(12, ' use idiff.py instead')
lines.insert(13, ' --data-files-used Save a list of files used to a temporary file')
        lines.insert(
            14, '  --create-missing     Create missing test results')
print('\n'.join(lines))
else:
unittest.main()
def get_data_path(relative_path):
"""
Return the absolute path to a data file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
test_data_dir = iris.config.TEST_DATA_DIR
if test_data_dir is None:
test_data_dir = ''
data_path = os.path.join(test_data_dir, relative_path)
if _EXPORT_DATAPATHS_FILE is not None:
_EXPORT_DATAPATHS_FILE.write(data_path + '\n')
if isinstance(data_path, six.string_types) and not os.path.exists(data_path):
# if the file is gzipped, ungzip it and return the path of the ungzipped
# file.
gzipped_fname = data_path + '.gz'
if os.path.exists(gzipped_fname):
with gzip.open(gzipped_fname, 'rb') as gz_fh:
try:
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
except IOError:
# Put ungzipped data file in a temporary path, since we
# can't write to the original path (maybe it is owned by
# the system.)
_, ext = os.path.splitext(data_path)
data_path = iris.util.create_temp_filename(suffix=ext)
with open(data_path, 'wb') as fh:
fh.writelines(gz_fh)
return data_path
class IrisTest_nometa(unittest.TestCase):
"""A subclass of unittest.TestCase which provides Iris specific testing functionality."""
_assertion_counts = collections.defaultdict(int)
@classmethod
def setUpClass(cls):
# Ensure that the CF profile if turned-off for testing.
iris.site_configuration['cf_profile'] = None
def _assert_str_same(self, reference_str, test_str, reference_filename, type_comparison_name='Strings'):
if reference_str != test_str:
diff = ''.join(difflib.unified_diff(reference_str.splitlines(1), test_str.splitlines(1),
'Reference', 'Test result', '', '', 0))
self.fail("%s do not match: %s\n%s" % (type_comparison_name, reference_filename, diff))
@staticmethod
def get_result_path(relative_path):
"""
Returns the absolute path to a result file when given the relative path
as a string, or sequence of strings.
"""
if not isinstance(relative_path, six.string_types):
relative_path = os.path.join(*relative_path)
return os.path.abspath(os.path.join(_RESULT_PATH, relative_path))
def result_path(self, basename=None, ext=''):
"""
Return the full path to a test result, generated from the \
calling file, class and, optionally, method.
Optional kwargs :
* basename - File basename. If omitted, this is \
generated from the calling method.
* ext - Appended file extension.
"""
if ext and not ext.startswith('.'):
ext = '.' + ext
# Generate the folder name from the calling file name.
path = os.path.abspath(inspect.getfile(self.__class__))
path = os.path.splitext(path)[0]
sub_path = path.rsplit('iris', 1)[1].split('tests', 1)[1][1:]
# Generate the file name from the calling function name?
if basename is None:
stack = inspect.stack()
for frame in stack[1:]:
if 'test_' in frame[3]:
basename = frame[3].replace('test_', '')
break
filename = basename + ext
result = os.path.join(self.get_result_path(''),
sub_path.replace('test_', ''),
self.__class__.__name__.replace('Test_', ''),
filename)
return result
def assertCMLApproxData(self, cubes, reference_filename=None, **kwargs):
# passes args and kwargs on to approx equal
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
reference_filename = [self.get_result_path(reference_filename)]
for i, cube in enumerate(cubes):
fname = list(reference_filename)
# don't want the ".cml" for the json stats file
if fname[-1].endswith(".cml"):
fname[-1] = fname[-1][:-4]
fname[-1] += '.data.%d.json' % i
self.assertCubeDataAlmostEqual(cube, fname, **kwargs)
self.assertCML(cubes, reference_filename, checksum=False)
def assertCDL(self, netcdf_filename, reference_filename=None, flags='-h'):
"""
Test that the CDL for the given netCDF file matches the contents
of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* netcdf_filename:
The path to the netCDF file.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* flags:
Command-line flags for `ncdump`, as either a whitespace
separated string or an iterable. Defaults to '-h'.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'cdl')
else:
reference_path = self.get_result_path(reference_filename)
# Convert the netCDF file to CDL file format.
cdl_filename = iris.util.create_temp_filename(suffix='.cdl')
if flags is None:
flags = []
elif isinstance(flags, six.string_types):
flags = flags.split()
else:
flags = list(map(str, flags))
with open(cdl_filename, 'w') as cdl_file:
subprocess.check_call(['ncdump'] + flags + [netcdf_filename],
stderr=cdl_file, stdout=cdl_file)
# Ingest the CDL for comparison, excluding first line.
with open(cdl_filename, 'r') as cdl_file:
lines = cdl_file.readlines()[1:]
# Sort the dimensions (except for the first, which can be unlimited).
# This gives consistent CDL across different platforms.
sort_key = lambda line: ('UNLIMITED' not in line, line)
dimension_lines = slice(lines.index('dimensions:\n') + 1,
lines.index('variables:\n'))
lines[dimension_lines] = sorted(lines[dimension_lines], key=sort_key)
cdl = ''.join(lines)
os.remove(cdl_filename)
self._check_same(cdl, reference_path, type_comparison_name='CDL')
def assertCML(self, cubes, reference_filename=None, checksum=True):
"""
Test that the CML for the given cubes matches the contents of
the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* cubes:
Either a Cube or a sequence of Cubes.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
* checksum:
When True, causes the CML to include a checksum for each
Cube's data. Defaults to True.
"""
if isinstance(cubes, iris.cube.Cube):
cubes = [cubes]
if reference_filename is None:
reference_filename = self.result_path(None, 'cml')
if isinstance(cubes, (list, tuple)):
xml = iris.cube.CubeList(cubes).xml(checksum=checksum, order=False,
byteorder=False)
else:
xml = cubes.xml(checksum=checksum, order=False, byteorder=False)
reference_path = self.get_result_path(reference_filename)
self._check_same(xml, reference_path)
def assertTextFile(self, source_filename, reference_filename, desc="text file"):
"""Check if two text files are the same, printing any diffs."""
with open(source_filename) as source_file:
source_text = source_file.readlines()
with open(reference_filename) as reference_file:
reference_text = reference_file.readlines()
if reference_text != source_text:
diff = ''.join(difflib.unified_diff(reference_text, source_text, 'Reference', 'Test result', '', '', 0))
self.fail("%s does not match reference file: %s\n%s" % (desc, reference_filename, diff))
def assertCubeDataAlmostEqual(self, cube, reference_filename, **kwargs):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
with open(reference_path, 'r') as reference_file:
stats = json.load(reference_file)
self.assertEqual(stats.get('shape', []), list(cube.shape))
self.assertEqual(stats.get('masked', False),
isinstance(cube.data, ma.MaskedArray))
nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
stats.get('max', 0.), stats.get('min', 0.)),
dtype=np.float_)
if math.isnan(stats.get('mean', 0.)):
self.assertTrue(math.isnan(cube.data.mean()))
else:
cube_stats = np.array((cube.data.mean(), cube.data.std(),
cube.data.max(), cube.data.min()),
dtype=np.float_)
self.assertArrayAllClose(nstats, cube_stats, **kwargs)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
masked = False
if isinstance(cube.data, ma.MaskedArray):
masked = True
stats = {'mean': np.float_(cube.data.mean()),
'std': np.float_(cube.data.std()),
'max': np.float_(cube.data.max()),
'min': np.float_(cube.data.min()),
'shape': cube.shape, 'masked': masked}
with open(reference_path, 'w') as reference_file:
reference_file.write(json.dumps(stats))
def assertFilesEqual(self, test_filename, reference_filename):
reference_path = self.get_result_path(reference_filename)
if self._check_reference_file(reference_path):
fmt = 'test file {!r} does not match reference {!r}.'
self.assertTrue(filecmp.cmp(test_filename, reference_path),
fmt.format(test_filename, reference_path))
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
shutil.copy(test_filename, reference_path)
def assertString(self, string, reference_filename=None):
"""
Test that `string` matches the contents of the reference file.
If the environment variable IRIS_TEST_CREATE_MISSING is
non-empty, the reference file is created if it doesn't exist.
Args:
* string:
The string to check.
Kwargs:
* reference_filename:
The relative path (relative to the test results directory).
If omitted, the result is generated from the calling
method's name, class, and module using
:meth:`iris.tests.IrisTest.result_path`.
"""
if reference_filename is None:
reference_path = self.result_path(None, 'txt')
else:
reference_path = self.get_result_path(reference_filename)
self._check_same(string, reference_path,
type_comparison_name='Strings')
def assertRepr(self, obj, reference_filename):
self.assertString(repr(obj), reference_filename)
def _check_same(self, item, reference_path, type_comparison_name='CML'):
if self._check_reference_file(reference_path):
with open(reference_path, 'rb') as reference_fh:
reference = ''.join(part.decode('utf-8')
for part in reference_fh.readlines())
self._assert_str_same(reference, item, reference_path,
type_comparison_name)
else:
self._ensure_folder(reference_path)
logger.warning('Creating result file: %s', reference_path)
with open(reference_path, 'wb') as reference_fh:
reference_fh.writelines(
part.encode('utf-8')
for part in item)
def assertXMLElement(self, obj, reference_filename):
"""
Calls the xml_element method given obj and asserts the result is the same as the test file.
"""
doc = xml.dom.minidom.Document()
doc.appendChild(obj.xml_element(doc))
pretty_xml = doc.toprettyxml(indent=" ")
reference_path = self.get_result_path(reference_filename)
self._check_same(pretty_xml, reference_path,
type_comparison_name='XML')
def assertArrayEqual(self, a, b, err_msg=''):
np.testing.assert_array_equal(a, b, err_msg=err_msg)
def _assertMaskedArray(self, assertion, a, b, strict, **kwargs):
# Define helper function to extract unmasked values as a 1d
# array.
def unmasked_data_as_1d_array(array):
array = ma.asarray(array)
if array.ndim == 0:
if array.mask:
data = np.array([])
else:
data = np.array([array.data])
else:
data = array.data[~ma.getmaskarray(array)]
return data
# Compare masks. This will also check that the array shapes
# match, which is not tested when comparing unmasked values if
# strict is False.
a_mask, b_mask = ma.getmaskarray(a), ma.getmaskarray(b)
np.testing.assert_array_equal(a_mask, b_mask)
if strict:
assertion(a.data, b.data, **kwargs)
else:
assertion(unmasked_data_as_1d_array(a),
unmasked_data_as_1d_array(b),
**kwargs)
def assertMaskedArrayEqual(self, a, b, strict=False):
"""
Check that masked arrays are equal. This requires the
unmasked values and masks to be identical.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
"""
self._assertMaskedArray(np.testing.assert_array_equal, a, b, strict)
def assertArrayAlmostEqual(self, a, b, decimal=6):
np.testing.assert_array_almost_equal(a, b, decimal=decimal)
def assertMaskedArrayAlmostEqual(self, a, b, decimal=6, strict=False):
"""
Check that masked arrays are almost equal. This requires the
masks to be identical, and the unmasked values to be almost
equal.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* strict (bool):
If True, perform a complete mask and data array equality check.
If False (default), the data array equality considers only unmasked
elements.
* decimal (int):
Equality tolerance level for
:meth:`numpy.testing.assert_array_almost_equal`, with the meaning
'abs(desired-actual) < 0.5 * 10**(-decimal)'
"""
self._assertMaskedArray(np.testing.assert_array_almost_equal, a, b,
strict, decimal=decimal)
def assertArrayAllClose(self, a, b, rtol=1.0e-7, atol=0.0, **kwargs):
"""
Check arrays are equal, within given relative + absolute tolerances.
Args:
* a, b (array-like):
Two arrays to compare.
Kwargs:
* rtol, atol (float):
Relative and absolute tolerances to apply.
Any additional kwargs are passed to numpy.testing.assert_allclose.
Performs pointwise toleranced comparison, and raises an assertion if
the two are not equal 'near enough'.
For full details see underlying routine numpy.testing.assert_allclose.
"""
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, **kwargs)
@contextlib.contextmanager
def temp_filename(self, suffix=''):
filename = iris.util.create_temp_filename(suffix)
try:
yield filename
finally:
os.remove(filename)
def file_checksum(self, file_path):
"""
Generate checksum from file.
"""
with open(file_path, "rb") as in_file:
return zlib.crc32(in_file.read())
def _unique_id(self):
"""
Returns the unique ID for the current assertion.
The ID is composed of two parts: a unique ID for the current test
(which is itself composed of the module, class, and test names), and
a sequential counter (specific to the current test) that is incremented
on each call.
For example, calls from a "test_tx" routine followed by a "test_ty"
routine might result in::
test_plot.TestContourf.test_tx.0
test_plot.TestContourf.test_tx.1
test_plot.TestContourf.test_tx.2
test_plot.TestContourf.test_ty.0
"""
# Obtain a consistent ID for the current test.
# NB. unittest.TestCase.id() returns different values depending on
# whether the test has been run explicitly, or via test discovery.
# For example:
# python tests/test_plot.py => '__main__.TestContourf.test_tx'
# ird -t => 'iris.tests.test_plot.TestContourf.test_tx'
bits = self.id().split('.')
if bits[0] == '__main__':
floc = sys.modules['__main__'].__file__
path, file_name = os.path.split(os.path.abspath(floc))
bits[0] = os.path.splitext(file_name)[0]
folder, location = os.path.split(path)
bits = [location] + bits
while location not in ['iris', 'example_tests']:
folder, location = os.path.split(folder)
bits = [location] + bits
test_id = '.'.join(bits)
# Derive the sequential assertion ID within the test
assertion_id = self._assertion_counts[test_id]
self._assertion_counts[test_id] += 1
return test_id + '.' + str(assertion_id)
def _check_reference_file(self, reference_path):
reference_exists = os.path.isfile(reference_path)
if not (reference_exists or
os.environ.get('IRIS_TEST_CREATE_MISSING')):
msg = 'Missing test result: {}'.format(reference_path)
raise AssertionError(msg)
return reference_exists
def _ensure_folder(self, path):
dir_path = os.path.dirname(path)
if not os.path.exists(dir_path):
logger.warning('Creating folder: %s', dir_path)
os.makedirs(dir_path)
def check_graphic(self):
"""
Check the hash of the current matplotlib figure matches the expected
image hash for the current graphic test.
To create missing image test results, set the IRIS_TEST_CREATE_MISSING
        environment variable before running the tests. This will result in
        new and appropriately named "<hash>.png" image files being
        generated in the image output directory, and the imagerepo.json
        file being updated.
"""
import imagehash
from PIL import Image
dev_mode = os.environ.get('IRIS_TEST_CREATE_MISSING')
unique_id = self._unique_id()
repo_fname = os.path.join(_RESULT_PATH, 'imagerepo.json')
with open(repo_fname, 'rb') as fi:
repo = json.load(codecs.getreader('utf-8')(fi))
try:
#: The path where the images generated by the tests should go.
image_output_directory = os.path.join(os.path.dirname(__file__),
'result_image_comparison')
if not os.access(image_output_directory, os.W_OK):
if not os.access(os.getcwd(), os.W_OK):
raise IOError('Write access to a local disk is required '
'to run image tests. Run the tests from a '
'current working directory you have write '
'access to to avoid this issue.')
else:
image_output_directory = os.path.join(
os.getcwd(), 'iris_image_test_output')
result_fname = os.path.join(image_output_directory,
'result-' + unique_id + '.png')
if not os.path.isdir(image_output_directory):
# Handle race-condition where the directories are
# created sometime between the check above and the
# creation attempt below.
try:
os.makedirs(image_output_directory)
except OSError as err:
# Don't care about "File exists"
if err.errno != 17:
raise
def _create_missing():
fname = '{}.png'.format(phash)
base_uri = ('https://scitools.github.io/test-iris-imagehash/'
'images/{}')
uri = base_uri.format(fname)
hash_fname = os.path.join(image_output_directory, fname)
uris = repo.setdefault(unique_id, [])
uris.append(uri)
print('Creating image file: {}'.format(hash_fname))
figure.savefig(hash_fname)
msg = 'Creating imagerepo entry: {} -> {}'
print(msg.format(unique_id, uri))
lock = filelock.FileLock(os.path.join(_RESULT_PATH,
'imagerepo.lock'))
# The imagerepo.json file is a critical resource, so ensure
# thread safe read/write behaviour via platform independent
# file locking.
with lock.acquire(timeout=600):
with open(repo_fname, 'wb') as fo:
json.dump(repo, codecs.getwriter('utf-8')(fo),
indent=4, sort_keys=True)
# Calculate the test result perceptual image hash.
buffer = io.BytesIO()
figure = plt.gcf()
figure.savefig(buffer, format='png')
buffer.seek(0)
phash = imagehash.phash(Image.open(buffer), hash_size=_HASH_SIZE)
if unique_id not in repo:
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
emsg = 'Missing image test result: {}.'
raise AssertionError(emsg.format(unique_id))
else:
uris = repo[unique_id]
# Create the expected perceptual image hashes from the uris.
to_hash = imagehash.hex_to_hash
expected = [to_hash(os.path.splitext(os.path.basename(uri))[0],
hash_size=_HASH_SIZE)
for uri in uris]
# Calculate the hamming distance vector for the result hash.
distances = [e - phash for e in expected]
if np.all([hd > _HAMMING_DISTANCE for hd in distances]):
if dev_mode:
_create_missing()
else:
figure.savefig(result_fname)
msg = ('Bad phash {} with hamming distance {} '
'for test {}.')
msg = msg.format(phash, distances, unique_id)
if _DISPLAY_FIGURES:
                            emsg = 'Image comparison would have failed: {}'
print(emsg.format(msg))
else:
emsg = 'Image comparison failed: {}'
raise AssertionError(emsg.format(msg))
if _DISPLAY_FIGURES:
plt.show()
finally:
plt.close()
def _remove_testcase_patches(self):
"""Helper to remove per-testcase patches installed by :meth:`patch`."""
# Remove all patches made, ignoring errors.
for p in self.testcase_patches:
p.stop()
# Reset per-test patch control variable.
self.testcase_patches.clear()
def patch(self, *args, **kwargs):
"""
Install a mock.patch, to be removed after the current test.
The patch is created with mock.patch(*args, **kwargs).
Returns:
The substitute object returned by patch.start().
For example::
mock_call = self.patch('module.Class.call', return_value=1)
module_Class_instance.call(3, 4)
self.assertEqual(mock_call.call_args_list, [mock.call(3, 4)])
"""
# Make the new patch and start it.
patch = mock.patch(*args, **kwargs)
start_result = patch.start()
# Create the per-testcases control variable if it does not exist.
# NOTE: this mimics a setUp method, but continues to work when a
# subclass defines its own setUp.
if not hasattr(self, 'testcase_patches'):
self.testcase_patches = {}
# When installing the first patch, schedule remove-all at cleanup.
if not self.testcase_patches:
self.addCleanup(self._remove_testcase_patches)
# Record the new patch and start object for reference.
self.testcase_patches[patch] = start_result
# Return patch replacement object.
return start_result
def assertArrayShapeStats(self, result, shape, mean, std_dev, rtol=1e-6):
"""
Assert that the result, a cube, has the provided shape and that the
mean and standard deviation of the data array are also as provided.
Thus build confidence that a cube processing operation, such as a
cube.regrid, has maintained its behaviour.
"""
self.assertEqual(result.shape, shape)
self.assertArrayAllClose(result.data.mean(), mean, rtol=rtol)
self.assertArrayAllClose(result.data.std(), std_dev, rtol=rtol)
# An environment variable controls whether test timings are output.
#
# NOTE: to run tests with timing output, nosetests cannot be used.
# At present, that includes not using "python setup.py test"
# The typically best way is like this :
# $ export IRIS_TEST_TIMINGS=1
# $ python -m unittest discover -s iris.tests
# and commonly adding ...
# | grep "TIMING TEST" >iris_test_output.txt
#
_PRINT_TEST_TIMINGS = bool(int(os.environ.get('IRIS_TEST_TIMINGS', 0)))
def _method_path(meth):
cls = meth.im_class
return '.'.join([cls.__module__, cls.__name__, meth.__name__])
def _testfunction_timing_decorator(fn):
# Function decorator for making a testcase print its execution time.
@functools.wraps(fn)
def inner(*args, **kwargs):
start_time = datetime.datetime.now()
try:
result = fn(*args, **kwargs)
finally:
end_time = datetime.datetime.now()
elapsed_time = (end_time - start_time).total_seconds()
msg = '\n TEST TIMING -- "{}" took : {:12.6f} sec.'
name = _method_path(fn)
print(msg.format(name, elapsed_time))
return result
return inner
def iristest_timing_decorator(cls):
# Class decorator to make all "test_.." functions print execution timings.
if _PRINT_TEST_TIMINGS:
# NOTE: 'dir' scans *all* class properties, including inherited ones.
attr_names = dir(cls)
for attr_name in attr_names:
attr = getattr(cls, attr_name)
if callable(attr) and attr_name.startswith('test'):
attr = _testfunction_timing_decorator(attr)
setattr(cls, attr_name, attr)
return cls
class _TestTimingsMetaclass(type):
# An alternative metaclass for IrisTest subclasses, which makes
# them print execution timings for all the testcases.
# This is equivalent to applying the @iristest_timing_decorator to
# every test class that inherits from IrisTest.
# NOTE: however, it means you *cannot* specify a different metaclass for
# your test class inheriting from IrisTest.
# See below for how to solve that where needed.
def __new__(cls, clsname, base_classes, attrs):
result = type.__new__(cls, clsname, base_classes, attrs)
if _PRINT_TEST_TIMINGS:
result = iristest_timing_decorator(result)
return result
class IrisTest(six.with_metaclass(_TestTimingsMetaclass, IrisTest_nometa)):
# Derive the 'ordinary' IrisTest from IrisTest_nometa, but add the
# metaclass that enables test timings output.
# This means that all subclasses also get the timing behaviour.
# However, if a different metaclass is *wanted* for an IrisTest subclass,
# this would cause a metaclass conflict.
# Instead, you can inherit from IrisTest_nometa and apply the
# @iristest_timing_decorator explicitly to your new testclass.
pass
get_result_path = IrisTest.get_result_path
class GraphicsTestMixin(object):
# nose directive: dispatch tests concurrently.
_multiprocess_can_split_ = True
def setUp(self):
# Acquire threading non re-entrant blocking lock to ensure
# thread-safe plotting.
_lock.acquire()
# Make sure we have no unclosed plots from previous tests before
# generating this one.
if MPL_AVAILABLE:
plt.close('all')
def tearDown(self):
# If a plotting test bombs out it can leave the current figure
# in an odd state, so we make sure it's been disposed of.
if MPL_AVAILABLE:
plt.close('all')
# Release the non re-entrant blocking lock.
_lock.release()
class GraphicsTest(GraphicsTestMixin, IrisTest):
pass
class GraphicsTest_nometa(GraphicsTestMixin, IrisTest_nometa):
# Graphicstest without the metaclass providing test timings.
pass
class TestGribMessage(IrisTest):
def assertGribMessageContents(self, filename, contents):
"""
Evaluate whether all messages in a GRIB2 file contain the provided
contents.
* filename (string)
The path on disk of an existing GRIB file
* contents
An iterable of GRIB message keys and expected values.
"""
messages = GribMessage.messages_from_filename(filename)
for message in messages:
for element in contents:
section, key, val = element
self.assertEqual(message.sections[section][key], val)
def assertGribMessageDifference(self, filename1, filename2, diffs,
skip_keys=(), skip_sections=()):
"""
Evaluate that the two messages only differ in the ways specified.
* filename[0|1] (string)
The path on disk of existing GRIB files
* diffs
An dictionary of GRIB message keys and expected diff values:
{key: (m1val, m2val),...} .
* skip_keys
An iterable of key names to ignore during comparison.
* skip_sections
An iterable of section numbers to ignore during comparison.
"""
messages1 = list(GribMessage.messages_from_filename(filename1))
messages2 = list(GribMessage.messages_from_filename(filename2))
self.assertEqual(len(messages1), len(messages2))
for m1, m2 in zip(messages1, messages2):
m1_sect = set(m1.sections.keys())
m2_sect = set(m2.sections.keys())
for missing_section in (m1_sect ^ m2_sect):
what = ('introduced'
if missing_section in m1_sect else 'removed')
# Assert that an introduced section is in the diffs.
self.assertIn(missing_section, skip_sections,
msg='Section {} {}'.format(missing_section,
what))
for section in (m1_sect & m2_sect):
# For each section, check that the differences are
# known diffs.
m1_keys = set(m1.sections[section]._keys)
m2_keys = set(m2.sections[section]._keys)
difference = m1_keys ^ m2_keys
unexpected_differences = difference - set(skip_keys)
if unexpected_differences:
self.fail("There were keys in section {} which \n"
"weren't in both messages and which weren't "
"skipped.\n{}"
"".format(section,
', '.join(unexpected_differences)))
keys_to_compare = m1_keys & m2_keys - set(skip_keys)
for key in keys_to_compare:
m1_value = m1.sections[section][key]
m2_value = m2.sections[section][key]
msg = '{} {} != {}'
if key not in diffs:
# We have a key which we expect to be the same for
# both messages.
if isinstance(m1_value, np.ndarray):
# A large tolerance appears to be required for
# gribapi 1.12, but not for 1.14.
self.assertArrayAlmostEqual(m1_value, m2_value,
decimal=2)
else:
self.assertEqual(m1_value, m2_value,
msg=msg.format(key, m1_value,
m2_value))
else:
# We have a key which we expect to be different
# for each message.
self.assertEqual(m1_value, diffs[key][0],
msg=msg.format(key, m1_value,
diffs[key][0]))
self.assertEqual(m2_value, diffs[key][1],
msg=msg.format(key, m2_value,
diffs[key][1]))
def skip_data(fn):
"""
Decorator to choose whether to run tests, based on the availability of
external data.
Example usage:
@skip_data
class MyDataTests(tests.IrisTest):
...
"""
no_data = (not iris.config.TEST_DATA_DIR
or not os.path.isdir(iris.config.TEST_DATA_DIR)
or os.environ.get('IRIS_TEST_NO_DATA'))
skip = unittest.skipIf(
condition=no_data,
reason='Test(s) require external data.')
return skip(fn)
def skip_gdal(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
GDAL library.
Example usage:
@skip_gdal
class MyGeoTiffTests(test.IrisTest):
...
"""
skip = unittest.skipIf(
condition=not GDAL_AVAILABLE,
reason="Test requires 'gdal'.")
return skip(fn)
def skip_plot(fn):
"""
Decorator to choose whether to run tests, based on the availability of the
matplotlib library.
Example usage:
@skip_plot
class MyPlotTests(test.GraphicsTest):
...
"""
skip = unittest.skipIf(
condition=not MPL_AVAILABLE,
reason='Graphics tests require the matplotlib library.')
return skip(fn)
skip_grib = unittest.skipIf(not GRIB_AVAILABLE, 'Test(s) require "gribapi", '
'which is not available.')
skip_sample_data = unittest.skipIf(not SAMPLE_DATA_AVAILABLE,
('Test(s) require "iris_sample_data", '
'which is not available.'))
skip_nc_time_axis = unittest.skipIf(
not NC_TIME_AXIS_AVAILABLE,
'Test(s) require "nc_time_axis", which is not available.')
skip_inet = unittest.skipIf(not INET_AVAILABLE,
('Test(s) require an "internet connection", '
'which is not available.'))
skip_stratify = unittest.skipIf(
not STRATIFY_AVAILABLE,
'Test(s) require "python-stratify", which is not available.')
def no_warnings(func):
"""
Provides a decorator to ensure that there are no warnings raised
within the test, otherwise the test will fail.
"""
@functools.wraps(func)
def wrapped(self, *args, **kwargs):
with mock.patch('warnings.warn') as warn:
result = func(self, *args, **kwargs)
self.assertEqual(0, warn.call_count,
('Got unexpected warnings.'
' \n{}'.format(warn.call_args_list)))
return result
return wrapped
| zak-k/iris | lib/iris/tests/__init__.py | Python | gpl-3.0 | 43,664 | [
"NetCDF"
] | 7ea598e2e279a075e9b35926d7a98b6cb5f03ab8880aa78417abb9371958ccb5 |
from enthought.mayavi import mlab
def cutPlanes( volume, colormap='gist_ncar' ):
'''Display a 3D volume of scalars with two cut planes.
volume: a three dimensional array of scalars
'''
scalarField = mlab.pipeline.scalar_field( volume )
mlab.pipeline.image_plane_widget(scalarField,
plane_orientation='z_axes',
slice_index=10,
colormap = colormap
)
mlab.pipeline.image_plane_widget(scalarField,
plane_orientation='y_axes',
slice_index=10,
colormap = colormap
)
mlab.outline()
mlab.axes()
mlab.colorbar(orientation='vertical')
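if __name__ == '__main__':
    # Hypothetical demo (not part of the original module): render a random
    # 30x30x30 scalar field with the two cut planes defined above.
    import numpy as np
    demo_volume = np.random.random((30, 30, 30))
    cutPlanes(demo_volume)
    mlab.show()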
| chaubold/hytra | empryonic/plot.py | Python | mit | 815 | [
"Mayavi"
] | 79e49b8a3eed869cce3b5d1fc4ac06e52726aaf1d556d50cc791684111a2c5a1 |
import unittest
import tempfile
import os
import numpy as np
import caffe
def simple_net_file(num_output):
"""Make a simple net prototxt, based on test_net.cpp, returning the name
of the (temporary) file."""
f = tempfile.NamedTemporaryFile(delete=False)
f.write("""name: 'testnet' force_backward: true
layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
dummy_data_param { num: 5 channels: 2 height: 3 width: 4
num: 5 channels: 1 height: 1 width: 1
data_filler { type: 'gaussian' std: 1 }
data_filler { type: 'constant' } } }
layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
convolution_param { num_output: 11 kernel_size: 2 pad: 3
weight_filler { type: 'gaussian' std: 1 }
bias_filler { type: 'constant' value: 2 } }
param { decay_mult: 1 } param { decay_mult: 0 }
}
layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip'
inner_product_param { num_output: """ + str(num_output) + """
weight_filler { type: 'gaussian' std: 2.5 }
bias_filler { type: 'constant' value: -3 } } }
layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label'
top: 'loss' }""")
f.close()
return f.name
class TestNet(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_file = simple_net_file(self.num_output)
self.net = caffe.Net(net_file, caffe.TRAIN)
# fill in valid labels
self.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.net.blobs['label'].data.shape)
os.remove(net_file)
def test_memory(self):
"""Check that holding onto blob data beyond the life of a Net is OK"""
params = sum(map(list, self.net.params.itervalues()), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
for bl in blobs:
total += bl.data.sum() + bl.diff.sum()
def test_forward_backward(self):
self.net.forward()
self.net.backward()
def test_inputs_outputs(self):
self.assertEqual(self.net.inputs, [])
self.assertEqual(self.net.outputs, ['loss'])
def test_save_and_read(self):
f = tempfile.NamedTemporaryFile(delete=False)
f.close()
self.net.save(f.name)
net_file = simple_net_file(self.num_output)
net2 = caffe.Net(net_file, f.name, caffe.TRAIN)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
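# Hypothetical entry point (not in the original file), so the suite can also
# be run directly with `python test_net.py`:
if __name__ == '__main__':
    unittest.main()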
| gogartom/caffe-textmaps | python/caffe/test/test_net.py | Python | mit | 2,891 | [
"Gaussian"
] | 3b75859dd0752cc0ab4a0bee9ba791c5e51832bf05b1f8288631db5e2d330c18 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
urlutils.py -- helper functions for URL related problems such as
argument washing, redirection, etc.
"""
__revision__ = "$Id$"
import time
import base64
import hmac
import re
import sys
import os
import inspect
import urllib
import urllib2
from urllib import urlencode, quote_plus, quote, FancyURLopener
from urlparse import urlparse, urlunparse
from cgi import parse_qs, parse_qsl, escape
try:
import BeautifulSoup
BEAUTIFUL_SOUP_IMPORTED = True
except ImportError:
BEAUTIFUL_SOUP_IMPORTED = False
from invenio.hashutils import sha1, md5, HASHLIB_IMPORTED
from invenio import webinterface_handler_config as apache
from invenio.config import \
CFG_SITE_URL, CFG_SITE_SECURE_URL, \
CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE, \
CFG_WEBDIR, CFG_SITE_NAME, CFG_VERSION, CFG_SITE_LANGS
def wash_url_argument(var, new_type):
"""
Wash argument into 'new_type', that can be 'list', 'str',
'int', 'tuple' or 'dict'.
If needed, the check 'type(var) is not None' should be done before
calling this function.
@param var: variable value
@param new_type: variable type, 'list', 'str', 'int', 'tuple' or 'dict'
@return: as much as possible, value var as type new_type
If var is a list, will change first element into new_type.
If int check unsuccessful, returns 0
"""
out = []
if new_type == 'list': # return lst
if isinstance(var, list):
out = var
else:
out = [var]
elif new_type == 'str': # return str
if isinstance(var, list):
try:
out = "%s" % var[0]
except:
out = ""
elif isinstance(var, str):
out = var
else:
out = "%s" % var
elif new_type == 'int': # return int
if isinstance(var, list):
try:
out = int(var[0])
except:
out = 0
elif isinstance(var, (int, long)):
out = var
elif isinstance(var, str):
try:
out = int(var)
except:
out = 0
else:
out = 0
elif new_type == 'tuple': # return tuple
if isinstance(var, tuple):
out = var
else:
out = (var, )
elif new_type == 'dict': # return dictionary
if isinstance(var, dict):
out = var
else:
out = {0: var}
return out
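# Illustrative examples (hypothetical, not part of the original module):
#   wash_url_argument(['7'], 'int')   -> 7    (first element coerced)
#   wash_url_argument('foo', 'list')  -> ['foo']
#   wash_url_argument('abc', 'int')   -> 0    (failed conversion returns 0)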
def redirect_to_url(req, url, redirection_type=None, norobot=False):
"""
Redirect current page to url.
@param req: request as received from apache
@param url: url to redirect to
@param redirection_type: what kind of redirection is required:
e.g.: apache.HTTP_MULTIPLE_CHOICES = 300
apache.HTTP_MOVED_PERMANENTLY = 301
apache.HTTP_MOVED_TEMPORARILY = 302
apache.HTTP_SEE_OTHER = 303
apache.HTTP_NOT_MODIFIED = 304
apache.HTTP_USE_PROXY = 305
apache.HTTP_TEMPORARY_REDIRECT = 307
The default is apache.HTTP_MOVED_TEMPORARILY
    @param norobot: whether to instruct crawlers and robots such as GoogleBot
not to index past this point.
@see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3
"""
if redirection_type is None:
redirection_type = apache.HTTP_MOVED_TEMPORARILY
from flask import redirect
r = redirect(url, code=redirection_type)
raise apache.SERVER_RETURN, r
    #FIXME enable code below
del req.headers_out["Cache-Control"]
req.headers_out["Cache-Control"] = "no-cache, private, no-store, " \
"must-revalidate, post-check=0, pre-check=0, max-age=0"
req.headers_out["Pragma"] = "no-cache"
if norobot:
req.headers_out["X-Robots-Tag"] = "noarchive, nosnippet, noindex, nocache"
user_agent = req.headers_in.get('User-Agent', '')
if 'Microsoft Office Existence Discovery' in user_agent or 'ms-office' in user_agent:
## HACK: this is to workaround Microsoft Office trying to be smart
## when users click on URLs in Office documents that require
## authentication. Office will check the validity of the URL
## but will pass the browser the redirected URL rather than
## the original one. This is incompatible with e.g. Shibboleth
## based SSO since the referer would be lost.
## See: http://support.microsoft.com/kb/899927
req.status = 200
req.content_type = 'text/html'
if req.method != 'HEAD':
req.write("""
<html>
<head>
<title>Intermediate page for URLs clicked on MS Office Documents</title>
<meta http-equiv="REFRESH" content="5;url=%(url)s"></meta>
</head>
<body>
<p>You are going to be redirected to the desired content within 5 seconds. If the redirection does not happen automatically please click on <a href="%(url)s">%(url_ok)s</a>.</p>
</body>
</html>""" % {
'url': escape(req.unparsed_uri, True),
'url_ok': escape(req.unparsed_uri)
})
raise apache.SERVER_RETURN(apache.DONE)
req.headers_out["Location"] = url
if req.response_sent_p:
raise IOError("Cannot redirect after headers have already been sent.")
req.status = redirection_type
req.write('<p>Please go to <a href="%s">here</a></p>\n' % url)
raise apache.SERVER_RETURN, apache.DONE
def rewrite_to_secure_url(url, secure_base=CFG_SITE_SECURE_URL):
"""
Rewrite URL to a Secure URL
    @param url: URL to be rewritten to a secure URL.
@param secure_base: Base URL of secure site (defaults to CFG_SITE_SECURE_URL).
"""
url_parts = list(urlparse(url))
url_secure_parts = urlparse(secure_base)
url_parts[0] = url_secure_parts[0]
url_parts[1] = url_secure_parts[1]
return urlunparse(url_parts)
def get_referer(req, replace_ampersands=False):
""" Return the referring page of a request.
Referer (wikipedia): Referer is a common misspelling of the word
"referrer"; so common, in fact, that it made it into the official
specification of HTTP. When visiting a webpage, the referer or
referring page is the URL of the previous webpage from which a link was
followed.
@param req: request
    @param replace_ampersands: if 1, replace & by &amp; in url
(correct HTML cannot contain & characters alone)
"""
try:
referer = req.headers_in['Referer']
if replace_ampersands == 1:
            return referer.replace('&', '&amp;')
return referer
except KeyError:
return ''
def drop_default_urlargd(urlargd, default_urlargd):
    """Return a copy of 'urlargd' with the arguments that carry their
    default value (as defined in 'default_urlargd') removed."""
lndefault = {}
lndefault.update(default_urlargd)
## Commented out. An Invenio URL now should always specify the desired
## language, in order not to raise the automatic language discovery
## (client browser language can be used now in place of CFG_SITE_LANG)
# lndefault['ln'] = (str, CFG_SITE_LANG)
canonical = {}
canonical.update(urlargd)
for k, v in urlargd.items():
try:
d = lndefault[k]
if d[1] == v:
del canonical[k]
except KeyError:
pass
return canonical
def make_canonical_urlargd(urlargd, default_urlargd):
""" Build up the query part of an URL from the arguments passed in
the 'urlargd' dictionary. 'default_urlargd' is a secondary dictionary which
contains tuples of the form (type, default value) for the query
arguments (this is the same dictionary as the one you can pass to
webinterface_handler.wash_urlargd).
When a query element has its default value, it is discarded, so
that the simplest (canonical) url query is returned.
The result contains the initial '?' if there are actual query
items remaining.
"""
canonical = drop_default_urlargd(urlargd, default_urlargd)
if canonical:
        return '?' + urlencode(canonical, doseq=True).replace('&', '&amp;')
return ''
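# Illustrative example (hypothetical values): with a default dictionary of
# {'of': (str, 'hb')}, an 'of' argument at its default value is dropped:
#   make_canonical_urlargd({'p': 'ellis', 'of': 'hb'}, {'of': (str, 'hb')})
#   -> '?p=ellis'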
def create_html_link(urlbase, urlargd, link_label, linkattrd=None,
escape_urlargd=True, escape_linkattrd=True,
urlhash=None):
"""Creates a W3C compliant link.
@param urlbase: base url (e.g. invenio.config.CFG_SITE_URL/search)
@param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'})
@param link_label: text displayed in a browser (has to be already escaped)
@param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param escape_linkattrd: boolean indicating if the function should escape
                             attributes (e.g. < becomes &lt; or " becomes &quot;)
@param urlhash: hash string to add at the end of the link
"""
attributes_separator = ' '
output = '<a href="' + \
create_url(urlbase, urlargd, escape_urlargd, urlhash) + '"'
if linkattrd:
output += ' '
if escape_linkattrd:
attributes = [escape(str(key), quote=True) + '="' + \
escape(str(linkattrd[key]), quote=True) + '"'
for key in linkattrd.keys()]
else:
attributes = [str(key) + '="' + str(linkattrd[key]) + '"'
for key in linkattrd.keys()]
output += attributes_separator.join(attributes)
output += '>' + link_label + '</a>'
return output
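# Illustrative call (hypothetical values; argument order follows the
# dictionary, so 'of' may precede 'p'):
#   create_html_link('http://example.org/search', {'p': 'ellis', 'of': 'hb'},
#                    'Search')
#   -> '<a href="http://example.org/search?p=ellis&amp;of=hb">Search</a>'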
def create_html_mailto(email, subject=None, body=None, cc=None, bcc=None,
link_label="%(email)s", linkattrd=None,
escape_urlargd=True, escape_linkattrd=True,
email_obfuscation_mode=CFG_WEBSTYLE_EMAIL_ADDRESSES_OBFUSCATION_MODE):
"""Creates a W3C compliant 'mailto' link.
Encode/encrypt given email to reduce undesired automated email
harvesting when embedded in a web page.
NOTE: there is no ultimate solution to protect against email
harvesting. All have drawbacks and can more or less be
    circumvented. There are other techniques to protect email
    addresses. We implement the least annoying one for users.
@param email: the recipient of the email
@param subject: a default subject for the email (must not contain
line feeds)
@param body: a default body for the email
@param cc: the co-recipient(s) of the email
    @param bcc: the hidden co-recipient(s) of the email
@param link_label: the label of this mailto link. String
replacement is performed on key %(email)s with
the email address if needed.
@param linkattrd: dictionary of attributes (e.g. a={'class': 'img'})
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
    @param escape_linkattrd: boolean indicating if the function should escape
                             attributes (e.g. < becomes &lt; or " becomes &quot;)
@param email_obfuscation_mode: the protection mode. See below:
You can choose among several modes to protect emails. It is
advised to keep the default
CFG_MISCUTIL_EMAIL_HARVESTING_PROTECTION value, so that it is
possible for an admin to change the policy globally.
Available modes ([t] means "transparent" for the user):
    -1: hide all emails, except CFG_SITE_ADMIN_EMAIL and
CFG_SITE_SUPPORT_EMAIL.
[t] 0 : no protection, email returned as is.
foo@example.com => foo@example.com
1 : basic email munging: replaces @ by [at] and . by [dot]
foo@example.com => foo [at] example [dot] com
[t] 2 : transparent name mangling: characters are replaced by
equivalent HTML entities.
            foo@example.com => &#102;&#111;&#111;&#64;&#101;&#120;&#97;&#109;&#112;&#108;&#101;&#46;&#99;&#111;&#109;
[t] 3 : javascript insertion. Requires Javascript enabled on client side.
4 : replaces @ and . characters by gif equivalents.
foo@example.com => foo<img src="at.gif" alt=" [at] ">example<img src="dot.gif" alt=" [dot] ">com
"""
    # TODO: implement other protection modes to encode/encrypt email:
#
## [t] 5 : form submission. User is redirected to a form that he can
    ##         fill in to send the email (??Use webmessage??).
## Depending on WebAccess, ask to answer a question.
##
## [t] 6 : if user can see (controlled by WebAccess), display. Else
## ask to login to see email. If user cannot see, display
## form submission.
if linkattrd is None:
linkattrd = {}
parameters = {}
if subject:
parameters["subject"] = subject
if body:
parameters["body"] = body.replace('\r\n', '\n').replace('\n', '\r\n')
if cc:
parameters["cc"] = cc
if bcc:
parameters["bcc"] = bcc
# Preprocessing values for some modes
if email_obfuscation_mode == 1:
# Basic Munging
email = email.replace("@", " [at] ").replace(".", " [dot] ")
elif email_obfuscation_mode == 2:
# Transparent name mangling
email = string_to_numeric_char_reference(email)
if '%(email)s' in link_label:
link_label = link_label % {'email': email}
mailto_link = create_html_link('mailto:' + email, parameters,
link_label, linkattrd,
escape_urlargd, escape_linkattrd)
if email_obfuscation_mode == 0:
# Return "as is"
return mailto_link
elif email_obfuscation_mode == 1:
# Basic Munging
return mailto_link
elif email_obfuscation_mode == 2:
# Transparent name mangling
return mailto_link
elif email_obfuscation_mode == 3:
# Javascript-based
return '''<script language="JavaScript" ''' \
'''type="text/javascript">''' \
'''document.write('%s'.split("").reverse().join(""))''' \
'''</script>''' % \
mailto_link[::-1].replace("'", "\\'")
elif email_obfuscation_mode == 4:
# GIFs-based
email = email.replace('.',
'<img src="%s/img/dot.gif" alt=" [dot] " '
'style="vertical-align:bottom" />' % CFG_SITE_URL)
email = email.replace('@',
'<img src="%s/img/at.gif" alt=" [at] " '
'style="vertical-align:baseline" />' % CFG_SITE_URL)
return email
# All other cases, including mode -1:
return ""
def string_to_numeric_char_reference(string):
"""
    Encode a string to HTML-compatible numeric character references.
    Eg: string_to_numeric_char_reference("abc") == '&#97;&#98;&#99;'
"""
out = ""
for char in string:
out += "&#" + str(ord(char)) + ";"
return out
def get_canonical_and_alternates_urls(url, drop_ln=True, washed_argd=None):
"""
Given an Invenio URL returns a tuple with two elements. The first is the
canonical URL, that is the original URL with CFG_SITE_URL prefix, and
where the ln= argument stripped. The second element element is mapping,
language code -> alternate URL
"""
dummy_scheme, dummy_netloc, path, dummy_params, query, fragment = urlparse(url)
canonical_scheme, canonical_netloc = urlparse(CFG_SITE_URL)[0:2]
parsed_query = washed_argd or parse_qsl(query)
no_ln_parsed_query = [(key, value) for (key, value) in parsed_query if key != 'ln']
if drop_ln:
canonical_parsed_query = no_ln_parsed_query
else:
canonical_parsed_query = parsed_query
canonical_query = urlencode(canonical_parsed_query)
canonical_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, canonical_query, fragment))
alternate_urls = {}
for ln in CFG_SITE_LANGS:
alternate_query = urlencode(no_ln_parsed_query + [('ln', ln)])
alternate_url = urlunparse((canonical_scheme, canonical_netloc, path, dummy_params, alternate_query, fragment))
alternate_urls[ln] = alternate_url
return canonical_url, alternate_urls
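# Illustrative example (hypothetical, assuming CFG_SITE_URL is
# 'http://example.org' and CFG_SITE_LANGS is ['en', 'fr']):
#   get_canonical_and_alternates_urls('http://example.org/record/1?ln=fr')
#   -> ('http://example.org/record/1',
#       {'en': 'http://example.org/record/1?ln=en',
#        'fr': 'http://example.org/record/1?ln=fr'})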
def create_url(urlbase, urlargd, escape_urlargd=True, urlhash=None):
"""Creates a W3C compliant URL. Output will look like this:
    'urlbase?param1=value1&amp;param2=value2'
@param urlbase: base url (e.g. invenio.config.CFG_SITE_URL/search)
@param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}
    @param escape_urlargd: boolean indicating if the function should escape
                           arguments (e.g. < becomes &lt; or " becomes &quot;)
@param urlhash: hash string to add at the end of the link
"""
    separator = '&amp;'
output = urlbase
if urlargd:
output += '?'
if escape_urlargd:
arguments = [escape(quote(str(key)), quote=True) + '=' + \
escape(quote(str(urlargd[key])), quote=True)
for key in urlargd.keys()]
else:
arguments = [str(key) + '=' + str(urlargd[key])
for key in urlargd.keys()]
output += separator.join(arguments)
if urlhash:
output += "#" + escape(quote(str(urlhash)))
return output
def same_urls_p(a, b):
""" Compare two URLs, ignoring reorganizing of query arguments """
ua = list(urlparse(a))
ub = list(urlparse(b))
ua[4] = parse_qs(ua[4])
ub[4] = parse_qs(ub[4])
return ua == ub
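# Illustrative example (hypothetical URLs):
#   same_urls_p('http://x.org/a?p=1&q=2', 'http://x.org/a?q=2&p=1') -> True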
def urlargs_replace_text_in_arg(urlargs, regexp_argname, text_old, text_new):
"""Analyze `urlargs' (URL CGI GET query arguments in string form)
and for each occurrence of argument matching `regexp_argname'
replace every substring `text_old' by `text_new'. Return the
resulting new URL.
    Formerly used by the search engine's create_nearest_terms_box;
    it is no longer used there, but is kept in case it becomes
    useful again later.
"""
out = ""
# parse URL arguments into a dictionary:
urlargsdict = parse_qs(urlargs)
## construct new URL arguments:
urlargsdictnew = {}
for key in urlargsdict.keys():
if re.match(regexp_argname, key): # replace `arg' by new values
urlargsdictnew[key] = []
for parg in urlargsdict[key]:
urlargsdictnew[key].append(parg.replace(text_old, text_new))
else: # keep old values
urlargsdictnew[key] = urlargsdict[key]
# build new URL for this word:
for key in urlargsdictnew.keys():
for val in urlargsdictnew[key]:
out += "&" + key + "=" + quote_plus(val, '')
if out.startswith("&"):
out = out[5:]
return out
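# Illustrative example (hypothetical arguments; output order may vary since
# the query is rebuilt from a dictionary):
#   urlargs_replace_text_in_arg('p=foo+bar&q=foo', 'p', 'foo', 'baz')
#   -> 'p=baz+bar&amp;q=foo'   (only arguments whose name matches the
#      regexp have their values rewritten)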
def get_title_of_page(url):
"""
@param url: page to get the title from
@return: the page title in utf-8 or None in case
    that any kind of exception occurred, e.g. connection error,
URL not known
"""
if BEAUTIFUL_SOUP_IMPORTED:
try:
opener = make_invenio_opener('UrlUtils')
soup = BeautifulSoup.BeautifulSoup(opener.open(url))
return soup.title.string.encode("utf-8")
except:
return None
else:
return "Title not available"
def make_user_agent_string(component=None):
"""
Return a nice and uniform user-agent string to be used when Invenio
act as a client in HTTP requests.
"""
ret = "Invenio-%s (+%s; \"%s\")" % (CFG_VERSION, CFG_SITE_URL, CFG_SITE_NAME)
if component:
ret += " %s" % component
return ret
class InvenioFancyURLopener(FancyURLopener):
## Provide default user agent string
version = make_user_agent_string()
def prompt_user_passwd(self, host, realm):
"""Don't prompt"""
return None, None
## Let's override default useragent string
## See: http://docs.python.org/release/2.4.4/lib/module-urllib.html
urllib._urlopener = InvenioFancyURLopener()
def make_invenio_opener(component=None):
"""
Return an urllib2 opener with the useragent already set in the appropriate
way.
"""
opener = urllib2.build_opener()
opener.addheaders = [('User-agent', make_user_agent_string(component))]
return opener
def create_AWS_request_url(base_url, argd, _amazon_secret_access_key,
_timestamp=None):
"""
Create a signed AWS (Amazon Web Service) request URL corresponding
to the given parameters.
Example:
>> create_AWS_request_url("http://ecs.amazon.com/onca/xml",
{'AWSAccessKeyID': '0000000000',
'Service': 'AWSECommerceService',
'Operation': 'ItemLookup',
'ItemID': '0679722769',
'ResponseGroup': 'ItemAttributes,Offers,Images,Review'},
"1234567890")
@param base_url: Service URL of the Amazon store to query
@param argd: dictionary of arguments defining the query
@param _amazon_secret_access_key: your Amazon secret key
@param _timestamp: for testing purpose only (default: current timestamp)
@type base_url: string
@type argd: dict
@type _amazon_secret_access_key: string
@type _timestamp: string
@return signed URL of the request (string)
"""
## First define a few util functions
def get_AWS_signature(argd, _amazon_secret_access_key,
method="GET", request_host="webservices.amazon.com",
request_uri="/onca/xml",
_timestamp=None):
"""
Returns the signature of an Amazon request, based on the
arguments of the request.
@param argd: dictionary of arguments defining the query
@param _amazon_secret_access_key: your Amazon secret key
@param method: method of the request POST or GET
@param request_host: host contacted for the query. To embed in the signature.
@param request_uri: uri contacted at 'request_host'. To embed in the signature.
@param _timestamp: for testing purpose only (default: current timestamp)
@type argd: dict
@type _amazon_secret_access_key: string
@type method: string
@type host_header: string
@type http_request_uri: string
@type _timestamp: string
@return signature of the request (string)
"""
# Add timestamp
if not _timestamp:
argd["Timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime())
else:
argd["Timestamp"] = _timestamp
# Order parameter keys by byte value
parameter_keys = argd.keys()
parameter_keys.sort()
# Encode arguments, according to RFC 3986. Make sure we
# generate a list which is ordered by byte value of the keys
arguments = [quote(str(key), safe="~/") + "=" + \
quote(str(argd[key]), safe="~/") \
for key in parameter_keys]
# Join
parameters_string = "&".join(arguments)
# Prefix
parameters_string = method.upper() + "\n" + \
request_host.lower() + "\n" + \
(request_uri or "/") + "\n" + \
parameters_string
# Sign and return
return calculate_RFC2104_HMAC(parameters_string,
_amazon_secret_access_key)
def calculate_RFC2104_HMAC(data, _amazon_secret_access_key):
"""
Computes a RFC 2104 compliant HMAC Signature and then Base64
encodes it.
Module hashlib must be installed if Python < 2.5
<http://pypi.python.org/pypi/hashlib/20081119>
@param data: data to sign
@param _amazon_secret_access_key: your Amazon secret key
@type data: string
@type _amazon_secret_access_key: string. Empty if hashlib module not installed
"""
if not HASHLIB_IMPORTED:
try:
raise Exception("Module hashlib not installed. Please install it.")
except:
from invenio.errorlib import register_exception
register_exception(stream='warning', alert_admin=True, subject='Cannot create AWS signature')
return ""
else:
if sys.version_info < (2, 5):
# compatibility mode for Python < 2.5 and hashlib
my_digest_algo = _MySHA256(sha256())
else:
my_digest_algo = sha256
return base64.encodestring(hmac.new(_amazon_secret_access_key,
data, my_digest_algo).digest()).strip()
## End util functions
parsed_url = urlparse(base_url)
signature = get_AWS_signature(argd, _amazon_secret_access_key,
request_host=parsed_url[1],
request_uri=parsed_url[2],
_timestamp=_timestamp)
if signature:
argd["Signature"] = signature
return base_url + "?" + urlencode(argd)
def create_Indico_request_url(base_url, indico_what, indico_loc, indico_id, indico_type, indico_params, indico_key, indico_sig, _timestamp=None):
"""
Create a signed Indico request URL to access Indico HTTP Export APIs.
See U{http://indico.cern.ch/ihelp/html/ExportAPI/index.html} for more
information.
Example:
>> create_Indico_request_url("https://indico.cern.ch",
"categ",
"",
[1, 7],
"xml",
{'onlypublic': 'yes',
'order': 'title',
'from': 'today',
'to': 'tomorrow'},
'00000000-0000-0000-0000-000000000000',
'00000000-0000-0000-0000-000000000000')
@param base_url: Service base URL of the Indico instance to query
@param indico_what: element to export
@type indico_what: one of the strings: C{categ}, C{event}, C{room}, C{reservation}
@param indico_loc: location of the element(s) specified by ID (only used for some elements)
@param indico_id: ID of the element to be exported
@type indico_id: a string or a list/tuple of strings
@param indico_type: output format
@type indico_type: one of the strings: C{json}, C{jsonp}, C{xml}, C{html}, C{ics}, C{atom}
@param indico_params: parameters of the query. See U{http://indico.cern.ch/ihelp/html/ExportAPI/common.html}
@param indico_key: API key provided for the given Indico instance
@param indico_sig: API secret key (signature) provided for the given Indico instance
@param _timestamp: for testing purpose only (default: current timestamp)
@return signed URL of the request (string)
"""
url = '/export/' + indico_what + '/'
if indico_loc:
url += indico_loc + '/'
if type(indico_id) in (list, tuple):
# dash separated list of values
indico_id = '-'.join([str(x) for x in indico_id])
url += indico_id + '.' + str(indico_type)
if hasattr(indico_params, 'items'):
items = indico_params.items()
else:
items = list(indico_params)
if indico_key:
items.append(('apikey', indico_key))
if indico_sig and HASHLIB_IMPORTED:
if _timestamp:
items.append(('timestamp', str(_timestamp)))
else:
items.append(('timestamp', str(int(time.time()))))
items = sorted(items, key=lambda x: x[0].lower())
url_to_sign = '%s?%s' % (url, urlencode(items))
if sys.version_info < (2, 5):
# compatibility mode for Python < 2.5 and hashlib
my_digest_algo = _MySHA1(sha1())
else:
my_digest_algo = sha1
signature = hmac.new(indico_sig, url_to_sign, my_digest_algo).hexdigest()
items.append(('signature', signature))
elif not HASHLIB_IMPORTED:
try:
raise Exception("Module hashlib not installed. Please install it.")
except:
from invenio.errorlib import register_exception
            register_exception(stream='warning', alert_admin=True, subject='Cannot create Indico signature')
if not items:
return url
url = '%s%s?%s' % (base_url.strip('/'), url, urlencode(items))
return url
class _MyHashlibAlgo(object):
'''
Define a subclass of any hashlib algorithm class, with an additional "new()"
function, to work with the Python < 2.5 version of the hmac module.
    (This class is more complex than it should be, but it is not
    possible to subclass a hashlib algorithm directly)
'''
def __init__(self, obj):
"""Set the wrapped object."""
super(_MyHashlibAlgo, self).__setattr__('_obj', obj)
methods = []
for name_value in inspect.getmembers(obj, inspect.ismethod):
methods.append(name_value[0])
super(_MyHashlibAlgo, self).__setattr__('__methods__', methods)
def isnotmethod(object_):
"Opposite of ismethod(..)"
return not inspect.ismethod(object_)
members = []
for name_value in inspect.getmembers(obj, isnotmethod):
members.append(name_value[0])
super(_MyHashlibAlgo, self).__setattr__('__members__', members)
def __getattr__(self, name):
"""Redirect unhandled get attribute to self._obj."""
if not hasattr(self._obj, name):
raise AttributeError, ("'%s' has no attribute %s" %
(self.__class__.__name__, name))
else:
return getattr(self._obj, name)
def __setattr__(self, name, value):
"""Redirect set attribute to self._obj if necessary."""
self_has_attr = True
try:
super(_MyHashlibAlgo, self).__getattribute__(name)
except AttributeError:
self_has_attr = False
if (name == "_obj" or not hasattr(self, "_obj") or
not hasattr(self._obj, name) or self_has_attr):
return super(_MyHashlibAlgo, self).__setattr__(name, value)
else:
return setattr(self._obj, name, value)
if HASHLIB_IMPORTED:
from invenio.hashutils import sha256
class _MySHA256(_MyHashlibAlgo):
"A _MyHashlibAlgo subsclass for sha256"
new = lambda d = '': sha256()
class _MySHA1(_MyHashlibAlgo):
"A _MyHashlibAlgo subsclass for sha1"
new = lambda d = '': sha1()
def auto_version_url(file_path):
""" Appends modification time of the file to the request URL in order for the
browser to refresh the cache when file changes
@param file_path: path to the file, e.g js/foo.js
@return: file_path with modification time appended to URL
"""
file_md5 = ""
try:
file_md5 = md5(open(CFG_WEBDIR + os.sep + file_path).read()).hexdigest()
except IOError:
pass
return file_path + "?%s" % file_md5
# VS: Make holdings tab data + html.
def VoyagerHoldings(recid):
from invenio.bibformat_engine import BibFormatObject
from invenio.bibformat_elements import bfe_ILO_links
from invenio.messages import gettext_set_language
# prepare variables
from flask import g
bfo = BibFormatObject(recid)
_ = gettext_set_language(g.ln) # load the right message language
holdings = {}
out_table = []
out_html = ''
row = ''
callno = ''
issues_info = ''
HQ = False
conv = bfo.field('970__a')
item_type = bfo.field("996__a")
holdings = bfo.fields("964")
kept = bfo.field("866_0a")
kept_note = bfo.field("866_0z")
voyager_sysno = bfo.field("970__a")
voyager_sysno = re.sub('^LABORDOC-', '', voyager_sysno)
links = bfe_ILO_links.format_element(bfo, style='',
prefix_en='',
suffix='', separator='<br />',
show_icons='no', focus_on_main_file='yes')
#request_url = '''http://golf.ilo.org/cgi-bin/Pwebrecon.cgi?bbid=%s&BIB=%s&PAGE=REQUESTBIB" onclick="newWin(this.href); return false;''' % (voyager_sysno, voyager_sysno)
request_url = '''http://ringo.ilo.org:7008/vwebv/patronRequests?&sk=en_ILO&bibId=%s" onclick="newWin(this.href); return false;''' % (voyager_sysno)
# Start of the html, request text and link plus holdings table headings
out_html ="""
<script language="JavaScript" src="http://www.ilo.org/webcommon/s-includes/popups.js" type="text/javascript"></script>
<div class="span8">
<div>
ILO staff may request this item by clicking on the blue Request button. Enter your client number and
last name and click Log in. If you do not have a Client number, contact the Loans Service
at tel. +(4122)799 8705; Email:
<a href="mailto:informloan@ilo.org">informloan@ilo.org</a><br /><br />
If you are not ILO staff and would like to see this item, you may wish to contact your library to
ask for an inter-library loan or visit an ILO Library near you.
</div>"""
out_html += """<div class="pull-left tableHoldings">
<table>
<thead>
<tr>
<th>%s</th>
<th>%s</th>
</tr>
</thead>
<tbody>
""" % ("Location", "Call Number")
# iterate through holdings and make rows for the table
for out in holdings:
location = str(out.get("m", " "))
callno = str(out.get("e", " "))
if callno.find('NYP') >= 1:
callno = callno.replace('NYP','not yet published')
callno = ' ' + callno + ' '
if callno.find('NYP') >= 1:
callno = callno.replace('NYP','not yet published')
row = "<tr><td>" + location + "</td><td>" + callno + "</td></tr>"
if row.find('Main collection') >= 1 and HQ == False:
row = row.replace('Library - Main collection','HQ Library')
            if kept or kept_note:  # non-empty 866 holdings fields
issues_info = 'HQ Library' + kept + ' ' + kept_note
row = row.replace('HQ Library', issues_info)
out_table.append(row)
HQ = True
elif row.find('Electronic') >= 1:
links = bfe_ILO_links.format_element(bfo, style='',
prefix_en='',
                                                 suffix='', separator='<br />',
show_icons='no', focus_on_main_file='yes')
links = '</td><td>' + links + '</td></tr>'
row = re.sub('</td><td>.*</td></tr>', links, row)
out_table.append(row)
elif row.find('Main collection') >= 1 and HQ:
pass
else:
out_table.append(row)
if conv.startswith('ILOCONV'):
row = "<tr><td>" + 'Electronic documents' + "</td><td>" + links + "</td></tr>"
out_table.append(row)
out_table.sort()
out_html += ''.join(out_table) + '</tbody></table></div>'
out_html += """<div class="span2 requestButton"> <a href="%s">
<h4><i class="icon-book"> </i> %s </h4></a> </div></div>""" % (request_url, _("Request item"))
return out_html
| labordoc/labordoc-next | modules/miscutil/lib/urlutils.py | Python | gpl-2.0 | 36,723 | [
"VisIt"
] | f821613cad1edfdb4b1b83bbd739d5815af303f082a1b6c35ecf473f4fe69c26 |
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import urllib2
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.utils.unicode import to_unicode
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
validate_certs = kwargs.get('validate_certs', True)
ret = []
for term in terms:
try:
response = open_url(term, validate_certs=validate_certs)
except urllib2.URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
except urllib2.HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
for line in response.read().splitlines():
ret.append(to_unicode(line))
return ret
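# Illustrative playbook usage (hypothetical URL; keyword arguments are
# forwarded to run(), so validate_certs can be passed inline):
#
#   - debug: msg="{{ lookup('url', 'https://example.com/keys.txt',
#                           validate_certs=False) }}"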
| krishnazure/ansible | lib/ansible/plugins/lookup/url.py | Python | gpl-3.0 | 2,024 | [
"Brian"
] | 07ffd78a33eff1f2b2fdf2271391515285084a565e840fdd968ef0ebb1c4e3ea |
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2012 Doug Blank
# Copyright (C) 2013 John Ralls <jralls@ceridwen.us>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Provides constants for other modules
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
import sys
import uuid
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .git_revision import get_git_revision
from .constfunc import get_env_var
from ..version import VERSION, VERSION_TUPLE, major_version, DEV_VERSION
from .utils.resourcepath import ResourcePath
from .utils.grampslocale import GrampsLocale
#-------------------------------------------------------------------------
#
# Gramps program name
#
#-------------------------------------------------------------------------
PROGRAM_NAME = "Gramps"
#-------------------------------------------------------------------------
#
# Standard Gramps Websites
#
#-------------------------------------------------------------------------
URL_HOMEPAGE = "http://gramps-project.org/"
URL_MAILINGLIST = "http://sourceforge.net/mail/?group_id=25770"
URL_BUGHOME = "http://gramps-project.org/bugs"
URL_BUGTRACKER = "http://gramps-project.org/bugs/bug_report_page.php"
URL_WIKISTRING = "http://gramps-project.org/wiki/index.php?title="
URL_MANUAL_PAGE = "Gramps_%s_Wiki_Manual" % major_version
URL_MANUAL_DATA = '%s_-_Entering_and_editing_data:_detailed' % URL_MANUAL_PAGE
URL_MANUAL_SECT1 = '%s_-_part_1' % URL_MANUAL_DATA
URL_MANUAL_SECT2 = '%s_-_part_2' % URL_MANUAL_DATA
URL_MANUAL_SECT3 = '%s_-_part_3' % URL_MANUAL_DATA
WIKI_FAQ = "FAQ"
WIKI_KEYBINDINGS = "Gramps_%s_Wiki_Manual_-_Keybindings" % major_version
WIKI_EXTRAPLUGINS = "%s_Addons" % major_version
WIKI_EXTRAPLUGINS_RAWDATA = "Plugins%s&action=raw" % major_version
#-------------------------------------------------------------------------
#
# Mime Types
#
#-------------------------------------------------------------------------
APP_FAMTREE = 'x-directory/normal'
APP_GRAMPS = "application/x-gramps"
APP_GRAMPS_XML = "application/x-gramps-xml"
APP_GEDCOM = "application/x-gedcom"
APP_GRAMPS_PKG = "application/x-gramps-package"
APP_GENEWEB = "application/x-geneweb"
APP_VCARD = ["text/x-vcard", "text/x-vcalendar"]
#-------------------------------------------------------------------------
#
# Determine the home directory. According to Wikipedia, most UNIX like
# systems use HOME. I'm assuming that this would apply to OS X as well.
# Windows apparently uses USERPROFILE
#
#-------------------------------------------------------------------------
if 'GRAMPSHOME' in os.environ:
USER_HOME = get_env_var('GRAMPSHOME')
HOME_DIR = os.path.join(USER_HOME, 'gramps')
elif 'USERPROFILE' in os.environ:
USER_HOME = get_env_var('USERPROFILE')
if 'APPDATA' in os.environ:
HOME_DIR = os.path.join(get_env_var('APPDATA'), 'gramps')
else:
HOME_DIR = os.path.join(USER_HOME, 'gramps')
else:
USER_HOME = get_env_var('HOME')
HOME_DIR = os.path.join(USER_HOME, '.gramps')
ORIG_HOME_DIR = HOME_DIR
if 'SAFEMODE' in os.environ:
if 'USERPROFILE' in os.environ:
USER_HOME = get_env_var('USERPROFILE')
else:
USER_HOME = get_env_var('HOME')
HOME_DIR = get_env_var('SAFEMODE')
VERSION_DIR = os.path.join(
HOME_DIR, "gramps%s%s" % (VERSION_TUPLE[0], VERSION_TUPLE[1]))
CUSTOM_FILTERS = os.path.join(VERSION_DIR, "custom_filters.xml")
REPORT_OPTIONS = os.path.join(HOME_DIR, "report_options.xml")
TOOL_OPTIONS = os.path.join(HOME_DIR, "tool_options.xml")
PLACE_FORMATS = os.path.join(HOME_DIR, "place_formats.xml")
ENV_DIR = os.path.join(HOME_DIR, "env")
THUMB_DIR = os.path.join(HOME_DIR, "thumb")
THUMB_NORMAL = os.path.join(THUMB_DIR, "normal")
THUMB_LARGE = os.path.join(THUMB_DIR, "large")
USER_PLUGINS = os.path.join(VERSION_DIR, "plugins")
USER_CSS = os.path.join(HOME_DIR, "css")
# dirs checked/made for each Gramps session
USER_DIRLIST = (USER_HOME, HOME_DIR, VERSION_DIR, ENV_DIR, THUMB_DIR,
THUMB_NORMAL, THUMB_LARGE, USER_PLUGINS, USER_CSS)
#-------------------------------------------------------------------------
#
# Paths to python modules - assumes that the root directory is one level
# above this one, and that the plugins directory is below the root directory.
#
#-------------------------------------------------------------------------
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.insert(0, ROOT_DIR)
git_revision = get_git_revision(ROOT_DIR).replace('\n', '')
if sys.platform == 'win32' and git_revision == "":
git_revision = get_git_revision(os.path.split(ROOT_DIR)[1])
if DEV_VERSION:
VERSION += git_revision
#VERSION += "-1"
#
# Glade files
#
GLADE_DIR = os.path.join(ROOT_DIR, "gui", "glade")
GLADE_FILE = os.path.join(GLADE_DIR, "gramps.glade")
PERSON_GLADE = os.path.join(GLADE_DIR, "edit_person.glade")
PLUGINS_GLADE = os.path.join(GLADE_DIR, "plugins.glade")
MERGE_GLADE = os.path.join(GLADE_DIR, "mergedata.glade")
RULE_GLADE = os.path.join(GLADE_DIR, "rule.glade")
PLUGINS_DIR = os.path.join(ROOT_DIR, "plugins")
USE_TIPS = False
if sys.platform == 'win32':
USE_THUMBNAILER = False
else:
USE_THUMBNAILER = True
#-------------------------------------------------------------------------
#
# Paths to data files.
#
#-------------------------------------------------------------------------
_resources = ResourcePath()
DATA_DIR = _resources.data_dir
IMAGE_DIR = _resources.image_dir
TIP_DATA = os.path.join(DATA_DIR, "tips.xml")
PAPERSIZE = os.path.join(DATA_DIR, "papersize.xml")
ICON = os.path.join(IMAGE_DIR, "gramps.png")
LOGO = os.path.join(IMAGE_DIR, "logo.png")
SPLASH = os.path.join(IMAGE_DIR, "splash.jpg")
LICENSE_FILE = os.path.join(_resources.doc_dir, 'COPYING')
#-------------------------------------------------------------------------
#
# Gramps environment variables dictionary
#
#-------------------------------------------------------------------------
ENV = {
"USER_HOME": USER_HOME,
"HOME_DIR": HOME_DIR,
"VERSION": VERSION,
"major_version": major_version,
"VERSION_DIR": VERSION_DIR,
"ENV_DIR": ENV_DIR,
"THUMB_DIR": THUMB_DIR,
"THUMB_NORMAL": THUMB_NORMAL,
"THUMB_LARGE": THUMB_LARGE,
"USER_PLUGINS": USER_PLUGINS,
"ROOT_DIR": ROOT_DIR,
"GLADE_DIR": GLADE_DIR,
"PLUGINS_DIR": PLUGINS_DIR,
"DATA_DIR": DATA_DIR,
"IMAGE_DIR": IMAGE_DIR,
}
#-------------------------------------------------------------------------
#
# Init Localization
#
#-------------------------------------------------------------------------
GRAMPS_LOCALE = GrampsLocale(localedir=_resources.locale_dir)
_ = GRAMPS_LOCALE.translation.sgettext
GTK_GETTEXT_DOMAIN = 'gtk30'
#-------------------------------------------------------------------------
#
# About box information
#
#-------------------------------------------------------------------------
COPYRIGHT_MSG = "© 2001-2006 Donald N. Allingham\n" \
"© 2007-2022 The Gramps Developers"
COMMENTS = _("Gramps\n (Genealogical Research and Analysis "
"Management Programming System)\n"
"is a personal genealogy program.")
AUTHORS = [
"Alexander Roitman",
"Benny Malengier",
"Brian Matherly",
"Donald A. Peterson",
"Donald N. Allingham",
"David Hampton",
"Martin Hawlisch",
"Richard Taylor",
"Tim Waugh",
"John Ralls"
]
AUTHORS_FILE = os.path.join(DATA_DIR, "authors.xml")
DOCUMENTERS = [
'Alexander Roitman',
]
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
THUMBSCALE = 96.0
THUMBSCALE_LARGE = 180.0
XMLFILE = "data.gramps"
NO_SURNAME = "(%s)" % _("none", "surname")
NO_GIVEN = "(%s)" % _("none", "given-name")
ARABIC_COMMA = "،"
ARABIC_SEMICOLON = "؛"
DOCGEN_OPTIONS = 'Docgen Options'
COLON = _(':') # Translators: needed for French, ignore otherwise
#-------------------------------------------------------------------------
#
# Options Constants
#
#-------------------------------------------------------------------------
LONGOPTS = [
"action=",
"class=",
"config=",
"debug=",
"default=",
"display=",
"disable-sound",
"disable-crash-dialog",
"enable-sound",
"espeaker=",
"export=",
"force-unlock",
"format=",
"gdk-debug=",
"gdk-no-debug=",
"gtk-debug=",
"gtk-no-debug=",
"gtk-module=",
"g-fatal-warnings",
"help",
"import=",
"load-modules=",
"list"
"name=",
"oaf-activate-iid=",
"oaf-ior-fd=",
"oaf-private",
"open=",
"username=",
"password=",
"create=",
"options=",
"safe",
"screen=",
"show",
"sm-client-id=",
"sm-config-prefix=",
"sm-disable",
"sync",
"remove=",
"usage",
"version",
"yes",
"quiet",
]
SHORTOPTS = "O:U:P:C:i:e:f:a:p:d:c:r:lLthuv?syqSD:"
GRAMPS_UUID = uuid.UUID('516cd010-5a41-470f-99f8-eb22f1098ad6')
#-------------------------------------------------------------------------
#
# Fanchart Constants
#
#-------------------------------------------------------------------------
PIXELS_PER_GENERATION = 50 # size of radius for generation
BORDER_EDGE_WIDTH = 10 # empty white box size at edge to indicate parents
CHILDRING_WIDTH = 12 # width of the children ring inside the person
TRANSLATE_PX = 10 # size of the central circle, used to move the chart
PAD_PX = 4 # padding with edges
PAD_TEXT = 2 # padding for text in boxes
BACKGROUND_SCHEME1 = 0
BACKGROUND_SCHEME2 = 1
BACKGROUND_GENDER = 2
BACKGROUND_WHITE = 3
BACKGROUND_GRAD_GEN = 4
BACKGROUND_GRAD_AGE = 5
BACKGROUND_SINGLE_COLOR = 6
BACKGROUND_GRAD_PERIOD = 7
GENCOLOR = {
BACKGROUND_SCHEME1: ((255, 63, 0),
(255, 175, 15),
(255, 223, 87),
(255, 255, 111),
(159, 255, 159),
(111, 215, 255),
(79, 151, 255),
(231, 23, 255),
(231, 23, 121),
(210, 170, 124),
(189, 153, 112)),
BACKGROUND_SCHEME2: ((229, 191, 252),
(191, 191, 252),
(191, 222, 252),
(183, 219, 197),
(206, 246, 209)),
BACKGROUND_WHITE: ((255, 255, 255),
(255, 255, 255),),
}
MAX_AGE = 100
GRADIENTSCALE = 5
FORM_CIRCLE = 0
FORM_HALFCIRCLE = 1
FORM_QUADRANT = 2
COLLAPSED = 0
NORMAL = 1
EXPANDED = 2
TYPE_BOX_NORMAL = 0
TYPE_BOX_FAMILY = 1
| gramps-project/gramps | gramps/gen/const.py | Python | gpl-2.0 | 11,713 | [
"Brian"
] | b126fb55c1a5c158a9feeec3f5a6114ad2089df76c926ac6dadddfbcee35cd0d |
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
This script demonstrates the creation of ligand interfaces and
preoptimization screening of possible interfaces.
"""
from six.moves import range
from pymatgen.core import Molecule, Structure
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.core.operations import SymmOp
from mpinterfaces.interface import Interface, Ligand
import numpy as np
def coulomb_configured_interface(iface, random=True,
translations=None,
rotations=None,
samples=10, lowest=5, ecut=None):
"""
Creates Ligand Slab interfaces of user specified translations
and rotations away from the initial guess of binding site
configuration, returns lowest energy structure according to
Coulomb model
Args:
Interface: Interface object: initial interface object
random: True for using Gaussian sampled random numbers for
rotations and translations
translations: list of [x,y,z] translations to be performed
rotation: list of [a,b,c] rotations to be performed w.r.t
Ligand axis
samples: number of interfaces to create
lowest: number of structures to return according to order of
minimum energies
Returns:
list of lowest energy interface objects
"""
ifaces = []
    for i in range(samples):
        transform = []  # reset the adsorption check for each sample
if random:
x = np.random.normal() # shift along x direction
y = np.random.normal() # shift along y direction
            z = np.random.normal()  # shift along z direction
a = SymmOp.from_axis_angle_and_translation(axis=[1, 0, 0], \
angle=np.random.normal(
0, 180))
b = SymmOp.from_axis_angle_and_translation(axis=[0, 1, 0], \
angle=np.random.normal(
0, 180))
c = SymmOp.from_axis_angle_and_translation(axis=[0, 0, 1], \
angle=np.random.normal(
0, 180))
ligand = iface.ligand
ligand.apply_operation(a)
ligand.apply_operation(b)
ligand.apply_operation(c)
            # check that the created interface keeps the ligand adsorbed
            # above the surface: each top slab atom, shifted by the
            # displacement, must lie below the lowest ligand atom
            for j in iface.top_atoms:
                transform.append(iface.cart_coords[j][2] + iface.displacement
                                 <= min(ligand.cart_coords[:, 2]))
if all(transform):
iface = Interface(iface.strt, hkl=iface.hkl,
min_thick=iface.min_thick,
min_vac=iface.min_vac,
supercell=iface.supercell,
surface_coverage=iface.surface_coverage,
ligand=iface.ligand, displacement=z,
adatom_on_lig=iface.adatom_on_lig,
adsorb_on_species=iface.adsorb_on_species,
primitive=False, from_ase=True,
x_shift=x, y_shift=y)
iface.create_interface()
energy = iface.calc_energy()
iface.sort()
if energy < ecut:
ifaces.append((energy, iface))
# ifaces.zip(energy, iface)
return ifaces
if __name__ == '__main__':
# PbS 100 surface with single hydrazine as ligand
# sample pre-relaxed structure files for Bulk (strt) and Molecule
strt = Structure.from_file("POSCAR_PbS")
mol_struct = Structure.from_file("POSCAR_Hydrazine")
mol = Molecule(mol_struct.species, mol_struct.cart_coords)
hydrazine = Ligand([mol])
    # initial supercell; this won't be the final supercell if
# surface coverage is specified
supercell = [1, 1, 1]
# miller index
hkl = [1, 0, 0]
# minimum slab thickness in Angstroms
min_thick = 19
# minimum vacuum thickness in Angstroms
# mind: the ligand will be placed in this vacuum, so the
# final effective vacuum space will be smaller than this
min_vac = 12
# surface coverage in the units of lig/ang^2
# mind: exact coverage as provided cannot be guaranteed, the slab
# will be constructed
# with a coverage value thats close to the requested one
# note: maximum supercell size possible is 10 x 10
# note: 1 lig/nm^2 = 0.01 lig/ang^2
surface_coverage = 0.01
# atom on the slab surface on which the ligand will be attached,
# no need to specify if the slab is made of only a single species
adsorb_on_species = 'Pb'
# atom on ligand that will be attached to the slab surface
adatom_on_lig = 'N'
# ligand displacement from the slab surface along the surface normal
    # i.e. adatom_on_lig will be displaced by this amount from the
# adsorb_on_species atom
# on the slab
# in Angstrom
displacement = 3.0
# here we create the adsorbate slab Interface
iface = Interface(strt, hkl=hkl, min_thick=min_thick,
min_vac=min_vac, supercell=supercell,
surface_coverage=surface_coverage,
ligand=hydrazine, displacement=displacement,
adatom_on_lig=adatom_on_lig,
adsorb_on_species=adsorb_on_species,
primitive=False, from_ase=True)
iface.create_interface()
iface.sort()
energy = iface.calc_energy()
iface.to('poscar', 'POSCAR_interface.vasp')
    interfaces = coulomb_configured_interface(iface, random=True,
translations=None,
rotations=None,
samples=20, lowest=5,
ecut=energy)
for i, iface in enumerate(interfaces):
print("Coloumb Energy")
print(i, iface[0])
iface[1].to('poscar', 'POSCAR_interface' + str(iface[0]) + '.vasp')
iface_slab = iface[1].slab
iface_slab.sort()
# set selective dynamics flags as required
true_site = [1, 1, 1]
false_site = [0, 0, 0]
sd_flag_iface = []
sd_flag_slab = []
# selective dynamics flags for the interface
for i in iface.sites:
sd_flag_iface.append(false_site)
# selective dynamics flags for the bare slab
for i in iface_slab.sites:
sd_flag_slab.append(false_site)
interface_poscar = Poscar(iface[1],
selective_dynamics=sd_flag_iface)
slab_poscar = Poscar(iface_slab,
selective_dynamics=sd_flag_slab)
# slab poscars without selective dynamics flag
iface_slab.to('poscar', 'POSCAR_slab' + str(iface[0]) + '.vasp')
# poscars with selective dynamics flag
interface_poscar.write_file(
"POSCAR_interface_with_sd" + str(iface[0]) + '.vasp')
slab_poscar.write_file(
"POSCAR_slab_with_sd" + str(iface[0]) + '.vasp')
| henniggroup/MPInterfaces | examples/ligand_interface.py | Python | mit | 7,593 | [
"Gaussian",
"VASP",
"pymatgen"
] | 3c4a7568cebef65bc8ce0b112d3baf9dc932c90d3dfd6623de4c04f3f5580024 |
#!/usr/bin/env python
# encoding: utf-8
import unittest
from nose.tools import assert_equal, assert_true, assert_false
from os.path import join, dirname, abspath
import datetime
from scrapers import caledonia
from scrapers import leaf
from scrapers import stgeorgeshall
from scrapers import fact
from scrapers import kazimier
from scrapers import ljmu
from scrapers import bluecoat
from scrapers import philosophyinpubs
SAMPLE_DIR = join(dirname(abspath(__file__)), 'sample_data')
class CaledoniaScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'caledonia.html'), 'r') as f:
cls.rows = list(caledonia.process(f))
def test_correct_number_of_events(self):
assert_equal(8, len(self.rows))
def test_venue_always_the_caledonia(self):
assert_equal(
set(['The Caledonia']),
set([x['venue'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_the_headlines_are_correct(self):
assert_equal([
'Live Music: Loose Moose String Band',
'Live Music: Cajun Session',
'Live Music: Buffalo Clover (from Nashville, Tennessee)',
'Live Music: Downtown Dixieland Band',
'Live Music: The Martin Smith Quartet',
'Live Music: Loose Moose String Band',
'Live Music: The Manouchetones',
'Live Music: Marley Chingus'],
[row['headline'] for row in self.rows])
def test_urls(self):
assert_equal(
set(['http://www.thecaledonialiverpool.com/whats-on/']),
set([x['url'] for x in self.rows]))
class StGeorgesHallTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'stgeorgeshall.html'), 'r') as f:
cls.rows = list(stgeorgeshall.process(f))
def test_correct_number_of_events(self):
assert_equal(7, len(self.rows))
def test_venue_always_st_georges_hall(self):
assert_equal(
set(["St George's Hall"]),
set([x['venue'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_the_headlines_are_correct(self):
assert_equal(
[
'The Charlatans',
'In conversation with Lynda La Plante',
],
[row['headline'] for row in self.rows[0:2]])
assert_equal(
[
"Murder at St George's Hall",
'Llyr Williams - piano'
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
urls = [x['url'] for x in self.rows]
assert_equal(
'http://www.stgeorgesliverpool.co.uk/whatson/details.asp'
'?id=226947',
urls[0])
class LeafScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'leaf.html'), 'r') as f:
cls.rows = list(leaf.process(f))
def test_correct_number_of_events(self):
assert_equal(20, len(self.rows))
def test_venue_always_leaf(self):
assert_equal(
set(['LEAF on Bold Street']),
set([x['venue'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_ambiguous_dates_are_correct(self):
assert_equal(datetime.date(2013, 7, 3), self.rows[-1]['date'])
def test_the_headlines_are_correct(self):
assert_equal([
'Leaf Pudding Club',
'Retro Sunday',
],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
'Tea with an Architect',
'Spotify Wednesdays'
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
assert_equal(
set(['http://www.thisisleaf.co.uk/#/on-bold-street/events/']),
set([x['url'] for x in self.rows]))
class FactScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'fact_listings.html'), 'r') as f:
cls.rows = list(fact.get_all_listings(f))
def test_correct_number_of_events(self):
assert_equal(114, len(self.rows))
def test_venue_always_fact(self):
assert_equal(
set(['FACT']),
set([x['venue'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_date_range_as_expected(self):
"""
Check that dates without a year are being filled in correctly as next
year rather than this year.
"""
dates = [row['date'] for row in self.rows]
assert_equal(datetime.date(2013, 8, 17), min(dates))
assert_equal(datetime.date(2014, 6, 24), max(dates))
def test_the_headlines_are_correct(self):
assert_equal([
u'Alan Partridge: Alpha Papa (1)',
u"The World's End"
],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
u'Met. Encore: La Cenerentola',
u'ROH. Live: Manon Lescaut'
],
[row['headline'] for row in self.rows[-2:]])
def test_repeated_event_identified(self):
with open(join(SAMPLE_DIR, 'fact_repeated.html'), 'r') as f:
assert_false(fact.is_single_event(f))
def test_exhibition_date_range_event_identified(self):
with open(join(SAMPLE_DIR, 'fact_exhib.html'), 'r') as f:
assert_false(fact.is_single_event(f))
def test_single_event_identified(self):
with open(join(SAMPLE_DIR, 'fact_single.html'), 'r') as f:
assert_true(fact.is_single_event(f))
def test_child_event_identified(self):
with open(join(SAMPLE_DIR, 'fact_child_event.html'), 'r') as f:
assert_true(fact.is_child_event(f))
def test_non_child_event_identified(self):
with open(join(SAMPLE_DIR, 'fact_single.html'), 'r') as f:
assert_false(fact.is_child_event(f))
class KazimierScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'kazimier_listings.html'), 'r') as f:
cls.rows = list(kazimier.process(f))
def test_correct_number_of_events(self):
assert_equal(27, len(self.rows))
    def test_venue_always_kazimier(self):
assert_equal(
set(['Kazimier']),
set([x['venue'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_date_range_as_expected(self):
"""
Check that dates without a year are being filled in correctly as next
year rather than this year.
"""
dates = [row['date'] for row in self.rows]
assert_equal(datetime.date(2013, 8, 21), min(dates))
assert_equal(datetime.date(2013, 12, 7), max(dates))
def test_the_headlines_are_correct(self):
assert_equal([
u'Manifold - A living exhibition',
u'Othello'
],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
u'Jonathan Wilson',
u'John Smith'
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
urls = [x['url'] for x in self.rows]
assert_equal(
'http://www.thekazimier.co.uk/listing/00000000175/',
urls[0])
class BluecoatScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'bluecoat_listings.html'), 'r') as f:
cls.rows = list(bluecoat.process(f))
def test_correct_number_of_events(self):
assert_equal(16, len(self.rows))
def test_venue(self):
assert_equal(
set(['The Bluecoat']),
set([x['venue'] for x in self.rows]))
def test_organiser(self):
assert_equal(
set(['The Bluecoat']),
set([x['organiser'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_date_range_as_expected(self):
"""
Check that dates without a year are being filled in correctly as next
year rather than this year.
"""
dates = [row['date'] for row in self.rows]
assert_equal(datetime.date(2013, 7, 11), min(dates))
assert_equal(datetime.date(2013, 10, 17), max(dates))
def test_the_headlines_are_correct(self):
assert_equal([
'Philosophy in Pubs',
'Talking Poetry'
],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
'Jarlath Killeen and ',
'Scrips'
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
urls = [x['url'] for x in self.rows]
assert_equal(
'http://www.thebluecoat.org.uk/events/view/events/1054',
urls[0])
class PhilosophyInPubsScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR,
'philosophyinpubs_venues.html'), 'r') as f:
cls.excluded_venues = philosophyinpubs.get_excluded_venue_ids(f)
assert_equal(47, len(cls.excluded_venues))
assert_equal(
'05cccecb1a256b561cfc858a4e907879',
cls.excluded_venues[0])
with open(join(SAMPLE_DIR,
'philosophyinpubs_listings.html'), 'r') as f:
cls.rows = list(philosophyinpubs.process(f, cls.excluded_venues))
def test_correct_number_of_events(self):
assert_equal(28, len(self.rows))
def test_venue(self):
assert_equal(
set([
'The Halfway House',
'Chinese Pagoda Centre',
'The Victoria Hotel (Waterloo)',
'The Birkey Hotel (Crosby)',
'Allerton Community Philosophy Group',
'Bluecoat Art Centre',
'The Friday Forum',
'The Buck ith Vine (Ormskirk)',
]),
set([x['venue'] for x in self.rows]))
def test_organiser(self):
assert_equal(
set(['Philosophy In Pubs']),
set([x['organiser'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_date_range_as_expected(self):
"""
Check that dates without a year are being filled in correctly as next
year rather than this year.
"""
dates = [row['date'] for row in self.rows]
assert_equal(datetime.date(2013, 8, 20), min(dates))
assert_equal(datetime.date(2013, 12, 12), max(dates))
def test_the_headlines_are_correct(self):
assert_equal([
'Call me cynical ...',
'Does Freedom of Speech serve any Purpose ?'],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
'Is there meaning to life?',
'How can we judge other people?'
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
urls = [x['url'] for x in self.rows]
assert_equal(
('http://www.philosophyinpubs.org.uk/venues/view/'
'ce780115d412d1d9ffb2ffcb5e85136d'),
urls[0])
class LjmuScraperTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with open(join(SAMPLE_DIR, 'ljmu_listings.html'), 'r') as f:
cls.rows = list(ljmu.process(f))
def test_correct_number_of_events(self):
assert_equal(4, len(self.rows))
def test_venue(self):
assert_equal(
set(['Redmonds Building', 'Liverpool Philharmonic Hall']),
set([x['venue'] for x in self.rows]))
def test_organiser(self):
assert_equal(
set(['LJMU']),
set([x['organiser'] for x in self.rows]))
def test_all_dates_are_datetime_dates(self):
dates = [row['date'] for row in self.rows]
assert_true(
all([isinstance(date, datetime.date) for date in dates]))
def test_date_range_as_expected(self):
"""
Check that dates without a year are being filled in correctly as next
year rather than this year.
"""
dates = [row['date'] for row in self.rows]
assert_equal(datetime.date(2013, 9, 5), min(dates))
assert_equal(datetime.date(2013, 11, 28), max(dates))
def test_the_headlines_are_correct(self):
assert_equal([
u'Astrobiology: The Hunt for Alien Life',
(u'Roscoe Lecture Series: Mayors and their Cities – the '
'Challenges and Opportunities')
],
[row['headline'] for row in self.rows[0:2]])
assert_equal([
u'Roscoe Lecture Series: ‘Overcoming Disability and Adversity’',
(u'Roscoe Lecture Series: 1914 – Why Remembering the Great War'
' Matters')
],
[row['headline'] for row in self.rows[-2:]])
def test_urls(self):
urls = [x['url'] for x in self.rows]
assert_equal(
'http://ljmuastrobiologypubliclecture.eventbrite.co.uk/',
urls[0])
| paulfurley/liverpool-events-scrapers | tool/tests.py | Python | bsd-2-clause | 14,195 | [
"MOOSE"
] | 32b9f331d3a2bdb710eb8243d007d41f9259511fc18fa26ad361fb296cfff07e |
#!/usr/bin/env python
import os, sys, re
import argparse
from forcebalance.molecule import Molecule
from collections import defaultdict, OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument('-bd', action="store_true", help='Label bond parameters')
parser.add_argument('-an', action="store_true", help='Label angle parameters')
parser.add_argument('-bk', action="store_true", help='Label backbone torsion angles')
parser.add_argument('-sd', action="store_true", help='Label sidechain torsion angles')
parser.add_argument('-eq', action="store_true", help='Label equilibrium positions in addition to force constants')
parser.add_argument('-sel', type=str, nargs='+', help='Activate parameters for one or more selected residues')
args, sys.argv = parser.parse_known_args(sys.argv)
llist = []
if args.sel != None:
llist += [i.upper() for i in args.sel]
if args.bd:
llist.append('bd')
if args.an:
llist.append('an')
if args.bk:
llist.append('bk')
if args.sd:
llist.append('sd')
if args.eq:
llist.append('eq')
if len(llist) == 0:
print 'Did not specify any parameters to fit, use -h argument for help'
if len(sys.argv) != 2:
raise RuntimeError('Did not specify force field file, use -h argument for help')
label = '-'+'-'.join(llist)
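# Example invocation (file name hypothetical, not shipped with this script):
#   python highlight-amber.py -bk -sd ffamber.itp
# sets label to '-bk-sd' and writes the tagged copy to ffamber-bk-sd.itp.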
# Highlight pertinent dihedral interactions for amino acids in AMBER force field.
AACodes = []
AADirs = []
for d in sorted(os.listdir('../targets/')):
if 'phipsi' not in d: continue
if os.path.isdir('../targets/%s' % d):
AACode = re.match('^[A-Z]*', d).group(0)
if AACode not in AACodes:
if args.sel != None and AACode not in [i.upper() for i in args.sel]: continue
AACodes.append(AACode)
AADirs.append('../targets/%s' % d)
# Get a mapping from atom names to atom classes for a molecule name.
def build_atomname_to_atomclass(mnm):
atomgrp = 0
content = ''
this_mnm = ''
anac = OrderedDict()
for line in open('aminoacids.rtp').readlines():
match = re.match('^ *\[ *([^ ]*) *\]', line)
if match:
old_content = content
content = match.group(1)
if content == 'atoms':
this_mnm = old_content
atomgrp = 1
else:
atomgrp = 0
elif atomgrp and mnm == this_mnm:
s = line.split()
anac[s[0]] = s[1]
return anac
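# For illustration, a (hypothetical) aminoacids.rtp fragment such as:
#   [ ALA ]
#    [ atoms ]
#       N    N   -0.4157  1
#       CA   CT   0.0337  2
# makes build_atomname_to_atomclass('ALA') return
# OrderedDict([('N', 'N'), ('CA', 'CT')]).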
# if re.match('^ *\[', line):
# if re.match('^ *\[ atoms \]', line):
# print line,
# else:
# Backbone goes like N, CT, C, N, CT, C...
def is_backbone(dac):
return dac[1:3] in [['N', 'CT'], ['CT', 'C'], ['C', 'N'], ['CT', 'N'], ['C', 'CT'], ['N', 'C']]
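# Sanity examples (atom classes only, residues hypothetical): a phi-like
# dihedral C-N-CT-C has the central pair ['N', 'CT'] and counts as backbone;
# N-CT-CT-CT (central pair ['CT', 'CT']) does not.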
print AACodes
allbac = []
allaac = []
alldac = []
for iAA, AA in enumerate(AACodes):
print "Detecting parameters for", AA
anac = build_atomname_to_atomclass(AA)
anac.update(build_atomname_to_atomclass('ACE'))
anac.update(build_atomname_to_atomclass('NME'))
M = Molecule(os.path.join(AADirs[iAA], 'all.gro'))
for i in M.atomname:
if i not in anac.keys():
print '%s not in list of atom names' % i
for d in M.find_dihedrals():
if all([M.atomname[i] in anac for i in d]):
dac = [anac[M.atomname[i]] for i in d]
if is_backbone(dac):
if args.bk and dac not in alldac:
alldac.append(dac[:])
alldac.append(dac[::-1])
elif args.sd and dac not in alldac:
alldac.append(dac[:])
alldac.append(dac[::-1])
else:
print dac, "is not parameterized"
for a in M.find_angles():
if all([M.atomname[i] in anac for i in a]):
aac = [anac[M.atomname[i]] for i in a]
if args.an and aac not in allaac:
allaac.append(aac[:])
allaac.append(aac[::-1])
for b in M.bonds:
if all([M.atomname[i] in anac for i in b]):
bac = [anac[M.atomname[i]] for i in b]
if args.bd and bac not in allbac:
allbac.append(bac[:])
allbac.append(bac[::-1])
mode = 'N'
foutnm = '%s%s%s' % (os.path.splitext(sys.argv[1])[0], label, os.path.splitext(sys.argv[1])[1])
print "Output file is", foutnm
fout = open(foutnm,'w')
nprm = 0
for line in open(sys.argv[1]).readlines():
line = re.sub(';.*$', '', line.replace('\n',''))
match = re.match('^ *\[ *([^ ]*) *\]', line)
if match:
print >> fout
content = match.group(1)
if content == 'dihedraltypes':
mode = 'D'
elif content == 'angletypes':
mode = 'A'
elif content == 'bondtypes':
mode = 'B'
else:
mode = 'N'
elif mode == 'D':
if len(re.sub(';.*$', '', line).strip()) > 0:
# We are now at a line of dihedral data
s = line.split()
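            # 'X' in the force-field line acts as a wildcard: a (hypothetical)
            # line whose first four fields are 'X CT N X' matches any detected
            # dihedral whose two central atom classes are CT and N.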
if any([all([(i == j or j == 'X') for i, j in zip(dac, s[:4])]) for dac in alldac]):
if args.eq:
line += ' ; PRM 5 6'
nprm += 2
else:
line += ' ; PARM 6'
nprm += 1
elif mode == 'A':
if len(re.sub(';.*$', '', line).strip()) > 0:
# We are now at a line of angle data
s = line.split()
if any([all([i == j for i, j in zip(aac, s[:3])]) for aac in allaac]):
if args.eq:
line += ' ; PRM 4 5'
nprm += 2
else:
line += ' ; PARM 5'
nprm += 1
elif mode == 'B':
if len(re.sub(';.*$', '', line).strip()) > 0:
# We are now at a line of bond data
s = line.split()
if any([all([i == j for i, j in zip(bac, s[:2])]) for bac in allbac]):
if args.eq:
line += ' ; PRM 3 4'
nprm += 2
else:
line += ' ; PARM 4'
nprm += 1
if len(line) > 0:
print >> fout, line
print nprm, "total fitting parameters"
fout.close()
# if any([all([i == j or j == 'X'] for i, j in zip(dac, s[:4])) for dac in alldac]):
# print s[:4]
# print sorted(list(set(anac.keys())))
# print sorted(list(set(M.atomname)))
sys.exit()
| hainm/open-forcefield-group | forcebalance/amber-intra/forcefield/highlight-amber.py | Python | gpl-2.0 | 6,409 | [
"Amber"
] | 21c611fb8caad1388f70d60c113406bf9de49ad0ad9d2553ea178aa2a158e716 |
../../../../../../share/pyshared/orca/scripts/toolkits/GAIL.py | Alberto-Beralix/Beralix | i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/toolkits/GAIL.py | Python | gpl-3.0 | 62 | [
"ORCA"
] | 5e07f4a5a3b9b5f2d17c04cb622b80e19a73c8d827b0a815b5b9351c42dfdf1f |
from datetime import datetime
from projectp import db
from projectp.serializer import Serializer
def _epoch_to_datetime(epoch):
return datetime.fromtimestamp(float(epoch))
class Visit(db.Model, Serializer):
__tablename__ = 'visits'
id = db.Column(db.Integer, primary_key=True)
start_time = db.Column(db.DateTime, nullable=False)
end_time = db.Column(db.DateTime, nullable=False)
duration = db.Column(db.Integer, nullable=False)
location_id = db.Column(db.Integer, db.ForeignKey('locations.id'))
def __init__(self, start_time, end_time, location_id):
self.start_time = _epoch_to_datetime(start_time)
self.end_time = _epoch_to_datetime(end_time)
duration_delta = self.end_time - self.start_time
self.duration = duration_delta.seconds
self.location_id = location_id
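        # Example with hypothetical epoch values: Visit(1370000000, 1370003600, 1)
        # spans exactly one hour, so duration is 3600. Note timedelta.seconds
        # discards whole days, so a visit longer than 24 hours would be
        # under-reported; total_seconds() would avoid that.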
def __repr__(self):
return "<Visit(id='%s', start_time='%s, end_time='%s', duration='%s')>" % (
self.id, self.start_time, self.end_time, self.duration)
# Serialize
def serialize(self):
return Serializer.serialize(self)
# d = Serializer.serialize(self)
# # Remove filepath from response since its used only internally
# del d['filepath']
# return d
| Proj-P/project-p-api | projectp/visits/models.py | Python | mit | 1,274 | [
"VisIt"
] | cad382e96eed36e6dbbd9f5c0a40e3a9328104df5390aa639df2d70be3ffea05 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from datetime import datetime, timedelta
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
import pytz
import urllib
from bok_choy.promise import EmptyPromise
from ..helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text,
select_option_by_text,
get_selected_option_text
)
from ...pages.lms import BASE_URL
from ...pages.lms.account_settings import AccountSettingsPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_info import CourseInfoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.video.video import VideoPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.studio.settings import SettingsPage
from ...pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from ...pages.lms.course_wiki import CourseWikiPage, CourseWikiEditPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr('shard_8')
class ForgotPasswordPageTest(UniqueCourseTest):
"""
    Test that the forgot-password form is rendered if the URL contains
    'forgot-password-modal' in its hash.
"""
def setUp(self):
""" Initialize the page object """
super(ForgotPasswordPageTest, self).setUp()
self.user_info = self._create_user()
self.reset_password_page = ResetPasswordPage(self.browser)
def _create_user(self):
"""
Create a unique user
"""
auto_auth = AutoAuthPage(self.browser).visit()
user_info = auto_auth.user_info
LogoutPage(self.browser).visit()
return user_info
def test_reset_password_form_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Expect that reset password form is visible on the page
self.assertTrue(self.reset_password_page.is_form_visible())
def test_reset_password_confirmation_box_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Navigate to the password reset form and try to submit it
self.reset_password_page.fill_password_reset_form(self.user_info['email'])
self.reset_password_page.is_success_visible(".submission-success")
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.reset_password_page.get_success_message())
@attr('shard_8')
class LoginFromCombinedPageTest(UniqueCourseTest):
"""Test that we can log in using the combined login/registration page.
Also test that we can request a password reset from the combined
login/registration page.
"""
def setUp(self):
"""Initialize the page objects and create a test course. """
super(LoginFromCombinedPageTest, self).setUp()
self.login_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="login",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_login_success(self):
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page and try to log in
self.login_page.visit().login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_login_failure(self):
# Navigate to the login page
self.login_page.visit()
# User account does not exist
self.login_page.login(email="nobody@nowhere.com", password="password")
# Verify that an error is displayed
self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())
def test_toggle_to_register_form(self):
self.login_page.visit().toggle_form()
self.assertEqual(self.login_page.current_form, "register")
@flaky # ECOM-1165
def test_password_reset_success(self):
# Create a user account
email, password = self._create_unique_user() # pylint: disable=unused-variable
# Navigate to the password reset form and try to submit it
self.login_page.visit().password_reset(email=email)
# Expect that we're shown a success message
self.assertIn("Password Reset Email Sent", self.login_page.wait_for_success())
def test_password_reset_failure(self):
# Navigate to the password reset form
self.login_page.visit()
# User account does not exist
self.login_page.password_reset(email="nobody@nowhere.com")
# Expect that we're shown a failure message
self.assertIn(
"No user with the provided email address exists.",
self.login_page.wait_for_errors()
)
def test_third_party_login(self):
"""
Test that we can login using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page
self.login_page.visit()
# Baseline screen-shots are different for chrome and firefox.
self.assertScreenshot('#login .login-providers', 'login-providers-{}'.format(self.browser.name))
# Try to log in using "Dummy" provider
self.login_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the login page:
msg_text = self.login_page.wait_for_auth_status_message()
self.assertIn("You have successfully signed into Dummy", msg_text)
self.assertIn("To link your accounts, sign in now using your edX password", msg_text)
# Now login with username and password:
self.login_page.login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
try:
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
self.login_page.visit()
self.login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
finally:
self._unlink_dummy_account()
def test_hinted_login(self):
""" Test the login page when coming from course URL that specified which third party provider to use """
# Create a user account and link it to third party auth with the dummy provider:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self._link_dummy_account()
try:
LogoutPage(self.browser).visit()
# When not logged in, try to load a course URL that includes the provider hint ?tpa_hint=...
course_page = CoursewarePage(self.browser, self.course_id)
self.browser.get(course_page.url + '?tpa_hint=oa2-dummy')
# We should now be redirected to the login page
self.login_page.wait_for_page()
self.assertIn(
"Would you like to sign in using your Dummy credentials?",
self.login_page.hinted_login_prompt
)
# Baseline screen-shots are different for chrome and firefox.
self.assertScreenshot('#hinted-login-form', 'hinted-login-{}'.format(self.browser.name))
self.login_page.click_third_party_dummy_provider()
# We should now be redirected to the course page
course_page.wait_for_page()
finally:
self._unlink_dummy_account()
def _link_dummy_account(self):
""" Go to Account Settings page and link the user's account to the Dummy provider """
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Link Your Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
# make sure we are on "Linked Accounts" tab after the account settings
# page is reloaded
account_settings.switch_account_settings_tabs('accounts-tab')
account_settings.wait_for_link_title_for_link_field(field_id, "Unlink This Account")
def _unlink_dummy_account(self):
""" Verify that the 'Dummy' third party auth provider is linked, then unlink it """
# This must be done after linking the account, or we'll get cross-test side effects
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
def _create_unique_user(self):
"""
Create a new user with a unique name and email.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
password = "password"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username=username,
email=email,
password=password
).visit()
# Log out
LogoutPage(self.browser).visit()
return (email, password)
@attr('shard_8')
class RegisterFromCombinedPageTest(UniqueCourseTest):
"""Test that we can register a new user from the combined login/registration page. """
def setUp(self):
"""Initialize the page objects and create a test course. """
super(RegisterFromCombinedPageTest, self).setUp()
self.register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_register_success(self):
# Navigate to the registration page
self.register_page.visit()
# Fill in the form and submit it
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username=username,
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_register_failure(self):
# Navigate to the registration page
self.register_page.visit()
# Enter a blank for the username field, which is required
# Don't agree to the terms of service / honor code.
# Don't specify a country code, which is required.
# Don't specify a favorite movie.
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username="",
full_name="Test User",
terms_of_service=False
)
# Verify that the expected errors are displayed.
errors = self.register_page.wait_for_errors()
self.assertIn(u'Please enter your Public username.', errors)
self.assertIn(u'You must agree to the edX Terms of Service and Honor Code.', errors)
self.assertIn(u'Please select your Country.', errors)
self.assertIn(u'Please tell us your favorite movie.', errors)
def test_toggle_to_login_form(self):
self.register_page.visit().toggle_form()
self.assertEqual(self.register_page.current_form, "login")
def test_third_party_register(self):
"""
Test that we can register using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Navigate to the register page
self.register_page.visit()
# Baseline screen-shots are different for chrome and firefox.
self.assertScreenshot('#register .login-providers', 'register-providers-{}'.format(self.browser.name))
# Try to authenticate using the "Dummy" provider
self.register_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the register page:
msg_text = self.register_page.wait_for_auth_status_message()
self.assertEqual(self.register_page.current_form, "register")
self.assertIn("You've successfully signed into Dummy", msg_text)
self.assertIn("We just need a little more information", msg_text)
# Now the form should be pre-filled with the data from the Dummy provider:
self.assertEqual(self.register_page.email_value, "adama@fleet.colonies.gov")
self.assertEqual(self.register_page.full_name_value, "William Adama")
self.assertIn("Galactica1", self.register_page.username_value)
# Set country, accept the terms, and submit the form:
self.register_page.register(country="US", favorite_movie="Battlestar Galactica", terms_of_service=True)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
login_page = CombinedLoginAndRegisterPage(self.browser, start_page="login")
login_page.visit()
login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
# Now unlink the account (To test the account settings view and also to prevent cross-test side effects)
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
@attr('shard_8')
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
"""Test that we can proceed through the payment and verification flow."""
def setUp(self):
"""Initialize the test.
Create the necessary page objects, create a test course and configure its modes,
create a user and log them in.
"""
super(PayAndVerifyTest, self).setUp()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
# Create a course
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()
@skip("Flaky 02/02/2015")
def test_immediate_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Proceed to verification
self.payment_and_verification_flow.immediate_verification()
# Take face photo and proceed to the ID photo step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Take ID photo and proceed to the review photos step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Submit photos and proceed to the enrollment confirmation step
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_deferred_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_enrollment_upgrade(self):
# Create a user, log them in, and enroll them in the honor mode
student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as honor in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'honor')
# Click the upsell button on the dashboard
self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)
# Select the first contribution option appearing on the page
self.upgrade_page.indicate_contribution()
# Proceed to the fake payment page
self.upgrade_page.proceed_to_payment()
def only_enrollment_events(event):
"""Filter out all non-enrollment events."""
return event['event_type'].startswith('edx.course.enrollment.')
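        # For example, a (hypothetical) event with event_type
        # 'edx.course.enrollment.activated' passes this filter, while a
        # 'problem_check' event is dropped.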
expected_events = [
{
'event_type': 'edx.course.enrollment.mode_changed',
'event': {
'user_id': int(student_id),
'mode': 'verified',
}
}
]
with self.assert_events_match_during(event_filter=only_enrollment_events, expected_events=expected_events):
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
@attr('shard_1')
class CourseWikiTest(UniqueCourseTest):
"""
Tests that verify the course wiki.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(CourseWikiTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
self.tab_nav = TabNavPage(self.browser)
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
# Access course wiki page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Theory')
def _open_editor(self):
self.course_wiki_page.open_editor()
self.course_wiki_edit_page.wait_for_page()
def test_edit_course_wiki(self):
"""
        The wiki page is editable by students by default.
        After accessing the course wiki,
        replace the content of the default page
        and confirm that the new content has been saved.
"""
content = "hello"
self._open_editor()
self.course_wiki_edit_page.replace_wiki_content(content)
self.course_wiki_edit_page.save_wiki_content()
actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
self.assertEqual(content, actual_content)
@attr('shard_1')
class HighLevelTabTest(UniqueCourseTest):
"""
Tests that verify each of the high-level tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(HighLevelTabTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
course_fix.add_handout('demoPDF.pdf')
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab', data=r"static tab data with mathjax \(E=mc^2\)"),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
XBlockFixtureDesc('sequential', 'Test Subsection 3'),
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_info(self):
"""
Navigate to the course info page.
"""
# Navigate to the course info page from the progress page
self.progress_page.visit()
self.tab_nav.go_to_tab('Home')
# Expect just one update
self.assertEqual(self.course_info_page.num_updates, 1)
# Expect a link to the demo handout pdf
handout_links = self.course_info_page.handout_links
self.assertEqual(len(handout_links), 1)
self.assertIn('demoPDF.pdf', handout_links[0])
def test_progress(self):
"""
Navigate to the progress page.
"""
# Navigate to the progress page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
# We haven't answered any problems yet, so assume scores are zero
# Only problems should have scores; so there should be 2 scores.
CHAPTER = 'Test Section'
SECTION = 'Test Subsection'
EXPECTED_SCORES = [(0, 3), (0, 1)]
actual_scores = self.progress_page.scores(CHAPTER, SECTION)
self.assertEqual(actual_scores, EXPECTED_SCORES)
def test_static_tab(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
def test_static_tab_with_mathjax(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
# Verify that Mathjax has rendered
self.tab_nav.mathjax_has_rendered()
def test_wiki_tab_first_time(self):
"""
Navigate to the course wiki tab. When the wiki is accessed for
the first time, it is created on the fly.
"""
course_wiki = CourseWikiPage(self.browser, self.course_id)
# From the course info page, navigate to the wiki tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Theory')
self.assertTrue(self.tab_nav.is_on_tab('Theory'))
# Assert that a default wiki is created
expected_article_name = "{org}.{course_number}.{course_run}".format(
org=self.course_info['org'],
course_number=self.course_info['number'],
course_run=self.course_info['run']
)
self.assertEqual(expected_article_name, course_wiki.article_name)
def test_courseware_nav(self):
"""
Navigate to a particular unit in the course.
"""
# Navigate to the course page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
# Check that the course navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.course_nav.sections
for section, subsections in EXPECTED_SECTIONS.iteritems():
self.assertIn(section, actual_sections)
self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])
# Navigate to a particular section
self.course_nav.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.course_nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
@attr('shard_1')
class PDFTextBooksTabTest(UniqueCourseTest):
"""
Tests that verify each of the textbook tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PDFTextBooksTabTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
# Install a course with TextBooks
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Add PDF textbooks to course fixture.
for i in range(1, 3):
course_fix.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])
course_fix.install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_verify_textbook_tabs(self):
"""
        Test that multiple PDF textbooks load correctly in the LMS.
"""
self.course_info_page.visit()
        # Verify each PDF textbook tab by visiting it; this fails if the correct tab is not loaded.
for i in range(1, 3):
self.tab_nav.go_to_tab("PDF Book {}".format(i))
@attr('shard_1')
class VisibleToStaffOnlyTest(UniqueCourseTest):
"""
Tests that content with visible_to_staff_only set to True cannot be viewed by students.
"""
def setUp(self):
super(VisibleToStaffOnlyTest, self).setUp()
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
),
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible only to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
)
)
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
def test_visible_to_staff(self):
"""
Scenario: All content is visible for a user marked is_staff (different from course staff)
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an account marked 'is_staff'
Then I can see all course content
"""
AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
course_id=self.course_id, staff=True).visit()
self.courseware_page.visit()
self.assertEqual(3, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual([u'Locked Unit', u'Unlocked Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Locked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
def test_visible_to_student(self):
"""
Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an authorized student account
Then I can only see content without 'visible_to_staff_only' set to True
"""
AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
course_id=self.course_id, staff=False).visit()
self.courseware_page.visit()
self.assertEqual(2, len(self.course_nav.sections['Test Section']))
self.course_nav.go_to_section("Test Section", "Subsection With Locked Unit")
self.assertEqual([u'Unlocked Unit'], self.course_nav.sequence_items)
self.course_nav.go_to_section("Test Section", "Unlocked Subsection")
self.assertEqual([u'Test Unit'], self.course_nav.sequence_items)
@attr('shard_1')
class TooltipTest(UniqueCourseTest):
"""
Tests that tooltips are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(TooltipTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_tooltip(self):
"""
Verify that tooltips are displayed when you hover over the sequence nav bar.
"""
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.courseware_page.verify_tooltips_displayed()
@attr('shard_1')
class PreRequisiteCourseTest(UniqueCourseTest):
"""
Tests that pre-requisite course messages are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PreRequisiteCourseTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.prc_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'prc_test_run',
'display_name': 'PR Test Course' + self.unique_id
}
CourseFixture(
self.prc_info['org'], self.prc_info['number'],
self.prc_info['run'], self.prc_info['display_name']
).install()
pre_requisite_course_key = generate_course_key(
self.prc_info['org'],
self.prc_info['number'],
self.prc_info['run']
)
self.pre_requisite_course_id = unicode(pre_requisite_course_key)
self.dashboard_page = DashboardPage(self.browser)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_dashboard_message(self):
"""
        Scenario: The student dashboard should show appropriate messaging for
        any course that has a pre-requisite course set.
        Given that I am on the student dashboard
        When I view a course with a pre-requisite course set
        Then at the bottom of the course I should see a course requirements message.
"""
        # visit the dashboard page and make sure there is no pre-requisite course message
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())
# Logout and login as a staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# visit course settings page and set pre-requisite course
self.settings_page.visit()
self._set_pre_requisite_course()
# Logout and login as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
        # visit the dashboard page again; now it should show the pre-requisite course message
self.dashboard_page.visit()
EmptyPromise(lambda: self.dashboard_page.available_courses > 0, 'Dashboard page loaded').fulfill()
self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())
def _set_pre_requisite_course(self):
"""
set pre-requisite course
"""
select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
self.settings_page.save_changes()
@attr('shard_1')
class ProblemExecutionTest(UniqueCourseTest):
"""
Tests of problems.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(ProblemExecutionTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_nav = CourseNavPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
# Install a course with sections and problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_asset(['python_lib.zip'])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
"""\
<problem>
<script type="loncapa/python">
from number_helpers import seventeen, fortytwo
oneseven = seventeen()
def check_function(expect, ans):
if int(ans) == fortytwo(-22):
return True
else:
return False
</script>
<p>What is the sum of $oneseven and 3?</p>
<customresponse expect="20" cfn="check_function">
<textline/>
</customresponse>
</problem>
"""
))
)
)
).install()
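        # The problem imports number_helpers from the python_lib.zip asset;
        # judging from the assertions below it is assumed to define
        # seventeen() -> 17 and fortytwo(x) -> 42 + x, so the expected
        # answer is 20.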
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_python_execution_in_problem(self):
# Navigate to the problem page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
self.course_nav.go_to_section('Test Section', 'Test Subsection')
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name.upper(), 'PYTHON PROBLEM')
# Does the page have computation results?
self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)
# Fill in the answer correctly.
problem_page.fill_answer("20")
problem_page.click_check()
self.assertTrue(problem_page.is_correct())
# Fill in the answer incorrectly.
problem_page.fill_answer("4")
problem_page.click_check()
self.assertFalse(problem_page.is_correct())
@attr('shard_1')
class EntranceExamTest(UniqueCourseTest):
"""
Tests that course has an entrance exam.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EntranceExamTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_entrance_exam_section(self):
"""
        Scenario: Any course that has an entrance exam enabled should have an
        entrance exam chapter on the course page.
        Given that I am on the course page
        When I view a course that has an entrance exam
        Then there should be an "Entrance Exam" chapter.
"""
entrance_exam_link_selector = '.accordion .course-navigation .chapter .group-heading'
        # visit the course page and make sure there is no entrance exam chapter.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertFalse(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
# Logout and login as a staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
        # visit the course settings page and set/enable the entrance exam for that course.
self.settings_page.visit()
self.settings_page.wait_for_page()
self.assertTrue(self.settings_page.is_browser_on_page())
self.settings_page.entrance_exam_field.click()
self.settings_page.save_changes()
# Logout and login as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
# visit course info page and make sure there is an "Entrance Exam" section.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertTrue(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
@attr('shard_1')
class NotLiveRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to
the dashboard from a non-live course.
"""
def setUp(self):
"""Create a course that isn't live yet and enroll for it."""
super(NotLiveRedirectTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name'],
start_date=datetime(year=2099, month=1, day=1)
).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
url = BASE_URL + "/courses/" + self.course_id + "/" + 'info'
self.browser.get(url)
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for does not start until',
page.banner_text
)
@attr('shard_1')
class EnrollmentClosedRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to the
dashboard after trying to view the track selection page for a
course after enrollment has ended.
"""
def setUp(self):
"""Create a course that is closed for enrollment, and sign in as a user."""
super(EnrollmentClosedRedirectTest, self).setUp()
course = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
now = datetime.now(pytz.UTC)
course.add_course_details({
'enrollment_start': (now - timedelta(days=30)).isoformat(),
'enrollment_end': (now - timedelta(days=1)).isoformat()
})
course.install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser,
self.course_id,
mode_slug=u'verified',
mode_display_name=u'Verified Certificate',
min_price=10,
suggested_prices='10,20'
).visit()
def _assert_dashboard_message(self):
"""
Assert that the 'closed for enrollment' text is present on the
dashboard.
"""
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for is closed for enrollment',
page.banner_text
)
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
AutoAuthPage(self.browser).visit()
url = BASE_URL + "/course_modes/choose/" + self.course_id
self.browser.get(url)
self._assert_dashboard_message()
def test_login_redirect(self):
"""
Test that the user is correctly redirected after logistration when
attempting to enroll in a closed course.
"""
url = '{base_url}/register?{params}'.format(
base_url=BASE_URL,
params=urllib.urlencode({
'course_id': self.course_id,
'enrollment_action': 'enroll',
'email_opt_in': 'false'
})
)
self.browser.get(url)
register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
register_page.wait_for_page()
register_page.register(
email="email@example.com",
password="password",
username="username",
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
self._assert_dashboard_message()
@attr('shard_1')
class LMSLanguageTest(UniqueCourseTest):
""" Test suite for the LMS Language """
def setUp(self):
super(LMSLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_lms_language_change(self):
"""
Scenario: Ensure that language selection is working fine.
First I go to the user dashboard page in LMS. I can see 'English' is selected by default.
Then I choose 'Dummy Language' from drop down (at top of the page).
Then I visit the student account settings page and I can see the language has been updated to 'Dummy Language'
in both drop downs.
After that I select the 'English' language and visit the dashboard page again.
Then I can see that top level language selector persist its value to 'English'.
"""
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, 'Dummy Language (Esperanto)')
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'Dummy Language (Esperanto)')
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
        # change back to the English language.
select_option_by_text(language_selector, 'English')
self.account_settings.wait_for_ajax()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'English')
self.dashboard_page.visit()
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
@attr('a11y')
class CourseInfoA11yTest(UniqueCourseTest):
"""Accessibility test for course home/info page."""
def setUp(self):
super(CourseInfoA11yTest, self).setUp()
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='February 5th, 2014', content='Test course update2')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='March 31st, 2014', content='Test course update3')
)
self.course_fixture.install()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_home_a11y(self):
self.course_info_page.visit()
self.course_info_page.a11y_audit.check_for_accessibility_errors()
| Livit/Livit.Learn.EdX | common/test/acceptance/tests/lms/test_lms.py | Python | agpl-3.0 | 52,892 | [
"VisIt"
] | 4963198a81d715c6ddf20cf15c5cf8ce64a2d7a00d62d3f882a74d3dd13da52b |
# $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
# All Rights Reserved
#
import sys
from rdkit import six
class VLibNode(object):
""" base class for all virtual library nodes,
defines minimal required interface
"""
def __init__(self, *args, **kwargs):
self._children = []
self._parents = []
# ------------------------------------
#
# Iteration
#
def __iter__(self):
""" part of the iterator interface """
self.reset()
return self
def next(self):
""" part of the iterator interface
      raises StopIteration on exhaustion; concrete subclasses must override
      this stub to return the next item
"""
pass
def reset(self):
""" resets our iteration state
"""
for parent in self.GetParents():
parent.reset()
# ------------------------------------
#
# Library graph operations
# Probably most of these won't need to be reimplemented in
# child classes
#
def AddChild(self, child, notify=1):
"""
>>> p1 = VLibNode()
>>> p2 = VLibNode()
>>> c1 = VLibNode()
>>> p1.AddChild(c1)
>>> len(c1.GetParents())
1
>>> len(p1.GetChildren())
1
>>> p2.AddChild(c1,notify=0)
>>> len(c1.GetParents())
1
>>> len(p2.GetChildren())
1
>>> c1.AddParent(p2,notify=0)
>>> len(c1.GetParents())
2
>>> len(p2.GetChildren())
1
"""
self._children.append(child)
if notify:
child.AddParent(self, notify=0)
def RemoveChild(self, child, notify=1):
"""
>>> p1 = VLibNode()
>>> c1 = VLibNode()
>>> p1.AddChild(c1)
>>> len(c1.GetParents())
1
>>> len(p1.GetChildren())
1
>>> p1.RemoveChild(c1)
>>> len(c1.GetParents())
0
>>> len(p1.GetChildren())
0
"""
self._children.remove(child)
if notify:
child.RemoveParent(self, notify=0)
def GetChildren(self):
return tuple(self._children)
def AddParent(self, parent, notify=True):
"""
>>> p1 = VLibNode()
>>> p2 = VLibNode()
>>> c1 = VLibNode()
>>> c1.AddParent(p1)
>>> len(c1.GetParents())
1
>>> len(p1.GetChildren())
1
>>> c1.AddParent(p2,notify=0)
>>> len(c1.GetParents())
2
>>> len(p2.GetChildren())
0
>>> p2.AddChild(c1,notify=0)
>>> len(c1.GetParents())
2
>>> len(p2.GetChildren())
1
"""
self._parents.append(parent)
if notify:
parent.AddChild(self, notify=False)
def RemoveParent(self, parent, notify=True):
"""
>>> p1 = VLibNode()
>>> c1 = VLibNode()
>>> p1.AddChild(c1)
>>> len(c1.GetParents())
1
>>> len(p1.GetChildren())
1
>>> c1.RemoveParent(p1)
>>> len(c1.GetParents())
0
>>> len(p1.GetChildren())
0
"""
self._parents.remove(parent)
if notify:
parent.RemoveChild(self, notify=False)
def GetParents(self):
return tuple(self._parents)
def Destroy(self, notify=True, propagateDown=False, propagateUp=False):
"""
>>> p1 = VLibNode()
>>> p2 = VLibNode()
>>> c1 = VLibNode()
>>> c2 = VLibNode()
>>> p1.AddChild(c1)
>>> p2.AddChild(c1)
>>> p2.AddChild(c2)
>>> len(c1.GetParents())
2
>>> len(c2.GetParents())
1
>>> len(p1.GetChildren())
1
>>> len(p2.GetChildren())
2
>>> c1.Destroy(propagateUp=True)
>>> len(p2.GetChildren())
0
>>> len(c1.GetParents())
0
>>> len(c2.GetParents())
0
"""
if hasattr(self, '_destroyed'):
return
self._destroyed = True
if notify:
for o in self.GetChildren():
o.RemoveParent(self, notify=False)
if propagateDown:
o.Destroy(notify=True, propagateDown=True, propagateUp=propagateUp)
for o in self.GetParents():
o.RemoveChild(self, notify=False)
if propagateUp:
o.Destroy(notify=True, propagateDown=propagateDown, propagateUp=True)
self._children = []
self._parents = []
if six.PY3:
VLibNode.__next__ = VLibNode.next
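# Illustrative sketch (not part of the original module): nodes are linked into
# a small pipeline graph and iterated from the downstream end; iterating a node
# first reset()s all of its parents. The names below are hypothetical.
#
#   producer = VLibNode()
#   consumer = VLibNode()
#   producer.AddChild(consumer)   # notify=1 also registers producer as parent
#   assert consumer.GetParents() == (producer,)
#   for item in consumer:         # __iter__ -> reset() -> parent.reset()
#       pass                      # concrete subclasses yield items from next()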
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
| rvianello/rdkit | rdkit/VLib/Node.py | Python | bsd-3-clause | 4,231 | [
"RDKit"
] | 075a7f2eae2e4202c80a770e428024b7b3816c36ac45bc744f4c3dda21e8e653 |
# -*- coding: utf-8 -*-
"""
iCSD testing suite
"""
import os
import numpy as np
import numpy.testing as nt
import quantities as pq
import scipy.integrate as si
from scipy.interpolate import interp1d
from elephant.current_source_density import icsd
import unittest
# patch quantities with the SI unit Siemens if it does not exist
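# Note: each prefixed unit is chained to the previously created one; when
# `definition` is given as the string of the preceding symbol, it is replaced
# below by lastdefinition / 1000 (so mS = S / 1000, uS = mS / 1000, ...).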
for symbol, prefix, definition, u_symbol in zip(
['siemens', 'S', 'mS', 'uS', 'nS', 'pS'],
['', '', 'milli', 'micro', 'nano', 'pico'],
[pq.A / pq.V, pq.A / pq.V, 'S', 'mS', 'uS', 'nS'],
[None, None, None, None, u'µS', None]):
if isinstance(definition, str):
definition = lastdefinition / 1000
if not hasattr(pq, symbol):
setattr(pq, symbol, pq.UnitQuantity(
prefix + 'siemens',
definition,
symbol=symbol,
u_symbol=u_symbol))
lastdefinition = definition
def potential_of_plane(z_j, z_i=0. * pq.m,
C_i=1 * pq.A / pq.m**2,
sigma=0.3 * pq.S / pq.m):
"""
Return potential of infinite horizontal plane with constant
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to source layer
z_i : float*pq.m
z-position of source layer
C_i : float*pq.A/pq.m**2
current source density on circular disk in units of charge per area
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
Notes
-----
    The reference potential is taken to be 0 at the plane itself, since for an
    infinite sheet the potential grows without bound at large distances and
    cannot be referenced to zero at infinity
"""
try:
assert(z_j.units == z_i.units)
except AssertionError as ae:
print('units of z_j ({}) and z_i ({}) not equal'.format(z_j.units,
z_i.units))
raise ae
return -C_i / (2 * sigma) * abs(z_j - z_i).simplified
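# Worked example (illustrative, not used by the tests): with the defaults
# C_i = 1 A/m**2 and sigma = 0.3 S/m, a point 1 mm away from the source plane
# sits at phi = -C_i / (2 * sigma) * |z_j - z_i| = -(1 / 0.6) * 1E-3 V,
# i.e. roughly -1.67 mV.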
def potential_of_disk(z_j,
z_i=0. * pq.m,
C_i=1 * pq.A / pq.m**2,
R_i=1E-3 * pq.m,
sigma=0.3 * pq.S / pq.m):
"""
Return potential of circular disk in horizontal plane with constant
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to center of disk
z_i : float*pq.m
z_j-position of source disk
C_i : float*pq.A/pq.m**2
current source density on circular disk in units of charge per area
R_i : float*pq.m
radius of disk source
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
"""
try:
assert(z_j.units == z_i.units == R_i.units)
except AssertionError as ae:
print('units of z_j ({}), z_i ({}) and R_i ({}) not equal'.format(
z_j.units, z_i.units, R_i.units))
raise ae
return C_i / (2 * sigma) * (
np.sqrt((z_j - z_i) ** 2 + R_i**2) - abs(z_j - z_i)).simplified
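# Sanity checks on the closed form above (illustrative): on the disk axis at
# z_j = z_i the potential is C_i * R_i / (2 * sigma), and for R_i -> infinity
# the z-derivative tends to -C_i / (2 * sigma) * sign(z_j - z_i), recovering
# the infinite-plane result up to the choice of reference potential.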
def potential_of_cylinder(z_j,
z_i=0. * pq.m,
C_i=1 * pq.A / pq.m**3,
R_i=1E-3 * pq.m,
h_i=0.1 * pq.m,
sigma=0.3 * pq.S / pq.m,
):
"""
Return potential of cylinder in horizontal plane with constant homogeneous
current source density at a vertical offset z_j.
Arguments
---------
z_j : float*pq.m
distance perpendicular to center of disk
z_i : float*pq.m
z-position of center of source cylinder
h_i : float*pq.m
thickness of cylinder
C_i : float*pq.A/pq.m**3
current source density on circular disk in units of charge per area
R_i : float*pq.m
radius of disk source
sigma : float*pq.S/pq.m
conductivity of medium in units of S/m
Notes
-----
    Sympy can't deal with eq. 11 in Pettersen et al. (2006), J Neurosci Methods,
    so we evaluate it numerically in this function.
    Tested with
    >>>from sympy import *
    >>>C_i, z_i, h, z, z_j, sigma, R = symbols('C_i z_i h z z_j sigma R')
>>>C_i*integrate(1/(2*sigma)*(sqrt((z-z_j)**2 + R**2) -
... abs(z-z_j)), (z, z_i-h/2, z_i+h/2))
"""
try:
assert(z_j.units == z_i.units == R_i.units == h_i.units)
except AssertionError as ae:
print('units of z_j ({}), z_i ({}), R_i ({}) and h ({}) not equal'
.format(z_j.units, z_i.units, R_i.units, h_i.units))
raise ae
# speed up tests by stripping units
_sigma = float(sigma)
_R_i = float(R_i)
_z_i = float(z_i)
_z_j = float(z_j)
    # evaluate the integrand with scipy.integrate.quad; quad returns the pair
    # (value, abserr), and multiplying by C_i broadcasts over both entries
    # (the scaled abserr is unused)
def integrand(z):
return 1 / (2 * _sigma) * \
(np.sqrt((z - _z_j)**2 + _R_i**2) - abs(z - _z_j))
phi_j, abserr = C_i * si.quad(integrand, z_i - h_i / 2, z_i + h_i / 2)
return (phi_j * z_i.units**2 / sigma.units)
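# Illustrative consistency check: letting h_i -> 0 while keeping C_i * h_i
# fixed turns the volume density into a planar density, and the quadrature
# above collapses to the closed-form disk expression in potential_of_disk.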
def get_lfp_of_planes(z_j=np.arange(21) * 1E-4 * pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m,
C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**2,
sigma=0.3 * pq.S / pq.m,
plot=True):
"""
Compute the lfp of spatially separated planes with given current source
density
"""
phi_j = np.zeros(z_j.size) * pq.V
for i, (zi, Ci) in enumerate(zip(z_i, C_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_plane(zj, zi, Ci, sigma)
# test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
for i, C in enumerate(C_i):
ax.plot((0, C), (z_i[i], z_i[i]), 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('planar CSD')
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
def get_lfp_of_disks(z_j=np.arange(21) * 1E-4 * pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m,
C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**2,
R_i=np.array([1, 1, 1]) * 1E-3 * pq.m,
sigma=0.3 * pq.S / pq.m,
plot=True):
"""
Compute the lfp of spatially separated disks with a given
current source density
"""
phi_j = np.zeros(z_j.size) * pq.V
for i, (zi, Ci, Ri) in enumerate(zip(z_i, C_i, R_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_disk(zj, zi, Ci, Ri, sigma)
# test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
for i, C in enumerate(C_i):
ax.plot((0, C), (z_i[i], z_i[i]), 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('disk CSD\nR={}'.format(R_i))
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
def get_lfp_of_cylinders(z_j=np.arange(21) * 1E-4 * pq.m,
z_i=np.array([8E-4, 10E-4, 12E-4]) * pq.m,
C_i=np.array([-.5, 1., -.5]) * pq.A / pq.m**3,
R_i=np.array([1, 1, 1]) * 1E-3 * pq.m,
h_i=np.array([1, 1, 1]) * 1E-4 * pq.m,
sigma=0.3 * pq.S / pq.m,
plot=True):
"""
    Compute the lfp of spatially separated cylinders with a given
current source density
"""
phi_j = np.zeros(z_j.size) * pq.V
for i, (zi, Ci, Ri, hi) in enumerate(zip(z_i, C_i, R_i, h_i)):
for j, zj in enumerate(z_j):
phi_j[j] += potential_of_cylinder(zj, zi, Ci, Ri, hi, sigma)
# test plot
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.subplot(121)
ax = plt.gca()
ax.plot(np.zeros(z_j.size), z_j, 'r-o')
ax.barh(np.asarray(z_i - h_i / 2),
np.asarray(C_i),
np.asarray(h_i), color='r')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_ylabel('z_j ({})'.format(z_j.units))
ax.set_xlabel('C_i ({})'.format(C_i.units))
ax.set_title('cylinder CSD\nR={}'.format(R_i))
plt.subplot(122)
ax = plt.gca()
ax.plot(phi_j, z_j, 'r-o')
ax.set_ylim(z_j.min(), z_j.max())
ax.set_xlabel('phi_j ({})'.format(phi_j.units))
ax.set_title('LFP')
return phi_j, C_i
class TestICSD(unittest.TestCase):
"""
Set of test functions for each CSD estimation method comparing
estimate to LFPs calculated with known ground truth CSD
"""
def test_StandardCSD_00(self):
"""test using standard SI units"""
# set some parameters for ground truth csd and csd estimates.
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# uniform conductivity
sigma = 0.3 * pq.S / pq.m
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'sigma': sigma,
'f_type': 'gaussian',
'f_order': (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_01(self):
"""test using non-standard SI units 1"""
# set some parameters for ground truth csd and csd estimates.
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * 1E3 * pq.A / pq.m**2
# uniform conductivity
sigma = 0.3 * pq.S / pq.m
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp': phi_j * 1E3 * pq.mV / pq.V,
'coord_electrode': z_j,
'sigma': sigma,
'f_type': 'gaussian',
'f_order': (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_02(self):
"""test using non-standard SI units 2"""
# set some parameters for ground truth csd and csd estimates.
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# uniform conductivity
sigma = 0.3 * pq.S / pq.m
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp': phi_j,
'coord_electrode': z_j * 1E3 * pq.mm / pq.m,
'sigma': sigma,
'f_type': 'gaussian',
'f_order': (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StandardCSD_03(self):
"""test using non-standard SI units 3"""
# set some parameters for ground truth csd and csd estimates.
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# uniform conductivity
sigma = 0.3 * pq.mS / pq.m
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_planes(z_j, z_i, C_i, sigma, plot)
std_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'sigma': sigma * 1E3 * pq.mS / pq.S,
'f_type': 'gaussian',
'f_order': (3, 1),
}
std_csd = icsd.StandardCSD(**std_input)
csd = std_csd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_00(self):
"""test using standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i.mean() * 2, # source diameter
'sigma': sigma, # extracellular conductivity
'sigma_top': sigma_top, # conductivity on top of cortex
'f_type': 'gaussian', # gaussian filter
'f_order': (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_01(self):
"""test using non-standard SI units 1"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp': phi_j * 1E3 * pq.mV / pq.V,
'coord_electrode': z_j,
'diam': R_i.mean() * 2, # source diameter
'sigma': sigma, # extracellular conductivity
'sigma_top': sigma_top, # conductivity on top of cortex
'f_type': 'gaussian', # gaussian filter
'f_order': (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_02(self):
"""test using non-standard SI units 2"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp': phi_j,
'coord_electrode': z_j * 1E3 * pq.mm / pq.m,
'diam': R_i.mean() * 2 * 1E3 * pq.mm / pq.m, # source diameter
'sigma': sigma, # extracellular conductivity
'sigma_top': sigma_top, # conductivity on top of cortex
'f_type': 'gaussian', # gaussian filter
'f_order': (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_03(self):
"""test using non-standard SI units 3"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
delta_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i.mean() * 2, # source diameter
'sigma': sigma * 1E3 * pq.mS / pq.S, # extracellular conductivity
'sigma_top': sigma_top * 1E3 * pq.mS / pq.S, # conductivity on
# top of cortex
'f_type': 'gaussian', # gaussian filter
'f_order': (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_DeltaiCSD_04(self):
"""test non-continous z_j array"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**2
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**2
# source radius (delta, step)
R_i = np.ones(z_j.size) * 1E-3 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_disks(z_j, z_i, C_i, R_i, sigma,
plot)
inds = np.delete(np.arange(21), 5)
delta_input = {
'lfp': phi_j[inds],
'coord_electrode': z_j[inds],
'diam': R_i[inds] * 2, # source diameter
'sigma': sigma, # extracellular conductivity
'sigma_top': sigma_top, # conductivity on top of cortex
'f_type': 'gaussian', # gaussian filter
'f_order': (3, 1), # 3-point filter, sigma = 1.
}
delta_icsd = icsd.DeltaiCSD(**delta_input)
csd = delta_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i[inds], csd)
def test_StepiCSD_units_00(self):
"""test using standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i.mean() * 2,
'sigma': sigma,
'sigma_top': sigma,
'h': h_i,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_01(self):
"""test using non-standard SI units 1"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp': phi_j * 1E3 * pq.mV / pq.V,
'coord_electrode': z_j,
'diam': R_i.mean() * 2,
'sigma': sigma,
'sigma_top': sigma,
'h': h_i,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_02(self):
"""test using non-standard SI units 2"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp': phi_j,
'coord_electrode': z_j * 1E3 * pq.mm / pq.m,
'diam': R_i.mean() * 2 * 1E3 * pq.mm / pq.m,
'sigma': sigma,
'sigma_top': sigma,
'h': h_i * 1E3 * pq.mm / pq.m,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_03(self):
"""test using non-standard SI units 3"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
step_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i.mean() * 2,
'sigma': sigma * 1E3 * pq.mS / pq.S,
'sigma_top': sigma * 1E3 * pq.mS / pq.S,
'h': h_i,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd)
def test_StepiCSD_units_04(self):
"""test non-continous z_j array"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i, C_i, R_i, h_i,
sigma, plot)
inds = np.delete(np.arange(21), 5)
step_input = {
'lfp': phi_j[inds],
'coord_electrode': z_j[inds],
'diam': R_i[inds] * 2,
'sigma': sigma,
'sigma_top': sigma,
'h': h_i[inds],
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
step_icsd = icsd.StepiCSD(**step_input)
csd = step_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i[inds], csd)
def test_SplineiCSD_00(self):
"""test using standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
        # construct interpolators; the spline method assumes an underlying
        # source pattern generating LFPs that is a cubic spline interpolant
        # between contacts, so we generate CSD data relying on the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(
z_i[-1]), num_steps) * z_i.units
C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
R_i_i = f_R(z_i_i) * R_i.units
h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i * 2,
'sigma': sigma,
'sigma_top': sigma,
'num_steps': num_steps,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_01(self):
"""test using standard SI units, deep electrode coordinates"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(10, 31) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
        # construct interpolators; the spline method assumes an underlying
        # source pattern generating LFPs that is a cubic spline interpolant
        # between contacts, so we generate CSD data relying on the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(
z_i[-1]), num_steps) * z_i.units
C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
R_i_i = f_R(z_i_i) * R_i.units
h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i * 2,
'sigma': sigma,
'sigma_top': sigma,
'num_steps': num_steps,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_02(self):
"""test using non-standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
        # construct interpolators; the spline method assumes an underlying
        # source pattern generating LFPs that is a cubic spline interpolant
        # between contacts, so we generate CSD data relying on the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(
z_i[-1]), num_steps) * z_i.units
C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
R_i_i = f_R(z_i_i) * R_i.units
h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp': phi_j * 1E3 * pq.mV / pq.V,
'coord_electrode': z_j,
'diam': R_i * 2,
'sigma': sigma,
'sigma_top': sigma,
'num_steps': num_steps,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_03(self):
"""test using standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
        # construct interpolators; the spline method assumes an underlying
        # source pattern generating LFPs that is a cubic spline interpolant
        # between contacts, so we generate CSD data relying on the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(
z_i[-1]), num_steps) * z_i.units
C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
R_i_i = f_R(z_i_i) * R_i.units
h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp': phi_j,
'coord_electrode': z_j * 1E3 * pq.mm / pq.m,
'diam': R_i * 2 * 1E3 * pq.mm / pq.m,
'sigma': sigma,
'sigma_top': sigma,
'num_steps': num_steps,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
def test_SplineiCSD_04(self):
"""test using standard SI units"""
        # set some parameters for ground truth csd and csd estimates, e.g.,
        # we will use the same source diameter as in the ground truth
# contact point coordinates
z_j = np.arange(21) * 1E-4 * pq.m
# source coordinates
z_i = z_j
# current source density magnitude
C_i = np.zeros(z_i.size) * pq.A / pq.m**3
C_i[7:12:2] += np.array([-.5, 1., -.5]) * pq.A / pq.m**3
# source radius (delta, step)
R_i = np.ones(z_i.size) * 1E-3 * pq.m
# source height (cylinder)
h_i = np.ones(z_i.size) * 1E-4 * pq.m
# conductivity, use same conductivity for top layer (z_j < 0)
sigma = 0.3 * pq.S / pq.m
sigma_top = sigma
        # construct interpolators; the spline method assumes an underlying
        # source pattern generating LFPs that is a cubic spline interpolant
        # between contacts, so we generate CSD data relying on the same assumption
f_C = interp1d(z_i, C_i, kind='cubic')
f_R = interp1d(z_i, R_i)
num_steps = 201
z_i_i = np.linspace(float(z_i[0]), float(
z_i[-1]), num_steps) * z_i.units
C_i_i = f_C(np.asarray(z_i_i)) * C_i.units
R_i_i = f_R(z_i_i) * R_i.units
h_i_i = np.ones(z_i_i.size) * np.diff(z_i_i).min()
# flag for debug plots
plot = False
# get LFP and CSD at contacts
phi_j, C_i = get_lfp_of_cylinders(z_j, z_i_i, C_i_i, R_i_i, h_i_i,
sigma, plot)
spline_input = {
'lfp': phi_j,
'coord_electrode': z_j,
'diam': R_i * 2,
'sigma': sigma * 1E3 * pq.mS / pq.S,
'sigma_top': sigma * 1E3 * pq.mS / pq.S,
'num_steps': num_steps,
'tol': 1E-12, # Tolerance in numerical integration
'f_type': 'gaussian',
'f_order': (3, 1),
}
spline_icsd = icsd.SplineiCSD(**spline_input)
csd = spline_icsd.get_csd()
self.assertEqual(C_i.units, csd.units)
nt.assert_array_almost_equal(C_i, csd, decimal=3)
if __name__ == "__main__":
unittest.main(verbosity=2)
| INM-6/elephant | elephant/test/test_icsd.py | Python | bsd-3-clause | 40,049 | [
"Gaussian"
] | be1c9e398356cabbd3ed07746fee508e65ccd61ead40531c3e842ed76349ad3c |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides so-called "strategies" to determine the coordination environments of an atom in a structure.
Some strategies can favour larger or smaller environments. Some strategies uniquely identify the environments, while
others can identify the environment as a "mix" of several environments, each of which is assigned a given
fraction. The choice of the strategy depends on the purpose of the user.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import abc
import os
from monty.json import MSONable
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.operations import SymmOp
from pymatgen.core.sites import PeriodicSite
import numpy as np
from scipy.stats import gmean
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import UNCLEAR_ENVIRONMENT_SYMBOL
from pymatgen.analysis.chemenv.utils.coordination_geometry_utils import get_lower_and_upper_f
from pymatgen.analysis.chemenv.utils.func_utils import CSMFiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import CSMInfiniteRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import DeltaCSMRatioFunction
from pymatgen.analysis.chemenv.utils.func_utils import RatioFunction
from pymatgen.analysis.chemenv.utils.chemenv_errors import EquivalentSiteSearchError
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from pymatgen.analysis.chemenv.utils.defs_utils import AdditionalConditions
from pymatgen.analysis.chemenv.coordination_environments.voronoi import DetailedVoronoiContainer
from collections import OrderedDict
module_dir = os.path.dirname(os.path.abspath(__file__))
MPSYMBOL_TO_CN = AllCoordinationGeometries().get_symbol_cn_mapping()
ALLCG = AllCoordinationGeometries()
class StrategyOption(MSONable, metaclass=abc.ABCMeta):
allowed_values = None
@abc.abstractmethod
def as_dict(self):
"""
A JSON serializable dict representation of this strategy option.
"""
pass
class DistanceCutoffFloat(float, StrategyOption):
allowed_values = 'Real number between 1.0 and +infinity'
def __new__(cls, myfloat):
flt = float.__new__(cls, myfloat)
if flt < 1.0:
raise ValueError("Distance cutoff should be between 1.0 and +infinity")
return flt
def as_dict(self):
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'value': self}
@classmethod
def from_dict(cls, d):
return cls(d['value'])
class AngleCutoffFloat(float, StrategyOption):
allowed_values = 'Real number between 0.0 and 1.0'
def __new__(cls, myfloat):
flt = float.__new__(cls, myfloat)
if flt < 0.0 or flt > 1.0:
raise ValueError("Angle cutoff should be between 0.0 and 1.0")
return flt
def as_dict(self):
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'value': self}
@classmethod
def from_dict(cls, d):
return cls(d['value'])
class CSMFloat(float, StrategyOption):
allowed_values = 'Real number between 0.0 and 100.0'
def __new__(cls, myfloat):
flt = float.__new__(cls, myfloat)
if flt < 0.0 or flt > 100.0:
raise ValueError("Continuous symmetry measure limits should be between 0.0 and 100.0")
return flt
def as_dict(self):
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'value': self}
@classmethod
def from_dict(cls, d):
return cls(d['value'])
class AdditionalConditionInt(int, StrategyOption):
allowed_values = 'Integer amongst :\n'
for integer, description in AdditionalConditions.CONDITION_DESCRIPTION.items():
allowed_values += ' - {:d} for "{}"\n'.format(integer, description)
def __new__(cls, integer):
if str(int(integer)) != str(integer):
raise ValueError("Additional condition {} is not an integer".format(str(integer)))
        intval = int.__new__(cls, integer)
        if intval not in AdditionalConditions.ALL:
            raise ValueError("Additional condition {:d} is not allowed".format(integer))
        return intval
def as_dict(self):
return {'@module': self.__class__.__module__,
'@class': self.__class__.__name__,
'value': self}
@classmethod
def from_dict(cls, d):
return cls(d['value'])
class AbstractChemenvStrategy(MSONable, metaclass=abc.ABCMeta):
"""
Class used to define a Chemenv strategy for the neighbors and coordination environment to be applied to a
StructureEnvironments object
"""
AC = AdditionalConditions()
STRATEGY_OPTIONS = OrderedDict()
STRATEGY_DESCRIPTION = None
STRATEGY_INFO_FIELDS = []
DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'
def __init__(self, structure_environments=None, symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
"""
Abstract constructor for the all chemenv strategies.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
self.structure_environments = None
if structure_environments is not None:
self.set_structure_environments(structure_environments)
self._symmetry_measure_type = symmetry_measure_type
@property
def symmetry_measure_type(self):
return self._symmetry_measure_type
def set_structure_environments(self, structure_environments):
self.structure_environments = structure_environments
if not isinstance(self.structure_environments.voronoi, DetailedVoronoiContainer):
raise ValueError('Voronoi Container not of type "DetailedVoronoiContainer"')
self.prepare_symmetries()
def prepare_symmetries(self):
try:
self.spg_analyzer = SpacegroupAnalyzer(self.structure_environments.structure)
self.symops = self.spg_analyzer.get_symmetry_operations()
        except Exception:
self.symops = []
def equivalent_site_index_and_transform(self, psite):
# Get the index of the site in the unit cell of which the PeriodicSite psite is a replica.
try:
isite = self.structure_environments.structure.index(psite)
except ValueError:
try:
uc_psite = psite.to_unit_cell()
isite = self.structure_environments.structure.index(uc_psite)
except ValueError:
for isite2, site2 in enumerate(self.structure_environments.structure):
if psite.is_periodic_image(site2):
isite = isite2
break
# Get the translation between psite and its corresponding site in the unit cell (Translation I)
thissite = self.structure_environments.structure[isite]
dthissite = psite.frac_coords - thissite.frac_coords
# Get the translation between the equivalent site for which the neighbors have been computed and the site in
# the unit cell that corresponds to psite (Translation II)
equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]].to_unit_cell()
#equivsite = self.structure_environments.structure[self.structure_environments.sites_map[isite]]
dequivsite = (self.structure_environments.structure[self.structure_environments.sites_map[isite]].frac_coords
- equivsite.frac_coords)
found = False
# Find the symmetry that applies the site in the unit cell to the equivalent site, as well as the translation
# that gets back the site to the unit cell (Translation III)
#TODO: check that these tolerances are needed, now that the structures are refined before analyzing environments
tolerances = [1e-8, 1e-7, 1e-6, 1e-5, 1e-4]
for tolerance in tolerances:
for symop in self.symops:
newsite = PeriodicSite(equivsite._species, symop.operate(equivsite.frac_coords), equivsite._lattice)
if newsite.is_periodic_image(thissite, tolerance=tolerance):
mysym = symop
dthissite2 = thissite.frac_coords - newsite.frac_coords
found = True
break
if not found:
symops = [SymmOp.from_rotation_and_translation()]
for symop in symops:
newsite = PeriodicSite(equivsite._species, symop.operate(equivsite.frac_coords), equivsite._lattice)
#if newsite.is_periodic_image(thissite):
if newsite.is_periodic_image(thissite, tolerance=tolerance):
mysym = symop
dthissite2 = thissite.frac_coords - newsite.frac_coords
found = True
break
if found:
break
if not found:
raise EquivalentSiteSearchError(psite)
return [self.structure_environments.sites_map[isite], dequivsite, dthissite + dthissite2, mysym]
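    # Downstream use of the returned transform (illustrative): a neighbor
    # position stored for the equivalent site is mapped back into the frame of
    # `psite` via
    #     coords = mysym.operate(nb.frac_coords + dequivsite) + dthissite
    # as done e.g. in SimplestChemenvStrategy.get_site_neighbors.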
@abc.abstractmethod
def get_site_neighbors(self, site):
"""
Applies the strategy to the structure_environments object in order to get the neighbors of a given site.
        :param site: Site for which the neighbors are looked for
:return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
can return a list of list of neighbors
"""
raise NotImplementedError()
@property
def uniquely_determines_coordination_environments(self):
"""
Returns True if the strategy leads to a unique coordination environment, False otherwise.
:return: True if the strategy leads to a unique coordination environment, False otherwise.
"""
raise NotImplementedError()
@abc.abstractmethod
def get_site_coordination_environment(self, site):
"""
Applies the strategy to the structure_environments object in order to define the coordination environment of
a given site.
:param site: Site for which the coordination environment is looked for
:return: The coordination environment of the site. For complex strategies, where one allows multiple
solutions, this can return a list of coordination environments for the site
"""
raise NotImplementedError()
@abc.abstractmethod
def get_site_coordination_environments(self, site):
"""
Applies the strategy to the structure_environments object in order to define the coordination environment of
a given site.
:param site: Site for which the coordination environment is looked for
:return: The coordination environment of the site. For complex strategies, where one allows multiple
solutions, this can return a list of coordination environments for the site
"""
raise NotImplementedError()
@abc.abstractmethod
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False):
"""
Applies the strategy to the structure_environments object in order to define the coordination environment of
a given site.
:param site: Site for which the coordination environment is looked for
:return: The coordination environment of the site. For complex strategies, where one allows multiple
solutions, this can return a list of coordination environments for the site
"""
raise NotImplementedError()
def get_site_ce_fractions_and_neighbors(self, site, full_ce_info=False, strategy_info=False):
"""
Applies the strategy to the structure_environments object in order to get coordination environments, their
fraction, csm, geometry_info, and neighbors
        :param site: Site for which the above information is sought
:return: The list of neighbors of the site. For complex strategies, where one allows multiple solutions, this
can return a list of list of neighbors
"""
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
geoms_and_maps_list = self.get_site_coordination_environments_fractions(site=site, isite=isite,
dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym,
return_maps=True,
return_strategy_dict_info=True)
if geoms_and_maps_list is None:
return None
site_nbs_sets = self.structure_environments.neighbors_sets[isite]
ce_and_neighbors = []
for fractions_dict in geoms_and_maps_list:
ce_map = fractions_dict['ce_map']
ce_nb_set = site_nbs_sets[ce_map[0]][ce_map[1]]
neighbors = [{'site': nb_site_and_index['site'],
'index': nb_site_and_index['index']}
for nb_site_and_index in ce_nb_set.neighb_sites_and_indices]
fractions_dict['neighbors'] = neighbors
ce_and_neighbors.append(fractions_dict)
return ce_and_neighbors
def set_option(self, option_name, option_value):
self.__setattr__(option_name, option_value)
def setup_options(self, all_options_dict):
for option_name, option_value in all_options_dict.items():
self.set_option(option_name, option_value)
@abc.abstractmethod
def __eq__(self, other):
"""
Equality method that should be implemented for any strategy
:param other: strategy to be compared with the current one
:return:
"""
raise NotImplementedError()
def __str__(self):
out = ' Chemenv Strategy "{}"\n'.format(self.__class__.__name__)
out += ' {}\n\n'.format('='*(19+len(self.__class__.__name__)))
out += ' Description :\n {}\n'.format('-'*13)
out += self.STRATEGY_DESCRIPTION
out += '\n\n'
out += ' Options :\n {}\n'.format('-'*9)
for option_name, option_dict in self.STRATEGY_OPTIONS.items():
out += ' - {} : {}\n'.format(option_name, str(getattr(self, option_name)))
return out
@abc.abstractmethod
def as_dict(self):
"""
Bson-serializable dict representation of the SimplestChemenvStrategy object.
:return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
"""
raise NotImplementedError()
@classmethod
def from_dict(cls, d):
"""
        Reconstructs the strategy object from a dict representation of the
        strategy object created using the as_dict method.
        :param d: dict representation of the strategy object
        :return: strategy object
"""
raise NotImplementedError()
class SimplestChemenvStrategy(AbstractChemenvStrategy):
"""
Simplest ChemenvStrategy using fixed angle and distance parameters for the definition of neighbors in the
    Voronoi approach. The coordination environment is then given as the one with the lowest continuous symmetry measure.
"""
# Default values for the distance and angle cutoffs
DEFAULT_DISTANCE_CUTOFF = 1.4
DEFAULT_ANGLE_CUTOFF = 0.3
DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF = 10.0
DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
STRATEGY_OPTIONS = OrderedDict()
STRATEGY_OPTIONS['distance_cutoff'] = {'type': DistanceCutoffFloat, 'internal': '_distance_cutoff',
'default': DEFAULT_DISTANCE_CUTOFF}
STRATEGY_OPTIONS['angle_cutoff'] = {'type': AngleCutoffFloat, 'internal': '_angle_cutoff',
'default': DEFAULT_ANGLE_CUTOFF}
STRATEGY_OPTIONS['additional_condition'] = {'type': AdditionalConditionInt,
'internal': '_additional_condition',
'default': DEFAULT_ADDITIONAL_CONDITION}
STRATEGY_OPTIONS['continuous_symmetry_measure_cutoff'] = {'type': CSMFloat,
'internal': '_continuous_symmetry_measure_cutoff',
'default': DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF}
STRATEGY_DESCRIPTION = ' Simplest ChemenvStrategy using fixed angle and distance parameters \n' \
' for the definition of neighbors in the Voronoi approach. \n' \
' The coordination environment is then given as the one with the \n' \
' lowest continuous symmetry measure.'
def __init__(self, structure_environments=None, distance_cutoff=DEFAULT_DISTANCE_CUTOFF,
angle_cutoff=DEFAULT_ANGLE_CUTOFF, additional_condition=DEFAULT_ADDITIONAL_CONDITION,
continuous_symmetry_measure_cutoff=DEFAULT_CONTINUOUS_SYMMETRY_MEASURE_CUTOFF,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
"""
Constructor for this SimplestChemenvStrategy.
:param distance_cutoff: Distance cutoff used
:param angle_cutoff: Angle cutoff used
"""
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self.distance_cutoff = distance_cutoff
self.angle_cutoff = angle_cutoff
self.additional_condition = additional_condition
self.continuous_symmetry_measure_cutoff = continuous_symmetry_measure_cutoff
@property
def uniquely_determines_coordination_environments(self):
return True
@property
def distance_cutoff(self):
return self._distance_cutoff
@distance_cutoff.setter
def distance_cutoff(self, distance_cutoff):
self._distance_cutoff = DistanceCutoffFloat(distance_cutoff)
@property
def angle_cutoff(self):
return self._angle_cutoff
@angle_cutoff.setter
def angle_cutoff(self, angle_cutoff):
self._angle_cutoff = AngleCutoffFloat(angle_cutoff)
@property
def additional_condition(self):
return self._additional_condition
@additional_condition.setter
def additional_condition(self, additional_condition):
self._additional_condition = AdditionalConditionInt(additional_condition)
@property
def continuous_symmetry_measure_cutoff(self):
return self._continuous_symmetry_measure_cutoff
@continuous_symmetry_measure_cutoff.setter
def continuous_symmetry_measure_cutoff(self, continuous_symmetry_measure_cutoff):
self._continuous_symmetry_measure_cutoff = CSMFloat(continuous_symmetry_measure_cutoff)
def get_site_neighbors(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None):#, neighbors_map=None):
#if neighbors_map is not None:
# return self.structure_environments.voronoi.get_neighbors(isite=isite, neighbors_map=neighbors_map)
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
ce, cn_map = self.get_site_coordination_environment(site=site, isite=isite,
dequivsite=dequivsite, dthissite=dthissite, mysym=mysym,
return_map=True)
nb_set = self.structure_environments.neighbors_sets[isite][cn_map[0]][cn_map[1]]
eqsite_ps = nb_set.neighb_sites
coordinated_neighbors = []
for ips, ps in enumerate(eqsite_ps):
coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
ps_site = PeriodicSite(ps._species, coords, ps._lattice)
coordinated_neighbors.append(ps_site)
return coordinated_neighbors
def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_map=False):
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
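        # Map the scalar cutoffs onto the discretized Voronoi parameter grids:
        # idist is the largest normalized-distance bin whose lower bound still
        # lies below distance_cutoff, and iang the largest normalized-angle
        # bin whose upper bound still lies above angle_cutoff (both lists are
        # assumed ordered, as built by the DetailedVoronoiContainer).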
neighbors_normalized_distances = self.structure_environments.voronoi.neighbors_normalized_distances[isite]
neighbors_normalized_angles = self.structure_environments.voronoi.neighbors_normalized_angles[isite]
idist = None
for iwd, wd in enumerate(neighbors_normalized_distances):
if self.distance_cutoff >= wd['min']:
idist = iwd
else:
break
iang = None
for iwa, wa in enumerate(neighbors_normalized_angles):
if self.angle_cutoff <= wa['max']:
iang = iwa
else:
break
if idist is None or iang is None:
raise ValueError('Distance or angle parameter not found ...')
my_cn = None
my_inb_set = None
found = False
for cn, nb_sets in self.structure_environments.neighbors_sets[isite].items():
for inb_set, nb_set in enumerate(nb_sets):
sources = [src for src in nb_set.sources
if src['origin'] == 'dist_ang_ac_voronoi' and src['ac'] == self.additional_condition]
for src in sources:
if src['idp'] == idist and src['iap'] == iang:
my_cn = cn
my_inb_set = inb_set
found = True
break
if found:
break
if found:
break
if not found:
return None
cn_map = (my_cn, my_inb_set)
ce = self.structure_environments.ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]]
if ce is None:
return None
coord_geoms = ce.coord_geoms
if return_map:
if coord_geoms is None:
return cn_map[0], cn_map
return (ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map)
else:
if coord_geoms is None:
return cn_map[0]
return ce.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
site_nb_sets = self.structure_environments.neighbors_sets[isite]
if site_nb_sets is None:
return None
ce_and_map = self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym,
return_map=True)
if ce_and_map is None:
return None
ce, ce_map = ce_and_map
if ce is None:
ce_dict = {'ce_symbol': 'UNKNOWN:{:d}'.format(ce_map[0]), 'ce_dict': None, 'ce_fraction': 1.0}
else:
ce_dict = {'ce_symbol': ce[0], 'ce_dict': ce[1], 'ce_fraction': 1.0}
if return_maps:
ce_dict['ce_map'] = ce_map
if return_strategy_dict_info:
ce_dict['strategy_info'] = {}
fractions_info_list = [ce_dict]
return fractions_info_list
def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_maps=False):
return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def add_strategy_visualization_to_subplot(self, subplot, visualization_options=None, plot_type=None):
subplot.plot(self._distance_cutoff, self._angle_cutoff, 'o', mec=None, mfc='w', markersize=12)
subplot.plot(self._distance_cutoff, self._angle_cutoff, 'x', linewidth=2, markersize=12)
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._distance_cutoff == other._distance_cutoff and self._angle_cutoff == other._angle_cutoff and
self._additional_condition == other._additional_condition and
self._continuous_symmetry_measure_cutoff == other._continuous_symmetry_measure_cutoff and
self.symmetry_measure_type == other.symmetry_measure_type)
def as_dict(self):
"""
Bson-serializable dict representation of the SimplestChemenvStrategy object.
:return: Bson-serializable dict representation of the SimplestChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"distance_cutoff": float(self._distance_cutoff),
"angle_cutoff": float(self._angle_cutoff),
"additional_condition": int(self._additional_condition),
"continuous_symmetry_measure_cutoff": float(self._continuous_symmetry_measure_cutoff),
"symmetry_measure_type": self._symmetry_measure_type}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the SimplestChemenvStrategy object from a dict representation of the SimplestChemenvStrategy object
created using the as_dict method.
:param d: dict representation of the SimplestChemenvStrategy object
        :return: SimplestChemenvStrategy object
"""
return cls(distance_cutoff=d["distance_cutoff"], angle_cutoff=d["angle_cutoff"],
additional_condition=d["additional_condition"],
continuous_symmetry_measure_cutoff=d["continuous_symmetry_measure_cutoff"],
symmetry_measure_type=d["symmetry_measure_type"])
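# Illustrative usage sketch (assumptions: the usual chemenv workflow with
# LocalGeometryFinder and a pre-built pymatgen `structure`; neither is defined
# in this module):
#
#   from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder \
#       import LocalGeometryFinder
#   lgf = LocalGeometryFinder()
#   lgf.setup_structure(structure=structure)
#   se = lgf.compute_structure_environments()
#   strategy = SimplestChemenvStrategy(distance_cutoff=1.4, angle_cutoff=0.3)
#   strategy.set_structure_environments(se)
#   ce = strategy.get_site_coordination_environment(structure[0])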
class SimpleAbundanceChemenvStrategy(AbstractChemenvStrategy):
"""
Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
parameters for the definition of neighbors in the Voronoi approach.
    The coordination environment is then given as the one with the lowest continuous symmetry measure.
"""
DEFAULT_MAX_DIST = 2.0
DEFAULT_ADDITIONAL_CONDITION = AbstractChemenvStrategy.AC.ONLY_ACB
STRATEGY_OPTIONS = OrderedDict()
STRATEGY_OPTIONS['additional_condition'] = {'type': AdditionalConditionInt,
'internal': '_additional_condition',
'default': DEFAULT_ADDITIONAL_CONDITION}
STRATEGY_OPTIONS['surface_calculation_type'] = {}
STRATEGY_DESCRIPTION = ' Simple Abundance ChemenvStrategy using the most "abundant" neighbors map \n' \
' for the definition of neighbors in the Voronoi approach. \n' \
' The coordination environment is then given as the one with the \n' \
' lowest continuous symmetry measure.'
def __init__(self, structure_environments=None,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
"""
Constructor for the SimpleAbundanceChemenvStrategy.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
raise NotImplementedError('SimpleAbundanceChemenvStrategy not yet implemented')
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self._additional_condition = additional_condition
@property
def uniquely_determines_coordination_environments(self):
return True
def get_site_neighbors(self, site):
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
cn_map = self._get_map(isite)
eqsite_ps = (self.structure_environments.unique_coordinated_neighbors(isite, cn_map=cn_map))
coordinated_neighbors = []
for ips, ps in enumerate(eqsite_ps):
coords = mysym.operate(ps.frac_coords + dequivsite) + dthissite
ps_site = PeriodicSite(ps._species, coords, ps._lattice)
coordinated_neighbors.append(ps_site)
return coordinated_neighbors
def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_map=False):
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
cn_map = self._get_map(isite)
if cn_map is None:
return None
coord_geoms = (self.structure_environments.
ce_list[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]])
if return_map:
if coord_geoms is None:
return cn_map[0], cn_map
return coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map
else:
if coord_geoms is None:
return cn_map[0]
return coord_geoms.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_maps=False):
return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def _get_map(self, isite):
maps_and_surfaces = self._get_maps_surfaces(isite)
if maps_and_surfaces is None:
return None
surface_max = 0.0
imax = -1
for ii, map_and_surface in enumerate(maps_and_surfaces):
all_additional_conditions = [ac[2] for ac in map_and_surface['parameters_indices']]
if self._additional_condition in all_additional_conditions and map_and_surface['surface'] > surface_max:
surface_max = map_and_surface['surface']
imax = ii
return maps_and_surfaces[imax]['map']
def _get_maps_surfaces(self, isite, surface_calculation_type=None):
if surface_calculation_type is None:
surface_calculation_type = {'distance_parameter': ('initial_normalized', None),
'angle_parameter': ('initial_normalized', None)}
return self.structure_environments.voronoi.maps_and_surfaces(isite=isite,
surface_calculation_type=surface_calculation_type,
max_dist=self.DEFAULT_MAX_DIST)
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other.additional_condition)
def as_dict(self):
"""
Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.
:return: Bson-serializable dict representation of the SimpleAbundanceChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the SimpleAbundanceChemenvStrategy object from a dict representation of the
SimpleAbundanceChemenvStrategy object created using the as_dict method.
:param d: dict representation of the SimpleAbundanceChemenvStrategy object
:return: StructureEnvironments object
"""
return cls(additional_condition=d["additional_condition"])
class TargettedPenaltiedAbundanceChemenvStrategy(SimpleAbundanceChemenvStrategy):
"""
Simple ChemenvStrategy using the neighbors that are the most "abundant" in the grid of angle and distance
parameters for the definition of neighbors in the Voronoi approach, with a bias for a given list of target
environments. This can be useful in the case of, e.g. connectivity search of some given environment.
The coordination environment is then given as the one with the lowest continuous symmetry measure
"""
DEFAULT_TARGET_ENVIRONMENTS = ['O:6']
def __init__(self, structure_environments=None, truncate_dist_ang=True,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
max_nabundant=5, target_environments=DEFAULT_TARGET_ENVIRONMENTS, target_penalty_type='max_csm',
max_csm=5.0, symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE):
raise NotImplementedError('TargettedPenaltiedAbundanceChemenvStrategy not yet implemented')
SimpleAbundanceChemenvStrategy.__init__(self, structure_environments,
additional_condition=additional_condition,
symmetry_measure_type=symmetry_measure_type)
self.max_nabundant = max_nabundant
self.target_environments = target_environments
self.target_penalty_type = target_penalty_type
self.max_csm = max_csm
def get_site_coordination_environment(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_map=False):
if isite is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
cn_map = self._get_map(isite)
if cn_map is None:
return None
chemical_environments = (self.structure_environments.ce_list
[self.structure_environments.sites_map[isite]][cn_map[0]][cn_map[1]])
if return_map:
if chemical_environments.coord_geoms is None or len(chemical_environments) == 0:
return cn_map[0], cn_map
return chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type), cn_map
else:
if chemical_environments.coord_geoms is None:
return cn_map[0]
return chemical_environments.minimum_geometry(symmetry_measure_type=self._symmetry_measure_type)
def _get_map(self, isite):
maps_and_surfaces = SimpleAbundanceChemenvStrategy._get_maps_surfaces(self, isite)
if maps_and_surfaces is None:
return SimpleAbundanceChemenvStrategy._get_map(self, isite)
current_map = None
current_target_env_csm = 100.0
surfaces = [map_and_surface['surface'] for map_and_surface in maps_and_surfaces]
order = np.argsort(surfaces)[::-1]
target_cgs = [AllCoordinationGeometries().get_geometry_from_mp_symbol(mp_symbol)
for mp_symbol in self.target_environments]
target_cns = [cg.coordination_number for cg in target_cgs]
for ii in range(min([len(maps_and_surfaces), self.max_nabundant])):
my_map_and_surface = maps_and_surfaces[order[ii]]
mymap = my_map_and_surface['map']
cn = mymap[0]
if cn not in target_cns or cn > 12 or cn == 0:
continue
all_conditions = [params[2] for params in my_map_and_surface['parameters_indices']]
if self._additional_condition not in all_conditions:
continue
cg, cgdict = (self.structure_environments.ce_list
[self.structure_environments.sites_map[isite]]
[mymap[0]][mymap[1]].minimum_geometry(symmetry_measure_type=self._symmetry_measure_type))
if (cg in self.target_environments and cgdict['symmetry_measure'] <= self.max_csm and
cgdict['symmetry_measure'] < current_target_env_csm):
current_map = mymap
current_target_env_csm = cgdict['symmetry_measure']
if current_map is not None:
return current_map
else:
return SimpleAbundanceChemenvStrategy._get_map(self, isite)
@property
def uniquely_determines_coordination_environments(self):
return True
def as_dict(self):
"""
Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
:return: Bson-serializable dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"max_nabundant": self.max_nabundant,
"target_environments": self.target_environments,
"target_penalty_type": self.target_penalty_type,
"max_csm": self.max_csm}
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other.additional_condition and
self.max_nabundant == other.max_nabundant and
self.target_environments == other.target_environments and
self.target_penalty_type == other.target_penalty_type and
self.max_csm == other.max_csm)
@classmethod
def from_dict(cls, d):
"""
Reconstructs the TargettedPenaltiedAbundanceChemenvStrategy object from a dict representation of the
TargettedPenaltiedAbundanceChemenvStrategy object created using the as_dict method.
:param d: dict representation of the TargettedPenaltiedAbundanceChemenvStrategy object
:return: TargettedPenaltiedAbundanceChemenvStrategy object
"""
return cls(additional_condition=d["additional_condition"],
max_nabundant=d["max_nabundant"],
target_environments=d["target_environments"],
target_penalty_type=d["target_penalty_type"],
max_csm=d["max_csm"])
class NbSetWeight(MSONable, metaclass=abc.ABCMeta):
@abc.abstractmethod
def as_dict(self):
"""
A JSON serializable dict representation of this neighbors set weight.
"""
pass
@abc.abstractmethod
    def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
        """Get the weight of a given neighbors set for a given site.
        :param nb_set: neighbors set to be weighted.
        :param structure_environments: StructureEnvironments object used to estimate the weight.
        :param cn_map: (coordination number, index) mapping of the neighbors set.
        :param additional_info: dict used to cache intermediate quantities across weights.
        """
        pass
class AngleNbSetWeight(NbSetWeight):
SHORT_NAME = 'AngleWeight'
def __init__(self, aa=1.0):
self.aa = aa
if self.aa == 1.0:
self.aw = self.angle_sum
else:
self.aw = self.angle_sumn
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.aw(nb_set=nb_set)
def angle_sum(self, nb_set):
return np.sum(nb_set.angles) / (4.0 * np.pi)
def angle_sumn(self, nb_set):
return np.power(self.angle_sum(nb_set=nb_set), self.aa)
def __eq__(self, other):
return self.aa == other.aa
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"aa": self.aa
}
@classmethod
def from_dict(cls, dd):
return cls(aa=dd['aa'])
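# Minimal illustration (not part of the original module): with aa=1.0 the weight
# is the sum of the solid angles of the neighbors normalized by 4*pi, so four
# neighbors of pi steradians each give a weight of exactly 1. The mock nb_set is
# a stand-in; weight() only needs an "angles" attribute here.
def _example_angle_weight():
    from types import SimpleNamespace
    mock_nb_set = SimpleNamespace(angles=[np.pi, np.pi, np.pi, np.pi])
    w = AngleNbSetWeight(aa=1.0)
    assert abs(w.weight(mock_nb_set, structure_environments=None) - 1.0) < 1e-12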
class NormalizedAngleDistanceNbSetWeight(NbSetWeight):
SHORT_NAME = 'NormAngleDistWeight'
def __init__(self, average_type, aa, bb):
self.average_type = average_type
if self.average_type == 'geometric':
self.eval = self.gweight
elif self.average_type == 'arithmetic':
self.eval = self.aweight
else:
raise ValueError('Average type is "{}" while it should be '
'"geometric" or "arithmetic"'.format(average_type))
self.aa = aa
self.bb = bb
if self.aa == 0:
if self.bb == 1:
self.fda = self.invdist
elif self.bb == 0:
raise ValueError('Both exponents are 0.')
else:
self.fda = self.invndist
elif self.bb == 0:
if self.aa == 1:
self.fda = self.ang
else:
self.fda = self.angn
else:
if self.aa == 1:
if self.bb == 1:
self.fda = self.anginvdist
else:
self.fda = self.anginvndist
else:
if self.bb == 1:
self.fda = self.angninvdist
else:
self.fda = self.angninvndist
def __eq__(self, other):
return self.average_type == other.average_type and self.aa == other.aa and self.bb == other.bb
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"average_type": self.average_type,
"aa": self.aa,
"bb": self.bb
}
@classmethod
def from_dict(cls, dd):
return cls(average_type=dd['average_type'], aa=dd['aa'], bb=dd['bb'])
def invdist(self, nb_set):
return [1.0 / dist for dist in nb_set.normalized_distances]
def invndist(self, nb_set):
return [1.0 / dist**self.bb for dist in nb_set.normalized_distances]
def ang(self, nb_set):
return nb_set.normalized_angles
def angn(self, nb_set):
return [ang**self.aa for ang in nb_set.normalized_angles]
def anginvdist(self, nb_set):
nangles = nb_set.normalized_angles
return [nangles[ii] / dist for ii, dist in enumerate(nb_set.normalized_distances)]
def anginvndist(self, nb_set):
nangles = nb_set.normalized_angles
return [nangles[ii] / dist**self.bb for ii, dist in enumerate(nb_set.normalized_distances)]
def angninvdist(self, nb_set):
nangles = nb_set.normalized_angles
return [nangles[ii]**self.aa / dist for ii, dist in enumerate(nb_set.normalized_distances)]
def angninvndist(self, nb_set):
nangles = nb_set.normalized_angles
return [nangles[ii]**self.aa / dist**self.bb for ii, dist in enumerate(nb_set.normalized_distances)]
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
fda_list = self.fda(nb_set=nb_set)
return self.eval(fda_list=fda_list)
def gweight(self, fda_list):
return gmean(fda_list)
def aweight(self, fda_list):
return np.mean(fda_list)
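# Minimal illustration (not part of the original module): aa=1, bb=1 selects
# fda = normalized_angle / normalized_distance for each neighbor, and
# average_type='geometric' combines the per-neighbor values with the geometric
# mean. The mock nb_set is a stand-in for a real neighbors set.
def _example_normalized_angle_distance_weight():
    from types import SimpleNamespace
    mock_nb_set = SimpleNamespace(normalized_angles=[0.8, 0.6], normalized_distances=[1.0, 1.25])
    w = NormalizedAngleDistanceNbSetWeight(average_type='geometric', aa=1, bb=1)
    expected = np.sqrt((0.8 / 1.0) * (0.6 / 1.25))
    assert abs(w.weight(mock_nb_set, structure_environments=None) - expected) < 1e-12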
def get_effective_csm(nb_set, cn_map, structure_environments, additional_info,
symmetry_measure_type, max_effective_csm, effective_csm_estimator_ratio_function):
try:
effective_csm = additional_info['effective_csms'][nb_set.isite][cn_map]
except KeyError:
site_ce_list = structure_environments.ce_list[nb_set.isite]
site_chemenv = site_ce_list[cn_map[0]][cn_map[1]]
if site_chemenv is None:
effective_csm = 100.0
else:
mingeoms = site_chemenv.minimum_geometries(symmetry_measure_type=symmetry_measure_type,
max_csm=max_effective_csm)
if len(mingeoms) == 0:
effective_csm = 100.0
else:
csms = [ce_dict['other_symmetry_measures'][symmetry_measure_type] for mp_symbol, ce_dict in mingeoms
if ce_dict['other_symmetry_measures'][symmetry_measure_type] <= max_effective_csm]
effective_csm = effective_csm_estimator_ratio_function.mean_estimator(csms)
set_info(additional_info=additional_info, field='effective_csms',
isite=nb_set.isite, cn_map=cn_map, value=effective_csm)
return effective_csm
def set_info(additional_info, field, isite, cn_map, value):
try:
additional_info[field][isite][cn_map] = value
except KeyError:
try:
additional_info[field][isite] = {cn_map: value}
except KeyError:
additional_info[field] = {isite: {cn_map: value}}
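# Minimal illustration (not part of the original module) of how set_info() lazily
# builds the nested additional_info dict, creating the missing levels on demand.
def _example_set_info():
    info = {}
    set_info(additional_info=info, field='effective_csms', isite=0, cn_map=(6, 0), value=2.3)
    set_info(additional_info=info, field='effective_csms', isite=0, cn_map=(4, 1), value=5.1)
    assert info == {'effective_csms': {0: {(6, 0): 2.3, (4, 1): 5.1}}}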
class SelfCSMNbSetWeight(NbSetWeight):
SHORT_NAME = 'SelfCSMWeight'
DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
DEFAULT_WEIGHT_ESTIMATOR = {'function': 'power2_decreasing_exp',
'options': {'max_csm': 8.0,
'alpha': 1.0}}
DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'
def __init__(self, effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
self.effective_csm_estimator = effective_csm_estimator
self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
self.weight_estimator = weight_estimator
self.weight_estimator_rf = CSMFiniteRatioFunction.from_dict(weight_estimator)
self.symmetry_measure_type = symmetry_measure_type
self.max_effective_csm = self.effective_csm_estimator['options']['max_csm']
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
effective_csm = get_effective_csm(nb_set=nb_set, cn_map=cn_map,
structure_environments=structure_environments,
additional_info=additional_info,
symmetry_measure_type=self.symmetry_measure_type,
max_effective_csm=self.max_effective_csm,
effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
weight = self.weight_estimator_rf.evaluate(effective_csm)
set_info(additional_info=additional_info, field='self_csms_weights', isite=nb_set.isite,
cn_map=cn_map, value=weight)
return weight
def __eq__(self, other):
return (self.effective_csm_estimator == other.effective_csm_estimator and
self.weight_estimator == other.weight_estimator and
self.symmetry_measure_type == other.symmetry_measure_type)
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"effective_csm_estimator": self.effective_csm_estimator,
"weight_estimator": self.weight_estimator,
"symmetry_measure_type": self.symmetry_measure_type
}
@classmethod
def from_dict(cls, dd):
return cls(effective_csm_estimator=dd['effective_csm_estimator'],
weight_estimator=dd['weight_estimator'],
symmetry_measure_type=dd['symmetry_measure_type'])
class DeltaCSMNbSetWeight(NbSetWeight):
SHORT_NAME = 'DeltaCSMWeight'
DEFAULT_EFFECTIVE_CSM_ESTIMATOR = {'function': 'power2_inverse_decreasing',
'options': {'max_csm': 8.0}}
DEFAULT_SYMMETRY_MEASURE_TYPE = 'csm_wcs_ctwcc'
DEFAULT_WEIGHT_ESTIMATOR = {'function': 'smootherstep',
'options': {'delta_csm_min': 0.5,
'delta_csm_max': 3.0}}
def __init__(self, effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR,
weight_estimator=DEFAULT_WEIGHT_ESTIMATOR,
delta_cn_weight_estimators=None,
symmetry_measure_type=DEFAULT_SYMMETRY_MEASURE_TYPE):
self.effective_csm_estimator = effective_csm_estimator
self.effective_csm_estimator_rf = CSMInfiniteRatioFunction.from_dict(effective_csm_estimator)
self.weight_estimator = weight_estimator
if self.weight_estimator is not None:
self.weight_estimator_rf = DeltaCSMRatioFunction.from_dict(weight_estimator)
self.delta_cn_weight_estimators = delta_cn_weight_estimators
self.delta_cn_weight_estimators_rfs = {}
if delta_cn_weight_estimators is not None:
for delta_cn, dcn_w_estimator in delta_cn_weight_estimators.items():
self.delta_cn_weight_estimators_rfs[delta_cn] = DeltaCSMRatioFunction.from_dict(dcn_w_estimator)
self.symmetry_measure_type = symmetry_measure_type
self.max_effective_csm = self.effective_csm_estimator['options']['max_csm']
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
effcsm = get_effective_csm(nb_set=nb_set, cn_map=cn_map,
structure_environments=structure_environments,
additional_info=additional_info,
symmetry_measure_type=self.symmetry_measure_type,
max_effective_csm=self.max_effective_csm,
effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
cn = cn_map[0]
inb_set = cn_map[1]
isite = nb_set.isite
delta_csm = None
delta_csm_cn_map2 = None
nb_set_weight = 1.0
for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
if cn2 < cn:
continue
for inb_set2, nb_set2 in enumerate(nb_sets):
                if cn == cn2 and inb_set == inb_set2:
                    # do not compare a neighbors set with itself
                    continue
effcsm2 = get_effective_csm(nb_set=nb_set2, cn_map=(cn2, inb_set2),
structure_environments=structure_environments,
additional_info=additional_info,
symmetry_measure_type=self.symmetry_measure_type,
max_effective_csm=self.max_effective_csm,
effective_csm_estimator_ratio_function=self.effective_csm_estimator_rf)
this_delta_csm = effcsm2 - effcsm
if cn2 == cn:
if this_delta_csm < 0.0:
set_info(additional_info=additional_info, field='delta_csms', isite=isite,
cn_map=cn_map, value=this_delta_csm)
set_info(additional_info=additional_info, field='delta_csms_weights', isite=isite,
cn_map=cn_map, value=0.0)
set_info(additional_info=additional_info, field='delta_csms_cn_map2', isite=isite,
cn_map=cn_map, value=(cn2, inb_set2))
return 0.0
else:
dcn = cn2 - cn
if dcn in self.delta_cn_weight_estimators_rfs:
this_delta_csm_weight = self.delta_cn_weight_estimators_rfs[dcn].evaluate(this_delta_csm)
else:
this_delta_csm_weight = self.weight_estimator_rf.evaluate(this_delta_csm)
if this_delta_csm_weight < nb_set_weight:
delta_csm = this_delta_csm
delta_csm_cn_map2 = (cn2, inb_set2)
nb_set_weight = this_delta_csm_weight
set_info(additional_info=additional_info, field='delta_csms', isite=isite,
cn_map=cn_map, value=delta_csm)
set_info(additional_info=additional_info, field='delta_csms_weights', isite=isite,
cn_map=cn_map, value=nb_set_weight)
set_info(additional_info=additional_info, field='delta_csms_cn_map2', isite=isite,
cn_map=cn_map, value=delta_csm_cn_map2)
return nb_set_weight
def __eq__(self, other):
return (self.effective_csm_estimator == other.effective_csm_estimator and
self.weight_estimator == other.weight_estimator and
self.delta_cn_weight_estimators == other.delta_cn_weight_estimators and
self.symmetry_measure_type == other.symmetry_measure_type)
def __ne__(self, other):
return not self == other
@classmethod
def delta_cn_specifics(cls, delta_csm_mins=None, delta_csm_maxs=None, function='smootherstep',
symmetry_measure_type='csm_wcs_ctwcc',
effective_csm_estimator=DEFAULT_EFFECTIVE_CSM_ESTIMATOR):
if delta_csm_mins is None or delta_csm_maxs is None:
delta_cn_weight_estimators = {dcn: {'function': function,
'options': {'delta_csm_min': 0.25+dcn*0.25,
'delta_csm_max': 5.0+dcn*0.25}} for dcn in range(1, 13)}
else:
delta_cn_weight_estimators = {dcn: {'function': function,
'options': {'delta_csm_min': delta_csm_mins[dcn-1],
'delta_csm_max': delta_csm_maxs[dcn-1]}}
for dcn in range(1, 13)}
return cls(effective_csm_estimator=effective_csm_estimator,
weight_estimator={'function': function,
'options': {'delta_csm_min': delta_cn_weight_estimators[12]
['options']['delta_csm_min'],
'delta_csm_max': delta_cn_weight_estimators[12]
['options']['delta_csm_max']}},
delta_cn_weight_estimators=delta_cn_weight_estimators,
symmetry_measure_type=symmetry_measure_type)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"effective_csm_estimator": self.effective_csm_estimator,
"weight_estimator": self.weight_estimator,
"delta_cn_weight_estimators": self.delta_cn_weight_estimators,
"symmetry_measure_type": self.symmetry_measure_type
}
@classmethod
def from_dict(cls, dd):
return cls(effective_csm_estimator=dd['effective_csm_estimator'],
weight_estimator=dd['weight_estimator'],
delta_cn_weight_estimators={int(dcn): dcn_estimator
for dcn, dcn_estimator in dd['delta_cn_weight_estimators'].items()}
if ('delta_cn_weight_estimators' in dd and dd['delta_cn_weight_estimators'] is not None) else None,
symmetry_measure_type=dd['symmetry_measure_type'])
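# Worked example (not part of the original module): with its default arguments,
# delta_cn_specifics() sets delta_csm_min = 0.25 + 0.25*dcn and
# delta_csm_max = 5.0 + 0.25*dcn for each coordination-number difference dcn.
def _example_delta_cn_specifics():
    w = DeltaCSMNbSetWeight.delta_cn_specifics()
    assert w.delta_cn_weight_estimators[1]['options'] == {'delta_csm_min': 0.5, 'delta_csm_max': 5.25}
    assert w.delta_cn_weight_estimators[12]['options'] == {'delta_csm_min': 3.25, 'delta_csm_max': 8.0}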
class CNBiasNbSetWeight(NbSetWeight):
SHORT_NAME = 'CNBiasWeight'
def __init__(self, cn_weights, initialization_options):
self.cn_weights = cn_weights
self.initialization_options = initialization_options
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.cn_weights[len(nb_set)]
def __eq__(self, other):
return (self.cn_weights == other.cn_weights and
self.initialization_options == other.initialization_options)
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"cn_weights": {str(cn): cnw for cn, cnw in self.cn_weights.items()},
"initialization_options": self.initialization_options,
}
@classmethod
def from_dict(cls, dd):
return cls(cn_weights={int(cn): cnw for cn, cnw in dd['cn_weights'].items()},
initialization_options=dd['initialization_options'])
@classmethod
def linearly_equidistant(cls, weight_cn1, weight_cn13):
initialization_options = {'type': 'linearly_equidistant',
'weight_cn1': weight_cn1,
'weight_cn13': weight_cn13
}
dw = (weight_cn13 - weight_cn1) / 12.0
cn_weights = {cn: weight_cn1 + (cn - 1) * dw for cn in range(1, 14)}
return cls(cn_weights=cn_weights, initialization_options=initialization_options)
@classmethod
def geometrically_equidistant(cls, weight_cn1, weight_cn13):
initialization_options = {'type': 'geometrically_equidistant',
'weight_cn1': weight_cn1,
'weight_cn13': weight_cn13
}
factor = np.power(float(weight_cn13) / weight_cn1, 1.0 / 12.0)
cn_weights = {cn: weight_cn1 * np.power(factor, cn - 1) for cn in range(1, 14)}
return cls(cn_weights=cn_weights, initialization_options=initialization_options)
@classmethod
def explicit(cls, cn_weights):
initialization_options = {'type': 'explicit'}
if set(cn_weights.keys()) != set(range(1, 14)):
raise ValueError('Weights should be provided for CN 1 to 13')
return cls(cn_weights=cn_weights, initialization_options=initialization_options)
@classmethod
def from_description(cls, dd):
if dd['type'] == 'linearly_equidistant':
return cls.linearly_equidistant(weight_cn1=dd['weight_cn1'], weight_cn13=dd['weight_cn13'])
elif dd['type'] == 'geometrically_equidistant':
return cls.geometrically_equidistant(weight_cn1=dd['weight_cn1'], weight_cn13=dd['weight_cn13'])
elif dd['type'] == 'explicit':
return cls.explicit(cn_weights=dd['cn_weights'])
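# Worked example (not part of the original module) of the two equidistant
# constructors: linear weights increase by (w13 - w1)/12 per coordination number,
# geometric weights by a constant factor (w13/w1)**(1/12).
def _example_cn_bias_weights():
    lin = CNBiasNbSetWeight.linearly_equidistant(weight_cn1=1.0, weight_cn13=13.0)
    assert lin.cn_weights[6] == 6.0  # dw = (13 - 1) / 12 = 1, so CN n gets weight n
    geo = CNBiasNbSetWeight.geometrically_equidistant(weight_cn1=1.0, weight_cn13=4096.0)
    assert abs(geo.cn_weights[13] - 4096.0) < 1e-6  # factor = 4096**(1/12) = 2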
class DistanceAngleAreaNbSetWeight(NbSetWeight):
SHORT_NAME = 'DistAngleAreaWeight'
AC = AdditionalConditions()
DEFAULT_SURFACE_DEFINITION = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.2, 'upper': 1.8},
'angle_bounds': {'lower': 0.1, 'upper': 0.8}}
def __init__(self, weight_type='has_intersection', surface_definition=DEFAULT_SURFACE_DEFINITION,
nb_sets_from_hints='fallback_to_source', other_nb_sets='0_weight',
additional_condition=AC.ONLY_ACB, smoothstep_distance=None, smoothstep_angle=None):
self.weight_type = weight_type
if weight_type == 'has_intersection':
self.area_weight = self.w_area_has_intersection
elif weight_type == 'has_intersection_smoothstep':
raise NotImplementedError()
# self.area_weight = self.w_area_has_intersection_smoothstep
else:
            raise ValueError('Weight type is "{}" while it should be "has_intersection" '
                             'or "has_intersection_smoothstep"'.format(weight_type))
self.surface_definition = surface_definition
self.nb_sets_from_hints = nb_sets_from_hints
self.other_nb_sets = other_nb_sets
self.additional_condition = additional_condition
self.smoothstep_distance = smoothstep_distance
self.smoothstep_angle = smoothstep_angle
if self.nb_sets_from_hints == 'fallback_to_source':
if self.other_nb_sets == '0_weight':
self.w_area_intersection_specific = self.w_area_intersection_nbsfh_fbs_onb0
else:
raise ValueError('Other nb_sets should be "0_weight"')
else:
            raise ValueError('nb_sets_from_hints should be "fallback_to_source"')
lower_and_upper_functions = get_lower_and_upper_f(surface_calculation_options=surface_definition)
self.dmin = surface_definition['distance_bounds']['lower']
self.dmax = surface_definition['distance_bounds']['upper']
self.amin = surface_definition['angle_bounds']['lower']
self.amax = surface_definition['angle_bounds']['upper']
self.f_lower = lower_and_upper_functions['lower']
self.f_upper = lower_and_upper_functions['upper']
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.area_weight(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
def w_area_has_intersection_smoothstep(self, nb_set, structure_environments,
cn_map, additional_info):
w_area = self.w_area_intersection_specific(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
        if w_area > 0.0:
            # Placeholder logic: the smoothstep modulation of the area weight by
            # distance and angle is not implemented yet, which is why the
            # 'has_intersection_smoothstep' weight type raises NotImplementedError
            # in the constructor.
            if self.smoothstep_distance is not None:
                w_area = w_area
            if self.smoothstep_angle is not None:
                w_area = w_area
return w_area
def w_area_has_intersection(self, nb_set, structure_environments,
cn_map, additional_info):
return self.w_area_intersection_specific(nb_set=nb_set, structure_environments=structure_environments,
cn_map=cn_map, additional_info=additional_info)
def w_area_intersection_nbsfh_fbs_onb0(self, nb_set, structure_environments,
cn_map, additional_info):
dist_ang_sources = [src for src in nb_set.sources
if src['origin'] == 'dist_ang_ac_voronoi' and src['ac'] == self.additional_condition]
if len(dist_ang_sources) > 0:
for src in dist_ang_sources:
d1 = src['dp_dict']['min']
d2 = src['dp_dict']['next']
a1 = src['ap_dict']['next']
a2 = src['ap_dict']['max']
if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
return 1.0
return 0.0
else:
from_hints_sources = [src for src in nb_set.sources if src['origin'] == 'nb_set_hints']
if len(from_hints_sources) == 0:
return 0.0
elif len(from_hints_sources) != 1:
raise ValueError('Found multiple hints sources for nb_set')
else:
cn_map_src = from_hints_sources[0]['cn_map_source']
nb_set_src = structure_environments.neighbors_sets[nb_set.isite][cn_map_src[0]][cn_map_src[1]]
dist_ang_sources = [src for src in nb_set_src.sources
if src['origin'] == 'dist_ang_ac_voronoi' and
src['ac'] == self.additional_condition]
if len(dist_ang_sources) == 0:
return 0.0
for src in dist_ang_sources:
d1 = src['dp_dict']['min']
d2 = src['dp_dict']['next']
a1 = src['ap_dict']['next']
a2 = src['ap_dict']['max']
if self.rectangle_crosses_area(d1=d1, d2=d2, a1=a1, a2=a2):
return 1.0
return 0.0
    def rectangle_crosses_area(self, d1, d2, a1, a2):
        """Whether the rectangle [d1, d2] x [a1, a2] intersects the surface area
        delimited by the lower/upper boundary functions between (dmin, dmax) and
        (amin, amax)."""
        # Case 1: the rectangle lies entirely below the minimum distance
        if d1 <= self.dmin and d2 <= self.dmin:
            return False
        # Case 6: the rectangle lies entirely above the maximum distance
        if d1 >= self.dmax and d2 >= self.dmax:
            return False
        # Case 2: the rectangle straddles the minimum distance only
        if d1 <= self.dmin and d2 <= self.dmax:
            ld2 = self.f_lower(d2)
            if a2 <= ld2 or a1 >= self.amax:
                return False
            return True
        # Case 3: the rectangle spans the whole [dmin, dmax] distance interval
        if d1 <= self.dmin and d2 >= self.dmax:
            if a2 <= self.amin or a1 >= self.amax:
                return False
            return True
        # Case 4: the rectangle lies entirely within [dmin, dmax]
        if self.dmin <= d1 <= self.dmax and self.dmin <= d2 <= self.dmax:
            ld1 = self.f_lower(d1)
            ld2 = self.f_lower(d2)
            if a2 <= ld1 and a2 <= ld2:
                return False
            ud1 = self.f_upper(d1)
            ud2 = self.f_upper(d2)
            if a1 >= ud1 and a1 >= ud2:
                return False
            return True
        # Case 5: the rectangle straddles the maximum distance only
        if self.dmin <= d1 <= self.dmax and d2 >= self.dmax:
            ud1 = self.f_upper(d1)
            if a1 >= ud1 or a2 <= self.amin:
                return False
            return True
        raise ValueError('Should not reach this point!')
def __eq__(self, other):
return (self.weight_type == other.weight_type and
self.surface_definition == other.surface_definition and
self.nb_sets_from_hints == other.nb_sets_from_hints and
self.other_nb_sets == other.other_nb_sets and
self.additional_condition == other.additional_condition
)
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"weight_type": self.weight_type,
"surface_definition": self.surface_definition,
"nb_sets_from_hints": self.nb_sets_from_hints,
"other_nb_sets": self.other_nb_sets,
"additional_condition": self.additional_condition}
@classmethod
def from_dict(cls, dd):
return cls(weight_type=dd['weight_type'], surface_definition=dd['surface_definition'],
nb_sets_from_hints=dd['nb_sets_from_hints'], other_nb_sets=dd['other_nb_sets'],
additional_condition=dd['additional_condition'])
class DistancePlateauNbSetWeight(NbSetWeight):
SHORT_NAME = 'DistancePlateauWeight'
def __init__(self, distance_function=None, weight_function=None):
if distance_function is None:
self.distance_function = {'type': 'normalized_distance'}
else:
self.distance_function = distance_function
if weight_function is None:
self.weight_function = {'function': 'inverse_smootherstep', 'options': {'lower': 0.2, 'upper': 0.4}}
else:
self.weight_function = weight_function
self.weight_rf = RatioFunction.from_dict(self.weight_function)
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.weight_rf.eval(nb_set.distance_plateau())
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"distance_function": self.distance_function,
"weight_function": self.weight_function
}
@classmethod
def from_dict(cls, dd):
return cls(distance_function=dd['distance_function'], weight_function=dd['weight_function'])
class AnglePlateauNbSetWeight(NbSetWeight):
SHORT_NAME = 'AnglePlateauWeight'
def __init__(self, angle_function=None, weight_function=None):
if angle_function is None:
self.angle_function = {'type': 'normalized_angle'}
else:
self.angle_function = angle_function
if weight_function is None:
self.weight_function = {'function': 'inverse_smootherstep', 'options': {'lower': 0.05, 'upper': 0.15}}
else:
self.weight_function = weight_function
self.weight_rf = RatioFunction.from_dict(self.weight_function)
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
return self.weight_rf.eval(nb_set.angle_plateau())
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"angle_function": self.angle_function,
"weight_function": self.weight_function
}
@classmethod
def from_dict(cls, dd):
return cls(angle_function=dd['angle_function'], weight_function=dd['weight_function'])
class DistanceNbSetWeight(NbSetWeight):
SHORT_NAME = 'DistanceNbSetWeight'
def __init__(self, weight_function=None, nbs_source='voronoi'):
if weight_function is None:
self.weight_function = {'function': 'smootherstep', 'options': {'lower': 1.2, 'upper': 1.3}}
else:
self.weight_function = weight_function
self.weight_rf = RatioFunction.from_dict(self.weight_function)
if nbs_source not in ['nb_sets', 'voronoi']:
raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
self.nbs_source = nbs_source
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
cn = cn_map[0]
inb_set = cn_map[1]
isite = nb_set.isite
voronoi = structure_environments.voronoi.voronoi_list2[isite]
if self.nbs_source == 'nb_sets':
all_nbs_voro_indices = set()
for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
for inb_set2, nb_set2 in enumerate(nb_sets):
                    if cn == cn2 and inb_set == inb_set2:
                        # do not compare a neighbors set with itself
                        continue
all_nbs_voro_indices.update(nb_set2.site_voronoi_indices)
elif self.nbs_source == "voronoi":
all_nbs_voro_indices = set(range(len(voronoi)))
else:
raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
all_nbs_indices_except_nb_set = all_nbs_voro_indices.difference(nb_set.site_voronoi_indices)
normalized_distances = [voronoi[inb]['normalized_distance'] for inb in all_nbs_indices_except_nb_set]
if len(normalized_distances) == 0:
return 1.0
return self.weight_rf.eval(min(normalized_distances))
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"weight_function": self.weight_function,
"nbs_source": self.nbs_source
}
@classmethod
def from_dict(cls, dd):
return cls(weight_function=dd['weight_function'], nbs_source=dd['nbs_source'])
class DeltaDistanceNbSetWeight(NbSetWeight):
SHORT_NAME = 'DeltaDistanceNbSetWeight'
def __init__(self, weight_function=None, nbs_source='voronoi'):
if weight_function is None:
self.weight_function = {'function': 'smootherstep', 'options': {'lower': 0.1, 'upper': 0.2}}
else:
self.weight_function = weight_function
self.weight_rf = RatioFunction.from_dict(self.weight_function)
if nbs_source not in ['nb_sets', 'voronoi']:
raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
self.nbs_source = nbs_source
def weight(self, nb_set, structure_environments, cn_map=None, additional_info=None):
cn = cn_map[0]
inb_set = cn_map[1]
isite = nb_set.isite
voronoi = structure_environments.voronoi.voronoi_list2[isite]
if self.nbs_source == 'nb_sets':
all_nbs_voro_indices = set()
for cn2, nb_sets in structure_environments.neighbors_sets[isite].items():
for inb_set2, nb_set2 in enumerate(nb_sets):
                    if cn == cn2 and inb_set == inb_set2:
                        # do not compare a neighbors set with itself
                        continue
all_nbs_voro_indices.update(nb_set2.site_voronoi_indices)
elif self.nbs_source == "voronoi":
all_nbs_voro_indices = set(range(len(voronoi)))
else:
raise ValueError('"nbs_source" should be one of ["nb_sets", "voronoi"]')
all_nbs_indices_except_nb_set = all_nbs_voro_indices.difference(nb_set.site_voronoi_indices)
normalized_distances = [voronoi[inb]['normalized_distance'] for inb in all_nbs_indices_except_nb_set]
if len(normalized_distances) == 0:
return 1.0
if len(nb_set) == 0:
return 0.0
nb_set_max_normalized_distance = max(nb_set.normalized_distances)
return self.weight_rf.eval(min(normalized_distances)-nb_set_max_normalized_distance)
def __eq__(self, other):
return self.__class__ == other.__class__
def __ne__(self, other):
return not self == other
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"weight_function": self.weight_function,
"nbs_source": self.nbs_source
}
@classmethod
def from_dict(cls, dd):
return cls(weight_function=dd['weight_function'], nbs_source=dd['nbs_source'])
class WeightedNbSetChemenvStrategy(AbstractChemenvStrategy):
"""
WeightedNbSetChemenvStrategy
"""
STRATEGY_DESCRIPTION = ' WeightedNbSetChemenvStrategy'
DEFAULT_CE_ESTIMATOR = {'function': 'power2_inverse_power2_decreasing',
'options': {'max_csm': 8.0}}
def __init__(self, structure_environments=None,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
nb_set_weights=None,
ce_estimator=DEFAULT_CE_ESTIMATOR):
"""
Constructor for the WeightedNbSetChemenvStrategy.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
AbstractChemenvStrategy.__init__(self, structure_environments, symmetry_measure_type=symmetry_measure_type)
self._additional_condition = additional_condition
if nb_set_weights is None:
            raise ValueError('The "nb_set_weights" argument is required '
                             'for WeightedNbSetChemenvStrategy')
self.nb_set_weights = nb_set_weights
self.ordered_weights = []
for nb_set_weight in self.nb_set_weights:
self.ordered_weights.append({'weight': nb_set_weight, 'name': nb_set_weight.SHORT_NAME})
self.ce_estimator = ce_estimator
self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
@property
def uniquely_determines_coordination_environments(self):
return False
def get_site_coordination_environments_fractions(self, site, isite=None, dequivsite=None, dthissite=None,
mysym=None, ordered=True, min_fraction=0.0, return_maps=True,
return_strategy_dict_info=False, return_all=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
site_nb_sets = self.structure_environments.neighbors_sets[isite]
if site_nb_sets is None:
return None
cn_maps = []
for cn, nb_sets in site_nb_sets.items():
for inb_set, nb_set in enumerate(nb_sets):
#CHECK THE ADDITIONAL CONDITION HERE ?
cn_maps.append((cn, inb_set))
weights_additional_info = {'weights': {isite: {}}}
for wdict in self.ordered_weights:
cn_maps_new = []
weight = wdict['weight']
weight_name = wdict['name']
for cn_map in cn_maps:
nb_set = site_nb_sets[cn_map[0]][cn_map[1]]
w_nb_set = weight.weight(nb_set=nb_set, structure_environments=self.structure_environments,
cn_map=cn_map, additional_info=weights_additional_info)
if cn_map not in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map] = {}
weights_additional_info['weights'][isite][cn_map][weight_name] = w_nb_set
if w_nb_set > 0.0:
cn_maps_new.append(cn_map)
cn_maps = cn_maps_new
for cn_map, weights in weights_additional_info['weights'][isite].items():
            weights_additional_info['weights'][isite][cn_map]['Product'] = np.prod(list(weights.values()))
w_nb_sets = {cn_map: weights['Product']
for cn_map, weights in weights_additional_info['weights'][isite].items()}
w_nb_sets_total = np.sum(list(w_nb_sets.values()))
nb_sets_fractions = {cn_map: w_nb_set / w_nb_sets_total for cn_map, w_nb_set in w_nb_sets.items()}
for cn_map in weights_additional_info['weights'][isite]:
weights_additional_info['weights'][isite][cn_map]['NbSetFraction'] = nb_sets_fractions[cn_map]
ce_symbols = []
ce_dicts = []
ce_fractions = []
ce_dict_fractions = []
ce_maps = []
site_ce_list = self.structure_environments.ce_list[isite]
if return_all:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
if site_ce_nb_set is None:
continue
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self.symmetry_measure_type)
if len(mingeoms) > 0:
csms = [ce_dict['other_symmetry_measures'][self.symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
if fractions is None:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for ifraction, fraction in enumerate(fractions):
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
ce_symbols.append('UNCLEAR:{:d}'.format(cn))
ce_dicts.append(None)
ce_fractions.append(nb_set_fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = None
dict_fractions['Fraction'] = nb_set_fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
else:
for cn_map, nb_set_fraction in nb_sets_fractions.items():
if nb_set_fraction > 0.0:
cn = cn_map[0]
inb_set = cn_map[1]
site_ce_nb_set = site_ce_list[cn][inb_set]
mingeoms = site_ce_nb_set.minimum_geometries(symmetry_measure_type=self._symmetry_measure_type)
csms = [ce_dict['other_symmetry_measures'][self._symmetry_measure_type]
for ce_symbol, ce_dict in mingeoms]
fractions = self.ce_estimator_fractions(csms)
for ifraction, fraction in enumerate(fractions):
if fraction > 0.0:
ce_symbols.append(mingeoms[ifraction][0])
ce_dicts.append(mingeoms[ifraction][1])
ce_fractions.append(nb_set_fraction * fraction)
all_weights = weights_additional_info['weights'][isite][cn_map]
dict_fractions = {wname: wvalue for wname, wvalue in all_weights.items()}
dict_fractions['CEFraction'] = fraction
dict_fractions['Fraction'] = nb_set_fraction * fraction
ce_dict_fractions.append(dict_fractions)
ce_maps.append(cn_map)
if ordered:
indices = np.argsort(ce_fractions)[::-1]
else:
indices = list(range(len(ce_fractions)))
fractions_info_list = [
{'ce_symbol': ce_symbols[ii], 'ce_dict': ce_dicts[ii], 'ce_fraction': ce_fractions[ii]}
for ii in indices if ce_fractions[ii] >= min_fraction]
if return_maps:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['ce_map'] = ce_maps[ii]
if return_strategy_dict_info:
for ifinfo, ii in enumerate(indices):
if ce_fractions[ii] >= min_fraction:
fractions_info_list[ifinfo]['strategy_info'] = ce_dict_fractions[ii]
return fractions_info_list
def get_site_coordination_environment(self, site):
pass
def get_site_neighbors(self, site):
pass
def get_site_coordination_environments(self, site, isite=None, dequivsite=None, dthissite=None, mysym=None,
return_maps=False):
if isite is None or dequivsite is None or dthissite is None or mysym is None:
[isite, dequivsite, dthissite, mysym] = self.equivalent_site_index_and_transform(site)
return [self.get_site_coordination_environment(site=site, isite=isite, dequivsite=dequivsite,
dthissite=dthissite, mysym=mysym, return_map=return_maps)]
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other._additional_condition and
self.symmetry_measure_type == other.symmetry_measure_type and
self.nb_set_weights == other.nb_set_weights and
self.ce_estimator == other.ce_estimator)
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object.
:return: Bson-serializable dict representation of the WeightedNbSetChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"symmetry_measure_type": self.symmetry_measure_type,
"nb_set_weights": [nb_set_weight.as_dict() for nb_set_weight in self.nb_set_weights],
"ce_estimator": self.ce_estimator,
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the WeightedNbSetChemenvStrategy object from a dict representation of the
WeightedNbSetChemenvStrategy object created using the as_dict method.
:param d: dict representation of the WeightedNbSetChemenvStrategy object
:return: WeightedNbSetChemenvStrategy object
"""
return cls(additional_condition=d["additional_condition"],
symmetry_measure_type=d["symmetry_measure_type"],
                   # weight dict representations are passed through as-is here
                   nb_set_weights=d["nb_set_weights"],
ce_estimator=d["ce_estimator"])
class MultiWeightsChemenvStrategy(WeightedNbSetChemenvStrategy):
"""
MultiWeightsChemenvStrategy
"""
STRATEGY_DESCRIPTION = ' Multi Weights ChemenvStrategy'
# STRATEGY_INFO_FIELDS = ['cn_map_surface_fraction', 'cn_map_surface_weight',
# 'cn_map_mean_csm', 'cn_map_csm_weight',
# 'cn_map_delta_csm', 'cn_map_delta_csms_cn_map2', 'cn_map_delta_csm_weight',
# 'cn_map_cn_weight',
# 'cn_map_fraction', 'cn_map_ce_fraction', 'ce_fraction']
DEFAULT_CE_ESTIMATOR = {'function': 'power2_inverse_power2_decreasing',
'options': {'max_csm': 8.0}}
DEFAULT_DIST_ANG_AREA_WEIGHT = {}
def __init__(self, structure_environments=None,
additional_condition=AbstractChemenvStrategy.AC.ONLY_ACB,
symmetry_measure_type=AbstractChemenvStrategy.DEFAULT_SYMMETRY_MEASURE_TYPE,
dist_ang_area_weight=None,
self_csm_weight=None,
delta_csm_weight=None,
cn_bias_weight=None,
angle_weight=None,
normalized_angle_distance_weight=None,
ce_estimator=DEFAULT_CE_ESTIMATOR
):
"""
Constructor for the MultiWeightsChemenvStrategy.
:param structure_environments: StructureEnvironments object containing all the information on the
coordination of the sites in a structure
"""
self._additional_condition = additional_condition
self.dist_ang_area_weight = dist_ang_area_weight
self.angle_weight = angle_weight
self.normalized_angle_distance_weight = normalized_angle_distance_weight
self.self_csm_weight = self_csm_weight
self.delta_csm_weight = delta_csm_weight
self.cn_bias_weight = cn_bias_weight
self.ordered_weights = []
nb_sets_weights = []
if dist_ang_area_weight is not None:
self.ordered_weights.append({'weight': dist_ang_area_weight, 'name': 'DistAngArea'})
nb_sets_weights.append(dist_ang_area_weight)
if self_csm_weight is not None:
self.ordered_weights.append({'weight': self_csm_weight, 'name': 'SelfCSM'})
nb_sets_weights.append(self_csm_weight)
if delta_csm_weight is not None:
self.ordered_weights.append({'weight': delta_csm_weight, 'name': 'DeltaCSM'})
nb_sets_weights.append(delta_csm_weight)
if cn_bias_weight is not None:
self.ordered_weights.append({'weight': cn_bias_weight, 'name': 'CNBias'})
nb_sets_weights.append(cn_bias_weight)
if angle_weight is not None:
self.ordered_weights.append({'weight': angle_weight, 'name': 'Angle'})
nb_sets_weights.append(angle_weight)
if normalized_angle_distance_weight is not None:
self.ordered_weights.append({'weight': normalized_angle_distance_weight, 'name': 'NormalizedAngDist'})
nb_sets_weights.append(normalized_angle_distance_weight)
self.ce_estimator = ce_estimator
self.ce_estimator_ratio_function = CSMInfiniteRatioFunction.from_dict(self.ce_estimator)
self.ce_estimator_fractions = self.ce_estimator_ratio_function.fractions
WeightedNbSetChemenvStrategy.__init__(self, structure_environments,
additional_condition=additional_condition,
symmetry_measure_type=symmetry_measure_type,
nb_set_weights=nb_sets_weights,
ce_estimator=ce_estimator)
@classmethod
def stats_article_weights_parameters(cls):
self_csm_weight = SelfCSMNbSetWeight(weight_estimator={'function': 'power2_decreasing_exp',
'options': {'max_csm': 8.0,
'alpha': 1.0}})
surface_definition = {'type': 'standard_elliptic',
'distance_bounds': {'lower': 1.15, 'upper': 2.0},
'angle_bounds': {'lower': 0.05, 'upper': 0.75}}
da_area_weight = DistanceAngleAreaNbSetWeight(weight_type='has_intersection',
surface_definition=surface_definition,
nb_sets_from_hints='fallback_to_source',
other_nb_sets='0_weight',
additional_condition=DistanceAngleAreaNbSetWeight.AC.ONLY_ACB)
symmetry_measure_type = 'csm_wcs_ctwcc'
delta_weight = DeltaCSMNbSetWeight.delta_cn_specifics()
bias_weight = None
angle_weight = None
nad_weight = None
return cls(dist_ang_area_weight=da_area_weight,
self_csm_weight=self_csm_weight,
delta_csm_weight=delta_weight,
cn_bias_weight=bias_weight,
angle_weight=angle_weight,
normalized_angle_distance_weight=nad_weight,
symmetry_measure_type=symmetry_measure_type)
@property
def uniquely_determines_coordination_environments(self):
return False
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self._additional_condition == other._additional_condition and
self.symmetry_measure_type == other.symmetry_measure_type and
self.dist_ang_area_weight == other.dist_ang_area_weight and
self.self_csm_weight == other.self_csm_weight and
self.delta_csm_weight == other.delta_csm_weight and
self.cn_bias_weight == other.cn_bias_weight and
self.angle_weight == other.angle_weight and
self.normalized_angle_distance_weight == other.normalized_angle_distance_weight and
self.ce_estimator == other.ce_estimator)
def __ne__(self, other):
return not self == other
def as_dict(self):
"""
Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
:return: Bson-serializable dict representation of the MultiWeightsChemenvStrategy object.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"additional_condition": self._additional_condition,
"symmetry_measure_type": self.symmetry_measure_type,
"dist_ang_area_weight": self.dist_ang_area_weight.as_dict()
if self.dist_ang_area_weight is not None else None,
"self_csm_weight": self.self_csm_weight.as_dict()
if self.self_csm_weight is not None else None,
"delta_csm_weight": self.delta_csm_weight.as_dict()
if self.delta_csm_weight is not None else None,
"cn_bias_weight": self.cn_bias_weight.as_dict()
if self.cn_bias_weight is not None else None,
"angle_weight": self.angle_weight.as_dict()
if self.angle_weight is not None else None,
"normalized_angle_distance_weight": self.normalized_angle_distance_weight.as_dict()
if self.normalized_angle_distance_weight is not None else None,
"ce_estimator": self.ce_estimator,
}
@classmethod
def from_dict(cls, d):
"""
Reconstructs the MultiWeightsChemenvStrategy object from a dict representation of the
        MultiWeightsChemenvStrategy object created using the as_dict method.
:param d: dict representation of the MultiWeightsChemenvStrategy object
:return: MultiWeightsChemenvStrategy object
"""
if d["normalized_angle_distance_weight"] is not None:
nad_w = NormalizedAngleDistanceNbSetWeight.from_dict(d["normalized_angle_distance_weight"])
else:
nad_w = None
return cls(additional_condition=d["additional_condition"],
symmetry_measure_type=d["symmetry_measure_type"],
dist_ang_area_weight=DistanceAngleAreaNbSetWeight.from_dict(d["dist_ang_area_weight"])
if d["dist_ang_area_weight"] is not None else None,
self_csm_weight=SelfCSMNbSetWeight.from_dict(d["self_csm_weight"])
if d["self_csm_weight"] is not None else None,
delta_csm_weight=DeltaCSMNbSetWeight.from_dict(d["delta_csm_weight"])
if d["delta_csm_weight"] is not None else None,
cn_bias_weight=CNBiasNbSetWeight.from_dict(d["cn_bias_weight"])
if d["cn_bias_weight"] is not None else None,
angle_weight=AngleNbSetWeight.from_dict(d["angle_weight"])
if d["angle_weight"] is not None else None,
normalized_angle_distance_weight=nad_w,
ce_estimator=d["ce_estimator"])
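# Illustrative sketch (not part of the original module): building the strategy
# with the weights used in the statistics article and applying it to one site.
# set_structure_environments() is assumed to be provided by
# AbstractChemenvStrategy, which is defined earlier in this module, outside
# this excerpt.
def _example_multi_weights_usage(structure_environments, site):
    strategy = MultiWeightsChemenvStrategy.stats_article_weights_parameters()
    strategy.set_structure_environments(structure_environments)
    return strategy.get_site_coordination_environments_fractions(site)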
| dongsenfo/pymatgen | pymatgen/analysis/chemenv/coordination_environments/chemenv_strategies.py | Python | mit | 95,714 | [ "pymatgen" ] | cb54fa64efe93842b696d9f5feaf9876f9f4363d24830997f499ee460211f5d4 |
# safe_eval
# Copyrighted (C) Michael Spencer
#
# Source: http://code.activestate.com/recipes/364469/
# NOTE: this recipe targets Python 2; the "compiler" module it relies on was
# removed in Python 3 (superseded by the "ast" module).
import compiler
class Unsafe_Source_Error(Exception):
def __init__(self,error,descr = None,node = None):
self.error = error
self.descr = descr
self.node = node
self.lineno = getattr(node,"lineno",None)
def __repr__(self):
return "Line %d. %s: %s" % (self.lineno, self.error, self.descr)
__str__ = __repr__
class SafeEval(object):
def visit(self, node,**kw):
cls = node.__class__
meth = getattr(self,'visit'+cls.__name__,self.default)
return meth(node, **kw)
def default(self, node, **kw):
for child in node.getChildNodes():
return self.visit(child, **kw)
visitExpression = default
def visitConst(self, node, **kw):
return node.value
def visitDict(self, node,**kw):
return dict([(self.visit(k),self.visit(v)) for k,v in node.items])
def visitTuple(self, node, **kw):
return tuple(self.visit(i) for i in node.nodes)
def visitList(self, node, **kw):
return [self.visit(i) for i in node.nodes]
def visitUnarySub(self, node, **kw):
return -self.visit(node.expr)
class SafeEvalWithErrors(SafeEval):
def default(self, node, **kw):
raise Unsafe_Source_Error("Unsupported source construct",
node.__class__,node)
def visitName(self,node, **kw):
raise Unsafe_Source_Error("Strings must be quoted",
node.name, node)
# Add more specific errors if desired
def safe_eval(source, fail_on_error = True):
walker = fail_on_error and SafeEvalWithErrors() or SafeEval()
try:
ast = compiler.parse(source,"eval")
except SyntaxError, err:
raise Unsafe_Source_Error(err, source)
try:
return walker.visit(ast)
except Unsafe_Source_Error, err:
        raise
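# Illustrative usage (not part of the original recipe): plain literals evaluate
# to the corresponding Python objects, while any other construct raises
# Unsafe_Source_Error.
def _example_safe_eval():
    assert safe_eval("[1, 2, {'a': ('b', -3)}]") == [1, 2, {'a': ('b', -3)}]
    try:
        safe_eval("__import__('os').system('ls')")
    except Unsafe_Source_Error:
        pass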
"VisIt"
] | 5c78b54e0fd69e7b970d14bb298af32f1d2283042d8bb49a42d137837d1da098 |
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2014'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
import os
import time
import logging
from collections import defaultdict
from numpy import mean, std
import biolib.seq_io as seq_io
from biolib.parallel import Parallel
from biolib.common import make_sure_path_exists, remove_extension, concatenate_files
class AAICalculator(object):
"""Calculate AAI between all pairs of genomes."""
def __init__(self, cpus):
"""Initialization.
Parameters
----------
cpus : int
Number of cpus to use.
"""
self.logger = logging.getLogger('timestamp')
self.cpus = cpus
def _genome_offsets(self, sorted_hit_table):
"""Read blast table to determine byte offsets of hits for each genome.
Parameters
----------
sorted_hit_table : str
File containing sorted blast hits.
Returns
-------
dict : d[genome_id] -> (start_pos, end_pos)
Start and end byte offsets of hits for each genome in blast table.
"""
offset_table = defaultdict(dict)
with open(sorted_hit_table, 'r', 512 * (10 ** 6)) as f:
cur_query_genome = None
cur_target_genome = None
start_pos = 0
end_pos = 0
for line in f:
hit = line.split('\t')
query_genome = hit[0]
target_genome = hit[2]
if target_genome != cur_target_genome or query_genome != cur_query_genome:
if cur_query_genome:
offset_table[cur_query_genome][cur_target_genome] = (start_pos, end_pos)
cur_query_genome = query_genome
cur_target_genome = target_genome
start_pos = end_pos
end_pos += len(line)
offset_table[cur_query_genome][cur_target_genome] = (start_pos, end_pos)
return offset_table
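    def _example_hits_block(self, offset_table, query_genome_id, target_genome_id):
        """Illustrative sketch (not part of the original class): the byte offsets
        returned by _genome_offsets() allow seeking directly to the block of hits
        between two genomes instead of rescanning the whole sorted table, which
        is the access pattern used by _valid_hits() below. It assumes
        self.sorted_hit_table has been set by the caller, as in _producer().
        """
        start_pos, end_pos = offset_table[query_genome_id][target_genome_id]
        with open(self.sorted_hit_table) as f:
            f.seek(start_pos)
            while f.tell() < end_pos:
                yield f.readline().rstrip('\n').split('\t')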
def _valid_hits(self, hit_table_stream,
offset_table,
evalue_threshold,
per_identity_threshold,
per_aln_len_threshold,
query_genome_id,
target_genome_id):
"""Identify best hits from a genome meeting the specified criteria.
Hits from genes within query genome are identified which
satisfy the percent identity threshold, percent alignment
length threshold, and are to the specified target genome.
For each gene, the hit with the highest bitscore is identified.
Parameters
----------
hit_table_stream : stream
Stream to table with blast hits.
offset_table : d[genome_id] -> (start_pos, end_pos)
Start and end byte offsets of hits for each genome in blast table.
evalue_threshold : float
Evalue threshold used to define a homologous gene.
per_identity_threshold : float
Percent identity threshold used to define a homologous gene.
per_aln_len_threshold : float
Alignment length threshold used to define a homologous gene.
query_genome_id : str
Unique id of genome to obtained hits for.
target_genome_id : str
Unique id of genome to considered hits to.
Returns
-------
dict : d[query_id] -> list of lists with blast hit information
Hits from query genome meeting specified criteria (can be multiple hits if equal bitscores).
"""
# get valid hits for genome
hits = {}
if query_genome_id not in offset_table:
# proteins in query genome failed to hit any target proteins
return hits
if target_genome_id not in offset_table[query_genome_id]:
# proteins in query genome failed to hit any proteins in target genome
return hits
start_pos, end_pos = offset_table[query_genome_id][target_genome_id]
hit_table_stream.seek(start_pos)
while hit_table_stream.tell() < end_pos:
hit = hit_table_stream.readline().split('\t')
perc_iden = float(hit[4])
if perc_iden < per_identity_threshold:
continue
            evalue = float(hit[12])
            # discard hits weaker than the e-value threshold
            if evalue > evalue_threshold:
                continue
query_id = hit[0] + '~' + hit[1]
query_coverage = int(hit[9]) - int(hit[8]) + 1
per_aln_len = query_coverage * 100.0 / self.gene_lengths[query_id]
if per_aln_len < per_aln_len_threshold:
continue
target_genome = hit[2]
target_id = target_genome + '~' + hit[3]
if target_genome_id and target_genome != target_genome_id:
continue
bitscore = float(hit[13])
prev_hit = hits.get(query_id, None)
if not prev_hit:
hits[query_id] = [[target_id, perc_iden, per_aln_len, evalue, bitscore]]
elif bitscore > prev_hit[0][4]:
# for each gene, keep the hit with the highest bitscore
hits[query_id] = [[target_id, perc_iden, per_aln_len, evalue, bitscore]]
elif bitscore == prev_hit[0][4]:
hits[query_id].append([target_id, perc_iden, per_aln_len, evalue, bitscore])
return hits
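    # Example of a returned entry (hypothetical identifiers):
    #   hits['genomeA~gene1'] == [['genomeB~gene9', 85.1, 92.3, 1e-50, 310.0]]
    # with any equal-bitscore hits appended as additional inner lists.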
def _producer(self, genome_id_list):
"""Identify reciprocal best blast hits between pairs of genomes.
Parameters
----------
        genome_id_list : (query_genome_id, list of target genome ids)
            Identifier of the query genome and the genomes to compare it against.
"""
hit_table_stream = open(self.sorted_hit_table, 'r', 128 * (10 ** 6))
        # unpack the query genome ID and the list of genomes to compare against
query_genome_id, genomes_to_process = genome_id_list
if self.keep_rbhs:
fout_stats = open(os.path.join(self.output_dir, query_genome_id + '.rbh.tsv'), 'w')
fout_stats.write('Genome A\tGenome B\tPercent Identity\tPercent Alignment Length\te-value\tbitscore\n')
        # determine RBHs
results = []
for cur_genome_id in genomes_to_process:
hits = self._valid_hits(hit_table_stream,
self.offset_table,
self.evalue_threshold,
self.per_identity_threshold,
self.per_aln_len_threshold,
query_genome_id,
cur_genome_id)
cur_hits = self._valid_hits(hit_table_stream,
self.offset_table,
self.evalue_threshold,
self.per_identity_threshold,
self.per_aln_len_threshold,
cur_genome_id,
query_genome_id)
# report reciprocal best blast hits
per_identity_hits = []
for query_id, hit_stats in hits.items():
bRBH = False
for query_hit in hit_stats:
target_id, per_identA, per_aln_lenA, evalueA, bitscoreA = query_hit
for target_hit in cur_hits.get(target_id, []):
cur_target_id, per_identB, per_aln_lenB, evalueB, bitscoreB = target_hit
if query_id != cur_target_id:
continue
# take average of statistics in both blast directions as
# the results will be similar, but not identical
per_ident = 0.5 * (per_identA + per_identB)
per_identity_hits.append(per_ident)
per_aln_len = 0.5 * (per_aln_lenA + per_aln_lenB)
evalue = 0.5 * (evalueA + evalueB)
bitscore = 0.5 * (bitscoreA + bitscoreB)
if self.keep_rbhs:
fout_stats.write('%s\t%s\t%.2f\t%.2f\t%.2g\t%.2f\n' % (query_id, target_id, per_ident, per_aln_len, evalue, bitscore))
# keep only one reciprocal best hit per gene
bRBH = True
break
if bRBH: # keep only one reciprocal best hit per gene
break
mean_per_identity_hits = 0
if len(per_identity_hits) > 0:
mean_per_identity_hits = mean(per_identity_hits)
std_per_identity_hits = 0
if len(per_identity_hits) >= 2:
std_per_identity_hits = std(per_identity_hits)
num_genesA = self.query_gene_count[query_genome_id]
num_genesB = self.target_gene_count[cur_genome_id]
num_rbhs = len(per_identity_hits)
            of = num_rbhs * 100.0 / min(num_genesA, num_genesB)  # orthologous fraction (OF)
results.append((query_genome_id,
num_genesA,
cur_genome_id,
num_genesB,
num_rbhs,
mean_per_identity_hits,
std_per_identity_hits,
of))
if self.keep_rbhs:
fout_stats.close()
return results
def _consumer(self, produced_data, consumer_data):
"""Consume results from producer processes.
Parameters
----------
produced_data : tuple
Summary statistics for a genome pair.
consumer_data : list
Summary statistics of amino acid identity between genome pairs.
Returns
-------
consumer_data
Summary statistics of amino acid identity between genome pairs.
"""
        if consumer_data is None:
# setup structure for consumed data
consumer_data = []
self.processed_paired += len(produced_data)
consumer_data.extend(produced_data)
return consumer_data
def _progress(self, processed_genomes, total_genomes):
"""Report progress of consumer processes.
Parameters
----------
processed_genomes : int
Number of genomes processed.
total_genomes : int
Total number of genomes to process.
Returns
-------
str
String indicating progress of data processing.
"""
return ' Finished processing %d of %d (%.2f%%) pairs.' % (self.processed_paired,
self.num_pairs,
float(self.processed_paired) * 100 / self.num_pairs)
def run(self, query_gene_file,
target_gene_file,
sorted_hit_table,
evalue_threshold,
per_iden_threshold,
per_aln_len_threshold,
keep_rbhs,
output_dir):
"""Calculate amino acid identity (AAI) between pairs of genomes.
Parameters
----------
query_gene_file : str
File with all query genes in FASTA format.
target_gene_file : str or None
File with all target genes in FASTA format, or None if performing a reciprocal AAI calculation.
sorted_hit_table : str
Sorted table indicating genes with sequence similarity.
evalue_threshold : float
Evalue threshold used to define a homologous gene.
        per_iden_threshold : float
Percent identity threshold used to define a homologous gene.
per_aln_len_threshold : float
Alignment length threshold used to define a homologous gene.
keep_rbhs : boolean
Flag indicating if RBH should be written to file.
output_dir : str
Directory to store AAI results.
"""
self.sorted_hit_table = sorted_hit_table
self.evalue_threshold = evalue_threshold
self.per_identity_threshold = per_iden_threshold
self.per_aln_len_threshold = per_aln_len_threshold
self.keep_rbhs = keep_rbhs
self.output_dir = output_dir
# calculate length of genes and number of genes in each genome
self.logger.info('Calculating length of genes.')
self.gene_lengths = {}
self.query_gene_count = defaultdict(int)
query_genomes = set()
for seq_id, seq in seq_io.read_fasta_seq(query_gene_file):
if seq[-1] == '*':
self.gene_lengths[seq_id] = len(seq) - 1
else:
self.gene_lengths[seq_id] = len(seq)
genome_id = seq_id[0:seq_id.find('~')]
self.query_gene_count[genome_id] += 1
query_genomes.add(genome_id)
self.target_gene_count = defaultdict(int)
target_genomes = set()
if target_gene_file:
for seq_id, seq in seq_io.read_fasta_seq(target_gene_file):
if seq[-1] == '*':
self.gene_lengths[seq_id] = len(seq) - 1
else:
self.gene_lengths[seq_id] = len(seq)
genome_id = seq_id[0:seq_id.find('~')]
self.target_gene_count[genome_id] += 1
target_genomes.add(genome_id)
else:
self.target_gene_count = self.query_gene_count
# get byte offset of hits from each genome
self.logger.info('Indexing sorted hit table.')
self.offset_table = self._genome_offsets(self.sorted_hit_table)
# calculate AAI between each pair of genomes in parallel
if target_genomes:
# compare query genomes to target genomes
self.num_pairs = len(query_genomes) * len(target_genomes)
self.logger.info('Calculating AAI between %d query and %d target genomes:' % (len(query_genomes), len(target_genomes)))
else:
            # compute pairwise values between query genomes
            ng = len(query_genomes)
            self.num_pairs = (ng * ng - ng) // 2
self.logger.info('Calculating AAI between all %d pairs of genomes:' % self.num_pairs)
if self.num_pairs == 0:
self.logger.warning('No genome pairs identified.')
return
genome_id_lists = []
query_genomes = list(query_genomes)
target_genomes = list(target_genomes)
for i in range(0, len(query_genomes)):
genome_idI = query_genomes[i]
if target_genomes:
genome_id_list = target_genomes
else:
genome_id_list = []
for j in range(i + 1, len(query_genomes)):
genome_idJ = query_genomes[j]
genome_id_list.append(genome_idJ)
genome_id_lists.append((genome_idI, genome_id_list))
self.processed_paired = 0
parallel = Parallel(self.cpus)
progress_func = self._progress
if self.logger.is_silent:
progress_func = None
consumer_data = parallel.run(self._producer, self._consumer, genome_id_lists, progress_func)
# write results for each genome pair
self.logger.info('Summarizing AAI results.')
        aai_summary_file = os.path.join(output_dir, 'aai_summary.tsv')
        fout = open(aai_summary_file, 'w')
fout.write('#Genome A\tGenes in A\tGenome B\tGenes in B\t# orthologous genes\tMean AAI\tStd AAI\tOrthologous fraction (OF)\n')
for data in consumer_data:
fout.write('%s\t%d\t%s\t%d\t%d\t%.2f\t%.2f\t%.2f\n' % data)
fout.close()
# concatenate RBH files
rbh_output_file = None
if self.keep_rbhs:
self.logger.info('Concatenating RBH files.')
rbh_files = []
for genome_id in query_genomes:
rbh_files.append(os.path.join(self.output_dir, genome_id + '.rbh.tsv'))
rbh_output_file = os.path.join(self.output_dir, 'rbh.tsv')
concatenate_files(rbh_files, rbh_output_file, common_header=True)
for f in rbh_files:
os.remove(f)
        return aai_summary_file, rbh_output_file
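if __name__ == '__main__':
    # Minimal usage sketch (not a definitive driver): the file names,
    # thresholds and output directory are placeholder values, and the
    # sketch assumes biolib's logging setup used by this class.
    calc = AAICalculator(cpus=4)
    result = calc.run('query_genes.faa',   # all query genes in FASTA format
                      None,                # None => reciprocal (all-vs-all) mode
                      'hits.sorted.tsv',   # sorted table of blast hits
                      1e-3,                # evalue_threshold
                      30.0,                # per_iden_threshold
                      70.0,                # per_aln_len_threshold
                      False,               # keep_rbhs
                      'aai_output')        # output_dir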
| dparks1134/CompareM | comparem/aai_calculator.py | Python | gpl-3.0 | 18,604 | [
"BLAST"
] | 344aaf3b12bb62fceaea61fbc8ba34825007926a72ba3fc2e634c2ae5f572d18 |
#!/usr/bin/python
#Load matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
#Loading the mnist data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#Import tensor flow
import tensorflow as tf
sess = tf.InteractiveSession()
#Placeholders for the input images and their one-hot labels
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
#Defining the convolution
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
#Defining the max-pooling
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
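#Shape bookkeeping for the layers below: each 2x2 max-pool halves the
#spatial dimensions, so 28x28 -> 14x14 -> 7x7, which is why the fully
#connected layer expects 7*7*64 inputs.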
### LAYER 1 ###
#Defining the first convolutional layer
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
#Resizing the pictures
x_image = tf.reshape(x, [-1,28,28,1])
#Defining the operations in the first layer
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
### LAYER 2 ###
#Defining the second convolutional layer
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
#Defining the operations in the second layer
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
### LAYER 3 ###
#Defining the third layer which is fully connected
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])
#Defining the operations in the third layer
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
#Performing dropout
keep_prob = tf.placeholder(tf.float32) #Probability of keeping a neuron's output
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
### LAYER 4 ###
#Defining the fourth layer which is just a soft max
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])
#Defining the operations in the fourth layer
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
#Using cross-entropy as a loss function
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))
#Defining the learning rate
learningRate = 1e-4
#Training
train_step = tf.train.AdamOptimizer(learningRate).minimize(cross_entropy)
#Determining the number of correct predictions
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
#Averaging the number of correct predictions
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#Performing the initialization in the back-end
sess.run(tf.global_variables_initializer())
#Doing 2000 training steps
iterations = []
for i in range(2000):
batch = mnist.train.next_batch(50)
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
_, loss_val = sess.run([train_step,cross_entropy], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
iterations.append(loss_val)
plt.plot(iterations)
plt.show()
#Number of correct prediction
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
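#Optionally persist the trained weights; a minimal sketch using the
#standard TF1 Saver API (the checkpoint path is an arbitrary choice)
saver = tf.train.Saver()
saver.save(sess, './mnist_cnn.ckpt')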
| palanglois/augmentedReality | testTensorFlow/MNIST.py | Python | gpl-3.0 | 3,453 | [
"NEURON"
] | 2db4c6838d3b38aa89a02d387fd7549dbea4e8ce81670135edf3f5da7817c9dd |
#!python
# coding=utf-8
import os
import unittest
from pyaxiom.urn import IoosUrn
from pyaxiom.utils import urnify, dictify_urn
from pyaxiom.netcdf.sensors import TimeSeries
import logging
from pyaxiom import logger
logger.level = logging.INFO
logger.handlers = [logging.StreamHandler()]
class IoosUrnTests(unittest.TestCase):
def test_args(self):
u = IoosUrn(asset_type='sensor', authority='me', label='mysupersensor')
assert u.urn == 'urn:ioos:sensor:me:mysupersensor'
def test_setattr(self):
u = IoosUrn()
u.asset_type = 'sensor'
u.authority = 'me'
u.label = 'mysupersensor'
assert u.urn == 'urn:ioos:sensor:me:mysupersensor'
u.version = 'abc'
assert u.urn == 'urn:ioos:sensor:me:mysupersensor:abc'
u.component = 'temp'
assert u.urn == 'urn:ioos:sensor:me:mysupersensor:temp:abc'
def test_constructor_no_data(self):
u = IoosUrn()
assert u.urn is None
def test_constructor_with_bad_data(self):
u = IoosUrn(notanattribute='foo')
assert u.urn is None
def test_station_cant_have_component(self):
u = IoosUrn(asset_type='station', component='something')
assert u.urn is None
def test_no_label(self):
u = IoosUrn(asset_type='station', authority='me')
assert u.urn is None
def test_from_string(self):
u = IoosUrn.from_string('urn:ioos:sensor:myauthority:mylabel')
assert u.asset_type == 'sensor'
assert u.authority == 'myauthority'
assert u.label == 'mylabel'
u = IoosUrn.from_string('urn:ioos:sensor:myauthority:mylabel:mycomponent')
assert u.asset_type == 'sensor'
assert u.authority == 'myauthority'
assert u.label == 'mylabel'
assert u.component == 'mycomponent'
u = IoosUrn.from_string('urn:ioos:sensor:myauthority:mylabel:mycomponent:myversion')
assert u.asset_type == 'sensor'
assert u.authority == 'myauthority'
assert u.label == 'mylabel'
assert u.component == 'mycomponent'
assert u.version == 'myversion'
def test_from_bad_string(self):
u = IoosUrn.from_string('urn:ioos:sensor:whatami')
assert u.urn is None
u = IoosUrn.from_string('urn:ioos:nothinghere')
assert u.urn is None
u = IoosUrn.from_string('urn:totesbroken')
assert u.urn is None
def test_from_long_string(self):
u = IoosUrn.from_string('urn:ioos:sensor:whatami:wow:i:have:lots:of:things')
assert u.urn == 'urn:ioos:sensor:whatami:wow:i:have'
def test_change_sensor_to_station(self):
u = IoosUrn.from_string('urn:ioos:sensor:myauthority:mylabel:mycomponent')
assert u.asset_type == 'sensor'
assert u.authority == 'myauthority'
assert u.label == 'mylabel'
assert u.component == 'mycomponent'
u.asset_type = 'station'
u.component = None
assert u.urn == 'urn:ioos:station:myauthority:mylabel'
def test_messy_urn(self):
u = IoosUrn.from_string('urn:ioos:sensor:myauthority:mylabel:standard_name#key=key1:value1,key2:value2;some_other_key=some_other_value')
assert u.asset_type == 'sensor'
assert u.authority == 'myauthority'
assert u.label == 'mylabel'
assert u.component == 'standard_name#key=key1:value1,key2:value2;some_other_key=some_other_value'
def test_cdiac_urn(self):
u = IoosUrn.from_string('urn:ioos:sensor:gov.ornl.cdiac:cheeca_80w_25n:sea_water_temperature')
assert u.asset_type == 'sensor'
assert u.authority == 'gov.ornl.cdiac'
assert u.label == 'cheeca_80w_25n'
assert u.component == 'sea_water_temperature'
class TestUrnUtils(unittest.TestCase):
def setUp(self):
self.output_directory = os.path.join(os.path.dirname(__file__), "output")
self.latitude = 34
self.longitude = -72
self.station_name = "PytoolsTestStation"
self.global_attributes = dict(id='this.is.the.id')
self.fillvalue = -9999.9
def test_from_dict(self):
d = dict(standard_name='lwe_thickness_of_precipitation_amount')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
vertical_datum='NAVD88')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#vertical_datum=navd88'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
vertical_datum='NAVD88',
discriminant='2')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#vertical_datum=navd88'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: sum (interval: PT24H) time: mean')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:sum;interval=pt24h'
# Interval as a dict key (not inline with cell_methods)
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: sum time: mean',
interval='pt24h')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:sum;interval=pt24h'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: minimum within years time: mean over years')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean_over_years,time:minimum_within_years'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance (interval: PT1H comment: sampled instantaneously)')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:variance;interval=pt1h'
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance time: mean (interval: PT1H comment: sampled instantaneously)')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h'
# Interval specified twice
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance time: mean (interval: PT1H comment: sampled instantaneously)',
interval='PT1H')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h'
# Interval specified twice
d = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance time: mean (interval: PT1H comment: sampled instantaneously)',
interval='PT1H',
discriminant='2')
urn = urnify('axiom', 'foo', d)
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#cell_methods=time:mean,time:variance;interval=pt1h'
def test_from_variable(self):
filename = 'test_urn_from_variable.nc'
times = [0, 1000, 2000, 3000, 4000, 5000]
verticals = None
ts = TimeSeries(output_directory=self.output_directory,
latitude=self.latitude,
longitude=self.longitude,
station_name=self.station_name,
global_attributes=self.global_attributes,
output_filename=filename,
times=times,
verticals=verticals)
values = [20, 21, 22, 23, 24, 25]
attrs = dict(standard_name='lwe_thickness_of_precipitation_amount',
vertical_datum='NAVD88')
ts.add_variable('temperature', values=values, attributes=attrs)
urn = urnify('axiom', 'foo', ts.ncd.variables['temperature'])
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#vertical_datum=navd88'
values = [20, 21, 22, 23, 24, 25]
attrs = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance (interval: PT1H comment: sampled instantaneously)')
ts.add_variable('temperature2', values=values, attributes=attrs)
urn = urnify('axiom', 'foo', ts.ncd.variables['temperature2'])
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:variance;interval=pt1h'
values = [20, 21, 22, 23, 24, 25]
attrs = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance time: mean (interval: PT1H comment: sampled instantaneously)')
ts.add_variable('temperature3', values=values, attributes=attrs)
urn = urnify('axiom', 'foo', ts.ncd.variables['temperature3'])
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h'
values = [20, 21, 22, 23, 24, 25]
attrs = dict(standard_name='lwe_thickness_of_precipitation_amount',
cell_methods='time: variance time: mean (interval: PT1H comment: sampled instantaneously)',
discriminant='2')
ts.add_variable('temperature4', values=values, attributes=attrs)
urn = urnify('axiom', 'foo', ts.ncd.variables['temperature4'])
assert urn == 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#cell_methods=time:mean,time:variance;interval=pt1h'
def test_dict_from_urn(self):
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean time: variance (interval: PT1H)'
assert 'interval' not in d
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h'
d = dictify_urn(urn, combine_interval=False)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean time: variance'
assert d['interval'] == 'PT1H'
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h,pt6h'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean (interval: PT1H) time: variance (interval: PT6H)'
assert 'interval' not in d
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean,time:variance;interval=pt1h,pt6h'
d = dictify_urn(urn, combine_interval=False)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean time: variance'
assert d['interval'] == 'PT1H,PT6H'
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:variance;interval=pt1h'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: variance (interval: PT1H)'
assert 'interval' not in d
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:variance;interval=pt1h'
d = dictify_urn(urn, combine_interval=False)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: variance'
assert d['interval'] == 'PT1H'
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#cell_methods=time:mean_over_years,time:minimum_within_years'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean over years time: minimum within years'
assert 'interval' not in d
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount#vertical_datum=navd88'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['vertical_datum'] == 'NAVD88'
assert 'interval' not in d
assert 'cell_methods' not in d
assert 'discriminant' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert 'interval' not in d
assert 'cell_methods' not in d
assert 'discriminant' not in d
assert 'vertical_datum' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['discriminant'] == '2'
assert 'interval' not in d
assert 'cell_methods' not in d
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#cell_methods=time:mean_over_years,time:minimum_within_years;vertical_datum=navd88'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean over years time: minimum within years'
assert d['discriminant'] == '2'
assert d['vertical_datum'] == 'NAVD88'
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#cell_methods=time:mean_over_years,time:minimum_within_years;interval=pt1h;vertical_datum=navd88'
d = dictify_urn(urn)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean over years time: minimum within years (interval: PT1H)'
assert d['discriminant'] == '2'
assert 'interval' not in d
assert d['vertical_datum'] == 'NAVD88'
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#cell_methods=time:mean_over_years,time:minimum_within_years;interval=pt1h;vertical_datum=navd88'
d = dictify_urn(urn, combine_interval=False)
assert d['standard_name'] == 'lwe_thickness_of_precipitation_amount'
assert d['cell_methods'] == 'time: mean over years time: minimum within years'
assert d['discriminant'] == '2'
assert d['interval'] == 'PT1H'
assert d['vertical_datum'] == 'NAVD88'
urn = 'urn:ioos:sensor:axiom:foo:lwe_thickness_of_precipitation_amount-2#interval=pt2h'
# Cant have an interval without a cell_method
with self.assertRaises(ValueError):
d = dictify_urn(urn)
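if __name__ == "__main__":
    # Convenience entry point for running this test module directly;
    # under an external test runner this block is simply ignored.
    unittest.main()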
| axiom-data-science/pyaxiom | pyaxiom/tests/test_urn.py | Python | mit | 15,973 | [
"NetCDF"
] | d962f8485a86bb57ef300c3b0dd5d06de941ded8c9d10ace7214b5d1af0c4f0c |
#!/usr/bin/env python
import argparse
from msmbuilder.io import backup
import mdtraj
import os
parser = argparse.ArgumentParser(prog='smooth_traj.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
                                 description='''Superpose one or more NetCDF MD trajectories and optionally smooth them with a sliding filter.''')
parser.add_argument("top", help="A prmtop file", type=str)
parser.add_argument("traj", help="""A NetCDF MD trajectory""", nargs='+')
parser.add_argument("-w", "--width", help="The width of the filter", type=int,
default=10, required=False)
parser.add_argument("-n", "--name", help="The name of the output smoothed trajectory", type=str,
default="traj_smoothed.nc", required=False)
parser.add_argument("-r", "--ref", help="A reference structure to superpose to", type=str,
default=None, required=False)
if __name__ == '__main__':
args = parser.parse_args()
print(args)
if len(args.traj) == 1:
traj_name = os.path.basename(args.traj[0])[:-3] # drop the .nc ending
print('Loading traj...')
traj = mdtraj.load(args.traj[0], top=args.top)
print('Superposing...')
atoms = traj.topology.select('name CA')
if args.ref is None:
traj.superpose(traj, 0, atom_indices=atoms)
else:
ref = mdtraj.load(args.ref)
traj.superpose(ref, 0, atom_indices=atoms)
if args.width > 1:
print('Smoothing...')
traj.smooth(args.width, inplace=True)
backup(args.name)
traj.save_netcdf(args.name)
elif len(args.traj) > 1:
print('Loading {} trajs as one...'.format(len(args.traj)))
traj = mdtraj.load(args.traj, top=args.top)
print('Superposing...')
atoms = traj.topology.select('name CA')
if args.ref is None:
traj.superpose(traj, 0, atom_indices=atoms)
else:
ref = mdtraj.load(args.ref)
traj.superpose(ref, 0, atom_indices=atoms)
if args.width > 1:
print('Smoothing...')
traj.smooth(args.width, inplace=True)
backup(args.name)
traj.save_netcdf(args.name)
print('Done!')
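    # Example invocation (hypothetical file names):
    #   python smooth_traj.py system.prmtop run1.nc run2.nc -w 20 \
    #       -n combined_smoothed.nc -r reference.pdb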
| jeiros/Scripts | AnalysisMDTraj/smooth_traj.py | Python | mit | 2,194 | [
"MDTraj",
"NetCDF"
] | 5cd84852899bf095071d6172f72bcae87f3c95ed6e799c9f5fa9651ce6964c8a |
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.analysis.stats.pearsonr` function."""
# Import iris tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis.stats as stats
from iris.exceptions import CoordinateNotFoundError
@tests.skip_data
class Test(tests.IrisTest):
def setUp(self):
# 3D cubes:
cube_temp = iris.load_cube(
tests.get_data_path(
("NetCDF", "global", "xyt", "SMALL_total_column_co2.nc")
)
)
self.cube_a = cube_temp[0:6]
self.cube_b = cube_temp[20:26]
self.cube_b.replace_coord(self.cube_a.coord("time").copy())
cube_temp = self.cube_a.copy()
cube_temp.coord("latitude").guess_bounds()
cube_temp.coord("longitude").guess_bounds()
self.weights = iris.analysis.cartography.area_weights(cube_temp)
def test_perfect_corr(self):
r = stats.pearsonr(self.cube_a, self.cube_a, ["latitude", "longitude"])
self.assertArrayEqual(r.data, np.array([1.0] * 6))
def test_perfect_corr_all_dims(self):
r = stats.pearsonr(self.cube_a, self.cube_a)
self.assertArrayEqual(r.data, np.array([1.0]))
def test_incompatible_cubes(self):
with self.assertRaises(ValueError):
stats.pearsonr(
self.cube_a[:, 0, :], self.cube_b[0, :, :], "longitude"
)
def test_compatible_cubes(self):
r = stats.pearsonr(self.cube_a, self.cube_b, ["latitude", "longitude"])
self.assertArrayAlmostEqual(
r.data,
[
0.81114936,
0.81690538,
0.79833135,
0.81118674,
0.79745386,
0.81278484,
],
)
def test_broadcast_cubes(self):
r1 = stats.pearsonr(
self.cube_a, self.cube_b[0, :, :], ["latitude", "longitude"]
)
r2 = stats.pearsonr(
self.cube_b[0, :, :], self.cube_a, ["latitude", "longitude"]
)
r_by_slice = [
stats.pearsonr(
self.cube_a[i, :, :],
self.cube_b[0, :, :],
["latitude", "longitude"],
).data
for i in range(6)
]
self.assertArrayEqual(r1.data, np.array(r_by_slice))
self.assertArrayEqual(r2.data, np.array(r_by_slice))
def test_compatible_cubes_weighted(self):
r = stats.pearsonr(
self.cube_a, self.cube_b, ["latitude", "longitude"], self.weights
)
self.assertArrayAlmostEqual(
r.data,
[
0.79105429,
0.79988078,
0.78825089,
0.79925653,
0.79009810,
0.80115292,
],
)
def test_broadcast_cubes_weighted(self):
r = stats.pearsonr(
self.cube_a,
self.cube_b[0, :, :],
["latitude", "longitude"],
weights=self.weights[0, :, :],
)
r_by_slice = [
stats.pearsonr(
self.cube_a[i, :, :],
self.cube_b[0, :, :],
["latitude", "longitude"],
weights=self.weights[0, :, :],
).data
for i in range(6)
]
self.assertArrayAlmostEqual(r.data, np.array(r_by_slice))
def test_weight_error(self):
with self.assertRaises(ValueError):
stats.pearsonr(
self.cube_a,
self.cube_b[0, :, :],
["latitude", "longitude"],
weights=self.weights,
)
def test_non_existent_coord(self):
with self.assertRaises(CoordinateNotFoundError):
stats.pearsonr(self.cube_a, self.cube_b, "bad_coord")
def test_mdtol(self):
cube_small = self.cube_a[:, 0, 0]
cube_small_masked = cube_small.copy()
cube_small_masked.data = ma.array(
cube_small.data, mask=np.array([0, 0, 0, 1, 1, 1], dtype=bool)
)
r1 = stats.pearsonr(cube_small, cube_small_masked)
r2 = stats.pearsonr(cube_small, cube_small_masked, mdtol=0.49)
self.assertArrayAlmostEqual(r1.data, np.array([0.74586593]))
self.assertMaskedArrayEqual(r2.data, ma.array([0], mask=[True]))
def test_common_mask_simple(self):
cube_small = self.cube_a[:, 0, 0]
cube_small_masked = cube_small.copy()
cube_small_masked.data = ma.array(
cube_small.data, mask=np.array([0, 0, 0, 1, 1, 1], dtype=bool)
)
r = stats.pearsonr(cube_small, cube_small_masked, common_mask=True)
self.assertArrayAlmostEqual(r.data, np.array([1.0]))
def test_common_mask_broadcast(self):
cube_small = self.cube_a[:, 0, 0]
cube_small_2d = self.cube_a[:, 0:2, 0]
cube_small.data = ma.array(
cube_small.data, mask=np.array([0, 0, 0, 0, 0, 1], dtype=bool)
)
cube_small_2d.data = ma.array(
np.tile(cube_small.data[:, np.newaxis], 2),
mask=np.zeros((6, 2), dtype=bool),
)
# 2d mask varies on unshared coord:
cube_small_2d.data.mask[0, 1] = 1
r = stats.pearsonr(
cube_small,
cube_small_2d,
weights=self.weights[:, 0, 0],
common_mask=True,
)
self.assertArrayAlmostEqual(r.data, np.array([1.0, 1.0]))
# 2d mask does not vary on unshared coord:
cube_small_2d.data.mask[0, 0] = 1
r = stats.pearsonr(cube_small, cube_small_2d, common_mask=True)
self.assertArrayAlmostEqual(r.data, np.array([1.0, 1.0]))
if __name__ == "__main__":
tests.main()
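# For reference, a minimal sketch of the statistic these tests exercise:
# with weights w and anomalies a' = a - mean(a), b' = b - mean(b) over the
# reduced coordinates,
#   r = sum(w * a' * b') / sqrt(sum(w * a'**2) * sum(w * b'**2))
# which iris.analysis.stats.pearsonr computes per remaining slice.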
| pp-mo/iris | lib/iris/tests/unit/analysis/stats/test_pearsonr.py | Python | lgpl-3.0 | 5,987 | [
"NetCDF"
] | 091d11ceb0fedc8d60c725ebdeae2da5ed5ed188e8da5caf25026bfcf2398529 |
# LOFAR IMAGING PIPELINE
#
# BBS Source Catalogue List
# Bart Scheers, 2011
# L.H.A.Scheers@uva.nl
# ------------------------------------------------------------------------------
import sys, string
import collections
import numpy as np
import monetdb.sql as db
import logging
from .gsm_exceptions import GSMException
def expected_fluxes_in_fov(conn, ra_central, decl_central, fov_radius,
assoc_theta, bbsfile,
storespectraplots=False, deruiter_radius=0.,
vlss_flux_cutoff=None,
patchname=''):
"""Search for VLSS, WENSS and NVSS sources that
are in the given FoV. The FoV is set by its central position
(ra_central, decl_central) out to a radius of fov_radius.
The query looks for cross-matches around the sources, out
to a radius of assoc_theta.
All units are in degrees.
    deruiter_radius is a measure of the association uncertainty that takes
    positional errors into account (see the thesis of Bart Scheers). If not
    given as a positive value, it is read from the TKP config file; if not
    available there, it defaults to 3.717.
    The query returns the ids of all VLSS sources that are in the FoV,
    together with the ids of any counterparts found in the other
    catalogues.
    If patchname is given, all sources get that patch name; the centre of
    the patch is the given central ra/decl and its brightness is the
    summed flux.
"""
DERUITER_R = deruiter_radius
if DERUITER_R <= 0:
try:
from tkp.config import config
DERUITER_R = config['source_association']['deruiter_radius']
##print "DERUITER_R =",DERUITER_R
except:
DERUITER_R=3.717
#TODO: Check what happens at high decl when alpha goes to 180 degrees
    # alpha(theta, decl) widens the RA search window by roughly
    # theta / cos(decl), so the window grows towards the poles.
    if ra_central - alpha(fov_radius, decl_central) < 0 and ra_central + alpha(fov_radius, decl_central) > 360:
        raise BaseException("FoV crosses both RA = 0 and RA = 360 degrees, not implemented yet")
    elif ra_central - alpha(fov_radius, decl_central) < 0:
        ra_min1 = np.float(ra_central - alpha(fov_radius, decl_central) + 360.0)
        ra_max1 = np.float(360.0)
        ra_min2 = np.float(0.0)
        ra_max2 = np.float(ra_central + alpha(fov_radius, decl_central))
        q = "q_across_ra0"
    elif ra_central + alpha(fov_radius, decl_central) > 360:
        ra_min1 = np.float(ra_central - alpha(fov_radius, decl_central))
        ra_max1 = np.float(360.0)
        ra_min2 = np.float(0.0)
        ra_max2 = np.float(ra_central + alpha(fov_radius, decl_central) - 360)
        q = "q_across_ra0"
    else:
        ra_min = np.float(ra_central - alpha(fov_radius, decl_central))
        ra_max = np.float(ra_central + alpha(fov_radius, decl_central))
        q = "q0"
if vlss_flux_cutoff is None:
vlss_flux_cutoff = 0.
status = True
bbsrows = []
totalFlux = 0.
    # This is the dimensionless search radius that takes into account the
    # difference in ra and decl between two sources, weighted by their
    # positional errors.
deRuiter_reduced = DERUITER_R/3600.
q_across_ra0 = """\
SELECT t0.v_catsrcid
,t0.catsrcname
,t1.wm_catsrcid
,t2.wp_catsrcid
,t3.n_catsrcid
,t0.v_flux
,t1.wm_flux
,t2.wp_flux
,t3.n_flux
,t0.v_flux_err
,t1.wm_flux_err
,t2.wp_flux_err
,t3.n_flux_err
,t1.wm_assoc_distance_arcsec
,t1.wm_assoc_r
,t2.wp_assoc_distance_arcsec
,t2.wp_assoc_r
,t3.n_assoc_distance_arcsec
,t3.n_assoc_r
,t0.pa
,t0.major
,t0.minor
,t0.ra
,t0.decl
FROM (SELECT c1.catsrcid AS v_catsrcid
,c1.catsrcname
,c1.ra
,c1.decl
,c1.i_int_avg AS v_flux
,c1.i_int_avg_err AS v_flux_err
,c1.pa
,c1.major
,c1.minor
FROM (SELECT catsrcid
,catsrcname
,ra
,decl
,pa
,major
,minor
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
) t0
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wm_catsrcid
,c2.i_int_avg AS wm_flux
,c2.i_int_avg_err AS wm_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wm_assoc_distance_arcsec
,3600 * SQRT(((c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wm_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 5
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t1
ON t0.v_catsrcid = t1.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wp_catsrcid
,c2.i_int_avg AS wp_flux
,c2.i_int_avg_err AS wp_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wp_assoc_distance_arcsec
,3600 * SQRT(( (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wp_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 6
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t2
ON t0.v_catsrcid = t2.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS n_catsrcid
,c2.i_int_avg AS n_flux
,c2.i_int_avg_err AS n_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS n_assoc_distance_arcsec
,3600 * SQRT(((c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
* (c1.ra_mod * COS(RADIANS(c1.decl)) - c2.ra_mod * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS n_assoc_r
FROM (SELECT catsrcid
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,MOD(ra + 180, 360) AS ra_mod
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 3
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND (ra BETWEEN %(ra_min1)s
AND %(ra_max1)s
OR ra BETWEEN %(ra_min2)s
AND %(ra_max2)s
)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
* (c2.ra_mod * COS(RADIANS(c2.decl)) - c1.ra_mod * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t3
ON t0.v_catsrcid = t3.v_catsrcid
WHERE t0.v_flux >= %(vlss_flux_cutoff)s
ORDER BY t0.v_catsrcid
"""
q0 = """\
SELECT t0.v_catsrcid
,t0.catsrcname
,t1.wm_catsrcid
,t2.wp_catsrcid
,t3.n_catsrcid
,t0.v_flux
,t1.wm_flux
,t2.wp_flux
,t3.n_flux
,t0.v_flux_err
,t1.wm_flux_err
,t2.wp_flux_err
,t3.n_flux_err
,t1.wm_assoc_distance_arcsec
,t1.wm_assoc_r
,t2.wp_assoc_distance_arcsec
,t2.wp_assoc_r
,t3.n_assoc_distance_arcsec
,t3.n_assoc_r
,t0.pa
,t0.major
,t0.minor
,t0.ra
,t0.decl
FROM (SELECT c1.catsrcid AS v_catsrcid
,c1.catsrcname
,c1.ra
,c1.decl
,c1.i_int_avg AS v_flux
,c1.i_int_avg_err AS v_flux_err
,c1.pa
,c1.major
,c1.minor
FROM (SELECT catsrcid
,catsrcname
,ra
,decl
,pa
,major
,minor
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
) t0
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wm_catsrcid
,c2.i_int_avg AS wm_flux
,c2.i_int_avg_err AS wm_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wm_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wm_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 5
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t1
ON t0.v_catsrcid = t1.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS wp_catsrcid
,c2.i_int_avg AS wp_flux
,c2.i_int_avg_err AS wp_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS wp_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS wp_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 6
AND (src_type = 'S' OR src_type = 'M')
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t2
ON t0.v_catsrcid = t2.v_catsrcid
LEFT OUTER JOIN
(SELECT c1.catsrcid AS v_catsrcid
,c2.catsrcid AS n_catsrcid
,c2.i_int_avg AS n_flux
,c2.i_int_avg_err AS n_flux_err
,3600 * DEGREES(2 * ASIN(SQRT( (c1.x - c2.x) * (c1.x - c2.x)
+ (c1.y - c2.y) * (c1.y - c2.y)
+ (c1.z - c2.z) * (c1.z - c2.z)
) / 2)
) AS n_assoc_distance_arcsec
,SQRT(((c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
* (c1.ra * COS(RADIANS(c1.decl)) - c2.ra * COS(RADIANS(c2.decl)))
/ (c1.ra_err * c1.ra_err + c2.ra_err * c2.ra_err))
+ ((c1.decl - c2.decl) * (c1.decl - c2.decl)
/ (c1.decl_err * c1.decl_err + c2.decl_err * c2.decl_err))
) AS n_assoc_r
FROM (SELECT catsrcid
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
FROM catalogedsources
WHERE cat_id = 4
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c1
,(SELECT catsrcid
,zone
,ra
,decl
,ra_err
,decl_err
,x
,y
,z
,i_int_avg
,i_int_avg_err
FROM catalogedsources
WHERE cat_id = 3
AND zone BETWEEN CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s) AS INTEGER)
AND CAST(FLOOR(CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s) AS INTEGER)
AND decl BETWEEN CAST(%(decl_central)s AS DOUBLE) - %(fov_radius)s
AND CAST(%(decl_central)s AS DOUBLE) + %(fov_radius)s
AND ra BETWEEN CAST(%(ra_central)s AS DOUBLE) - alpha(%(fov_radius)s, %(decl_central)s)
AND CAST(%(ra_central)s AS DOUBLE) + alpha(%(fov_radius)s, %(decl_central)s)
AND x * COS(RADIANS(%(decl_central)s)) * COS(RADIANS(%(ra_central)s))
+ y * COS(RADIANS(%(decl_central)s)) * SIN(RADIANS(%(ra_central)s))
+ z * SIN(RADIANS(%(decl_central)s)) > COS(RADIANS(%(fov_radius)s))
) c2
WHERE c2.zone BETWEEN CAST(FLOOR(c1.decl - %(assoc_theta)s) AS INTEGER)
AND CAST(FLOOR(c1.decl + %(assoc_theta)s) AS INTEGER)
AND c2.decl BETWEEN c1.decl - %(assoc_theta)s
AND c1.decl + %(assoc_theta)s
AND c2.ra BETWEEN c1.ra - alpha(%(assoc_theta)s, c1.decl)
AND c1.ra + alpha(%(assoc_theta)s, c1.decl)
AND c2.x * c1.x + c2.y * c1.y + c2.z * c1.z > COS(RADIANS(%(assoc_theta)s))
AND SQRT(((c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
* (c2.ra * COS(RADIANS(c2.decl)) - c1.ra * COS(RADIANS(c1.decl)))
/ (c2.ra_err * c2.ra_err + c1.ra_err * c1.ra_err))
+ ((c2.decl - c1.decl) * (c2.decl - c1.decl)
/ (c2.decl_err * c2.decl_err + c1.decl_err * c1.decl_err))) < %(deRuiter_reduced)s
) t3
ON t0.v_catsrcid = t3.v_catsrcid
WHERE t0.v_flux >= %(vlss_flux_cutoff)s
ORDER BY t0.v_catsrcid
"""
try:
cursor = conn.cursor()
if q == "q0":
query = q0
args = {'decl_central': decl_central
,'ra_central': ra_central
,'fov_radius': fov_radius
,'assoc_theta': assoc_theta
,'deRuiter_reduced': deRuiter_reduced
,'vlss_flux_cutoff': vlss_flux_cutoff}
cursor.execute(query, args)
elif q == "q_across_ra0":
query = q_across_ra0
args = {'decl_central': decl_central
,'ra_central': ra_central
,'ra_min1': ra_min1
,'ra_max1': ra_max1
,'ra_min2': ra_min2
,'ra_max2': ra_max2
,'fov_radius': fov_radius
,'assoc_theta': assoc_theta
,'deRuiter_reduced': deRuiter_reduced
,'vlss_flux_cutoff': vlss_flux_cutoff}
cursor.execute(query, args)
else:
raise BaseException("ra = %s > 360 degrees, not implemented yet" % str(ra_central + alpha(fov_radius, decl_central)))
results = list(zip(*cursor.fetchall()))
cursor.close()
if len(results) == 0:
raise GSMException("No sources found, so Sky Model File %s is not created" % (bbsfile,))
vlss_catsrcid = results[0]
vlss_name = results[1]
wenssm_catsrcid = results[2]
wenssp_catsrcid = results[3]
nvss_catsrcid = results[4]
v_flux = results[5]
wm_flux = results[6]
wp_flux = results[7]
n_flux = results[8]
v_flux_err = results[9]
wm_flux_err = results[10]
wp_flux_err = results[11]
n_flux_err = results[12]
wm_assoc_distance_arcsec = results[13]
wm_assoc_r = results[14]
wp_assoc_distance_arcsec = results[15]
wp_assoc_r = results[16]
n_assoc_distance_arcsec = results[17]
n_assoc_r = results[18]
pa = results[19]
major = results[20]
minor = results[21]
ra = results[22]
decl = results[23]
spectrumfiles = []
        # Check for duplicate vlss_names. These arise when a VLSS source
        # is associated with more than one (genuine) counterpart.
# Eg., if two NVSS sources are seen as counterparts
# VLSS - WENSS - NVSS_1
# VLSS - WENSS - NVSS_2
# two rows will be added to the sky model, where the VLSS name
# is postfixed with _0 and _1, resp.
import collections
items = collections.defaultdict(list)
src_name = list(vlss_name)
for i, item in enumerate(src_name):
items[item].append(i)
for item, locs in items.items():
if len(locs) > 1:
#print "duplicates of", item, "at", locs
for j in range(len(locs)):
src_name[locs[j]] = src_name[locs[j]] + "_" + str(j)
if len(results) != 0:
for i in range(len(vlss_catsrcid)):
##print "\ni = ", i
bbsrow = ""
# Here we check the cases for the degree of the polynomial spectral index fit
#print i, vlss_name[i],vlss_catsrcid[i], wenssm_catsrcid[i], wenssp_catsrcid[i], nvss_catsrcid[i]
# Write the vlss name of the source (either postfixed or not)
bbsrow += src_name[i] + ", "
# According to Jess, only sources that have values for all
# three are considered as GAUSSIAN
if pa[i] is not None and major[i] is not None and minor[i] is not None:
#print "Gaussian:", pa[i], major[i], minor[i]
bbsrow += "GAUSSIAN, "
else:
#print "POINT"
bbsrow += "POINT, "
#print "ra = ", ra[i], "; decl = ", decl[i]
#print "BBS ra = ", ra2bbshms(ra[i]), "; BBS decl = ", decl2bbsdms(decl[i])
bbsrow += ra2bbshms(ra[i]) + ", " + decl2bbsdms(decl[i]) + ", "
                # Stokes I is the default, so the field is left empty
                #bbsrow += ", "
lognu = []
logflux = []
lognu.append(np.log10(74.0/60.0))
logflux.append(np.log10(v_flux[i]))
if wenssm_catsrcid[i] is not None:
lognu.append(np.log10(325.0/60.0))
logflux.append(np.log10(wm_flux[i]))
if wenssp_catsrcid[i] is not None:
lognu.append(np.log10(352.0/60.0))
logflux.append(np.log10(wp_flux[i]))
if nvss_catsrcid[i] is not None:
lognu.append(np.log10(1400.0/60.0))
logflux.append(np.log10(n_flux[i]))
f = ""
for j in range(len(logflux)):
f += str(10**logflux[j]) + "; "
##print f
#print "len(lognu) = ",len(lognu), "nvss_catsrcid[",i,"] =", nvss_catsrcid[i]
                # Here we write the expected flux value at 60 MHz, and the fitted
                # spectral index and curvature term
if len(lognu) == 1:
#print "Exp. flux:", 10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0))
#print "Default -0.7"
fluxrow = round(10**(np.log10(v_flux[i]) + 0.7 * np.log10(74.0/60.0)), 2)
totalFlux += fluxrow
bbsrow += str(fluxrow) + ", , , , , "
bbsrow += "[-0.7]"
elif len(lognu) == 2 or (len(lognu) == 3 and nvss_catsrcid[i] is None):
#print "Do a 1-degree polynomial fit"
# p has form : p(x) = p[0] + p[1]*x
p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 1))
#print p
if storespectraplots:
spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "spectrum_%s.eps" % vlss_name[i])
spectrumfiles.append(spectrumfile)
# Default reference frequency is reported, so we leave it empty here;
# Catalogues just report on Stokes I, so others are empty.
fluxrow = round(10**p[0], 4)
totalFlux += fluxrow
bbsrow += str(fluxrow) + ", , , , , "
bbsrow += "[" + str(round(p[1], 4)) + "]"
elif (len(lognu) == 3 and nvss_catsrcid[i] is not None) or len(lognu) == 4:
#print "Do a 2-degree polynomial fit"
# p has form : p(x) = p[0] + p[1]*x + p[2]*x**2
p = np.poly1d(np.polyfit(np.array(lognu), np.array(logflux), 2))
#print p
if storespectraplots:
spectrumfile = plotSpectrum(np.array(lognu), np.array(logflux), p, "spectrum_%s.eps" % vlss_name[i])
spectrumfiles.append(spectrumfile)
# Default reference frequency is reported, so we leave it empty here
bbsrow += str(round(10**p[0], 4)) + ", , , , , "
bbsrow += "[" + str(round(p[1],4)) + ", " + str(round(p[2],4)) + "]"
if pa[i] is not None and major[i] is not None and minor[i] is not None:
# Gaussian source:
bbsrow += ", " + str(round(major[i], 2)) + ", " + str(round(minor[i], 2)) + ", " + str(round(pa[i], 2))
#print bbsrow
bbsrows.append (bbsrow)
if storespectraplots:
print("Spectra available in:", spectrumfiles)
# Write the format line.
# Optionally it contains a column containing the patch name.
skymodel = open(bbsfile, 'w')
header = "FORMAT = Name, Type, Ra, Dec, I, Q, U, V, ReferenceFrequency='60e6', SpectralIndex='[0.0]', MajorAxis, MinorAxis, Orientation"
# Add fixed patch name to the header and add a line defining the patch.
if len(patchname) > 0:
header += ", patch=fixed'" + patchname + "'\n\n"
header += "# the next line defines the patch\n"
header += ',, ' + ra2bbshms(ra_central) + ', ' + decl2bbsdms(decl_central) + ', ' + str(totalFlux)
header += "\n\n# the next lines define the sources\n"
skymodel.write(header)
for bbsrow in bbsrows:
skymodel.write(bbsrow + '\n')
skymodel.close()
print("Sky model stored in source table:", bbsfile)
except db.Error as e:
logging.warn("Failed on query nr %s; for reason %s" % (query, e))
raise
def plotSpectrum(x, y, p, f):
import pylab
expflux = "Exp. flux: " + str(round(10**p(0),3)) + " Jy"
fig = pylab.figure()
ax = fig.add_subplot(111)
for i in range(len(ax.get_xticklabels())):
ax.get_xticklabels()[i].set_size('x-large')
for i in range(len(ax.get_yticklabels())):
ax.get_yticklabels()[i].set_size('x-large')
ax.set_xlabel(r'$\log \nu/\nu_0$', size='x-large')
    ax.set_ylabel(r'$\log S$', size='x-large')
# Roughly between log10(30/60) and log10(1500/60)
xp = np.linspace(-0.3, 1.5, 100)
ax.plot(x, y, 'o', label='cat fluxes')
ax.plot(0.0, p(0), 'o', color='k', label=expflux )
ax.plot(xp, p(xp), linestyle='--', linewidth=2, label='fit')
pylab.legend(numpoints=1, loc='best')
pylab.grid(True)
pylab.savefig(f, dpi=600)
return f
def decl2bbsdms(d):
"""Based on function deg2dec Written by Enno Middelberg 2001
http://www.atnf.csiro.au/people/Enno.Middelberg/python/python.html
"""
deg = float(d)
sign = "+"
    # test whether the input numbers are sane:
    # if negative, store "-" in sign and continue the calculation
    # with a positive value
if deg < 0:
sign = "-"
deg = deg * (-1)
#if deg > 180:
# logging.warn("%s: inputs may not exceed 180!" % deg)
# raise
#if deg > 90:
# print `deg`+" exceeds 90, will convert it to negative dec\n"
# deg=deg-90
# sign="-"
    if deg > 90:  # deg was made positive above
        logging.warning("%s: declination may not exceed 90 degrees!" % deg)
hh = int(deg)
mm = int((deg - int(deg)) * 60)
ss = '%10.8f' % (((deg - int(deg)) * 60 - mm) * 60)
#print '\t'+sign+string.zfill(`hh`,2)+':'+string.zfill(`mm`,2)+':'+'%10.8f' % ss
#print '\t'+sign+string.zfill(`hh`,2)+' '+string.zfill(`mm`,2)+' '+'%10.8f' % ss
#print '\t'+sign+string.zfill(`hh`,2)+'h'+string.zfill(`mm`,2)+'m'+'%10.8fs\n' % ss
    return sign + str(hh).zfill(2) + '.' + str(mm).zfill(2) + '.' + ss.zfill(11)
def ra2bbshms(a):
    deg = float(a)
    # test whether the input numbers are sane:
    if deg < 0 or deg > 360:
        logging.warning("%s: RA must be within 0..360 degrees!" % deg)
hh = int(deg / 15)
mm = int((deg - 15 * hh) * 4)
ss = '%10.8f' % ((4 * deg - 60 * hh - mm) * 60)
#print '\t'+string.zfill(`hh`,2)+':'+string.zfill(`mm`,2)+':'+'%10.8f' % ss
#print '\t'+string.zfill(`hh`,2)+' '+string.zfill(`mm`,2)+' '+'%10.8f' % ss
#print '\t'+string.zfill(`hh`,2)+'h'+string.zfill(`mm`,2)+'m'+'%10.8fs\n' % ss
    return str(hh).zfill(2) + ':' + str(mm).zfill(2) + ':' + ss.zfill(11)
def alpha(theta, decl):
if abs(decl) + theta > 89.9:
return 180.0
else:
return degrees(abs(np.arctan(np.sin(radians(theta)) / np.sqrt(abs(np.cos(radians(decl - theta)) * np.cos(radians(decl + theta)))))))
def degrees(r):
return r * 180 / np.pi
def radians(d):
return d * np.pi / 180
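# Illustrative use of the helpers above (a minimal sketch; the sample values
# are arbitrary, and np is assumed to be numpy as elsewhere in this module):
if __name__ == "__main__":
    print(ra2bbshms(266.41683))    # -> '17:45:40.039...' (BBS hms string)
    print(decl2bbsdms(-29.00781))  # -> '-29.00.28.116...' (BBS dms string)
    # RA search half-width (deg) for a 5 deg radius at decl = 60 deg;
    # alpha() saturates at 180 deg close to the poles.
    print(alpha(5.0, 60.0))        # -> ~10.04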
| kernsuite-debian/lofar | CEP/GSM/src/gsmutils.py | Python | gpl-3.0 | 48,239 | [
"Gaussian"
] | d44274e266f373a9015e1d888aede37507079523e88992ed7cf0a6e716244c98 |
# -*- coding: utf-8 -*-
#
# File: MusicRecruiting.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 2.0-beta10
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
__author__ = """unknown <unknown>"""
__docformat__ = 'plaintext'
##code-section init-module-header #fill in your manual code here
##/code-section init-module-header
# Subpackages
# Additional
# Classes
import Visit
import VisitFolder
import FacultyMember
import FacultyMemberFolder
import Student
import StudentFolder
import School
import SchoolFolder
import Contact
import ContactFolder
##code-section init-module-footer #fill in your manual code here
##/code-section init-module-footer
| uwosh/UWOshMusicRecruiting | content/__init__.py | Python | gpl-2.0 | 1,418 | [
"VisIt"
] | 869fadc0bfb1b58fa539656ba720a282c59f78a7d3d1003781608522bf9b938a |
"""
A module to automate CIGALE. Currently works for a single galaxy.
It generates a configuration file and runs the standard pcigale
script. Requires pcigale already installed on the system.
"""
import numpy as np
import sys, os, glob, multiprocessing, warnings
from collections import OrderedDict
from astropy.table import Table
try:
from pcigale.session.configuration import Configuration
except ImportError:
print("You will need to install pcigale to use the cigale.py module")
else:
from pcigale.analysis_modules import get_module
from pcigale.data import Database
from frb.surveys.catalog_utils import _detect_mag_cols, convert_mags_to_flux
from IPython import embed
# Default list of SED modules for CIGALE
_DEFAULT_SED_MODULES = ("sfhdelayed", "bc03", "nebular", "dustatt_calzleit", "dale2014",
"restframe_parameters", "redshifting")
#TODO Create a function to check the input filters
#Or create a translation file like eazy's.
#def check_filters(data_file):
def _sed_default_params(module):
"""
Set the default parameters for CIGALE
Args:
module (str):
Specify the SED using the CIGALE standard names, e.g. sfhdelayed, bc03, etc.
Returns:
params (dict): the default dict of SED modules
and their initial parameters.
"""
params = {}
if module == "sfhdelayed":
params['tau_main'] = (10**np.linspace(1,3,10)).tolist() #e-folding time of main population (Myr)
params['age_main'] = (10**np.linspace(3,4,10)).tolist() #age (Myr)
params['tau_burst'] = 50.0 #burst e-folding time (Myr)
params['age_burst'] = 20.0
params['f_burst'] = 0.0 #burst fraction by mass
params['sfr_A'] = 0.1 #SFR at t = 0 (Msun/yr)
params['normalise'] = False # Normalise SFH to produce one solar mass
elif module == "bc03":
params['imf'] = 1 #0: Salpeter 1: Chabrier
params['metallicity'] = [0.0001, 0.0004, 0.004, 0.008, 0.02, 0.05]
        params['separation_age'] = 10 # Separation between young and old stellar populations (Myr)
elif module == 'nebular':
params['logU'] = -2.0 # Ionization parameter
params['f_esc'] = 0.0 # Escape fraction of Ly continuum photons
params['f_dust'] = 0.0 # Fraction of Ly continuum photons absorbed
params['lines_width'] = 300.0
params['emission'] = True
elif module == 'dustatt_calzleit':
params['E_BVs_young'] = [0.12, 0.25, 0.37, 0.5, 0.62, 0.74, 0.86] #Stellar color excess for young continuum
params['E_BVs_old_factor'] = 1.0 # Reduction of E(B-V) for the old population w.r.t. young
params['uv_bump_wavelength'] = 217.5 #central wavelength of UV bump (nm)
params['uv_bump_width'] = 35.6 #UV bump FWHM (nm)
params['uv_bump_amplitude'] = 1.3 # Amplitude of the UV bump. For the Milky Way: 3.
        # The following parameter can have a significant effect on stellar mass
# We use the recommendation in Lo Faro+2017
params['powerlaw_slope'] = -0.13 # Slope delta of the power law modifying the attenuation curve.
# These filters have no effect
params['filters'] = 'B_B90 & V_B90 & FUV'
elif module == 'dale2014':
params['fracAGN'] = [0.0,0.05,0.1,0.2]
params['alpha'] = 2.0
elif module == 'restframe_parameters':
params['beta_calz94'] = False
params['D4000'] = False
params['IRX'] = False
params['EW_lines'] = '500.7/1.0 & 656.3/1.0'
params['luminosity_filters'] = 'u_prime & r_prime'
params['colours_filters'] = 'u_prime-r_prime'
elif module == 'redshifting':
params['redshift'] = '' #Use input redshifts
return params
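# How the defaults are consumed (a sketch; the values shown are the ones set
# above):
#
#   >>> _sed_default_params('bc03')['imf']
#   1
#   >>> _sed_default_params('redshifting')
#   {'redshift': ''}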
def gen_cigale_in(photometry_table, zcol, idcol=None, infile="cigale_in.fits",
overwrite=True, **kwargs):
"""
Generates the input catalog from
a photometric catalog.
Args:
photometry_table (astropy Table):
A table from some photometric
catalog with magnitudes and
error measurements. Currently supports
DES, DECaLS, SDSS, Pan-STARRS and WISE
The naming convention follows those specified in frb.galaxies.defs
with the exception of WISE which use WISE-1, etc. although the code
also handles WISE-W1, etc.
zcol (str):
Name of the column with redshift estimates
idcol (str, optional):
Name of the column with object IDs. By default,
the code looks for the first column with "ID" in
its name. If that's not present, it creates a
column with row numbers for IDs.
infile (str, optional):
Output name + path for the CIGALE input file generated
overwrite (bool, optional):
If true, overwrites file if it already exists
kwargs: only here to catch extras
"""
#Table must have a column with redshift estimates
if not isinstance(zcol, str):
raise IOError("zcol must be a column name. i.e. a string")
assert zcol in photometry_table.colnames, "{} not found in the table. Please check".format(zcol)
magcols, mag_errcols = _detect_mag_cols(photometry_table)
cigtab = photometry_table.copy()
cigtab.rename_column(zcol,"redshift")
photom_cols = magcols+mag_errcols
# Rename any column with "ID" in it to "id"
if idcol is None:
try:
idcol = [col for col in cigtab.colnames if "ID" in col.upper()][0]
except IndexError:
print("No column with 'ID' in name. Adding a column.")
idcol = 'id'
cigtab[idcol] = np.arange(len(cigtab))+1
cigtab.rename_column(idcol,"id")
#First round of renaming
cigtab = convert_mags_to_flux(cigtab)
cigtab = cigtab[['id','redshift']+photom_cols]
# Rename our filters to CIGALE names, as needed
new_names = {
'SDSS_u': 'sdss.up',
'SDSS_g': 'sdss.gp',
'SDSS_r': 'sdss.rp',
'SDSS_i': 'sdss.ip',
'SDSS_z': 'sdss.zp',
'VLT_u': 'VLT_FORS2_u',
'VLT_g': 'VLT_FORS2_g',
'VLT_I': 'VLT_FORS2_I',
'VLT_z': 'VLT_FORS2_z',
'WISE_W1': 'WISE1',
'WISE_W2': 'WISE2',
'WISE_W3': 'WISE3',
'WISE_W4': 'WISE4',
'VISTA_Y': 'vista.vircam.Y',
'VISTA_J': 'vista.vircam.J',
'VISTA_H': 'vista.vircam.H',
'VISTA_Ks': 'vista.vircam.Ks',
'LRISr_I': 'LRIS_I',
'LRISb_V': 'LRIS_V',
'WFC3_F160W': 'hst.wfc3.F160W',
'WFC3_F300X': 'WFC3_F300X',
'Spitzer_3.6': 'spitzer.irac.ch1',
'Spitzer_4.5': 'spitzer.irac.ch2',
}
for key in new_names:
if key in photom_cols:
cigtab.rename_column(key, new_names[key])
            # Rename the corresponding error column too, if present
if key+'_err' in photom_cols:
cigtab.rename_column(key+'_err', new_names[key]+'_err')
cigtab.write(infile,overwrite=overwrite)
return
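# Example (illustrative; the table below is hypothetical, and its column
# names must follow the conventions in frb.galaxies.defs):
#
#   >>> from astropy.table import Table
#   >>> tbl = Table({'my_ID': [1], 'z_spec': [0.3],
#   ...              'SDSS_r': [21.2], 'SDSS_r_err': [0.1]})
#   >>> gen_cigale_in(tbl, zcol='z_spec', idcol='my_ID',
#   ...               infile='cigale_in.fits')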
def _initialise(data_file, config_file="pcigale.ini",
cores=None, save_sed=False, variables="", sed_modules=_DEFAULT_SED_MODULES,
sed_modules_params=None, **kwargs):
"""
Initialise a CIGALE configuration file and write to disk.
Args:
data_file (str):
Path to the input photometry data file.
config_file (str, optional):
Path to the file where CIGALE's configuration
is stored.
cores (int, optional):
Number of CPU cores to be used. Defaults
to all cores on the system.
save_sed (bool, optional):
Save the best fit SEDs to disk for each galaxy.
variables (str or list, optional):
A single galaxy property name to save to results
or a list of variable names. Names must belong
to the list defined in the CIGALE documentation.
sed_modules (list or tuple, optional):
A list of SED modules to be used in the
PDF analysis. If this is being input, there
should be a corresponding correct dict
for sed_modules_params.
sed_module_params (dict, optional):
A dict containing parameter values for
the input SED modules. Better not use this
unless you know exactly what you're doing.
kwargs: only here to catch extras
Returns:
cigconf (pcigale.session.configuration.Configuration):
CIGALE Configuration object
"""
# Check
if sed_modules !=_DEFAULT_SED_MODULES:
assert sed_modules_params is not None,\
"If you're not using the default modules, you'll have to input SED parameters"
# Init
cigconf = Configuration(config_file) #a set of dicts, mostly
cigconf.create_blank_conf() #Initialises a pcigale.ini file
# fill in initial values
cigconf.pcigaleini_exists = True
cigconf.config['data_file'] = data_file
cigconf.config['param_file'] = ""
cigconf.config['sed_modules'] = sed_modules
cigconf.config['analysis_method'] = 'pdf_analysis'
if cores is None:
cores = multiprocessing.cpu_count() #Use all cores
cigconf.config['cores'] = cores
cigconf.generate_conf() #Writes defaults to config_file
cigconf.config['analysis_params']['variables'] = variables
cigconf.config['analysis_params']['save_best_sed'] = save_sed
cigconf.config['analysis_params']['lim_flag'] = True
# Change the default values to new defaults:
if sed_modules_params is None:
sed_modules_params = {}
for module in sed_modules:
sed_modules_params[module] = _sed_default_params(module)
cigconf.config['sed_modules_params'] = sed_modules_params
# Overwrites the config file
cigconf.config.write()
# Return
return cigconf
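# Sketch of calling _initialise() directly (normally done through run()
# below; 'cigale_in.fits' is assumed to exist and to follow the layout
# written by gen_cigale_in):
#
#   >>> cigconf = _initialise('cigale_in.fits', cores=4,
#   ...                       variables=['stellar.m_star', 'sfh.sfr'])
#   >>> cigconf.config['analysis_method']
#   'pdf_analysis'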
def run(photometry_table, zcol, data_file="cigale_in.fits", config_file="pcigale.ini",
wait_for_input=False, plot=True, outdir='out', compare_obs_model=False, **kwargs):
"""
Input parameters and then run CIGALE.
Args:
photometry_table (astropy Table):
A table from some photometric catalog with magnitudes and
error measurements. Currently supports
DES, DECaLS, SDSS, Pan-STARRS and WISE
zcol (str):
Name of the column with redshift estimates.
data_file (str, optional):
Root name for the photometry data file generated used as input to CIGALE
config_file (str, optional):
Root name for the file where CIGALE's configuration is generated
wait_for_input (bool, optional):
If true, waits for the user to finish editing the auto-generated config file
before running.
plot (bool, optional):
Plots the best fit SED if true
cores (int, optional):
Number of CPU cores to be used. Defaults
to all cores on the system.
outdir (str, optional):
Path to the many outputs of CIGALE
If not supplied, the outputs will appear in a folder named out/
compare_obs_model (bool, optional):
If True compare the input observed fluxes with the model fluxes
This writes a Table to outdir named 'photo_observed_model.dat'
kwargs: These are passed into gen_cigale_in() and _initialise()
save_sed (bool, optional):
Save the best fit SEDs to disk for each galaxy.
variables (str or list, optional):
A single galaxy property name to save to results
or a list of variable names. Names must belong
to the list defined in the CIGALE documentation.
sed_modules (list of 'str', optional):
A list of SED modules to be used in the
PDF analysis. If this is being input, there
should be a corresponding correct dict
for sed_modules_params.
sed_module_params (dict, optional):
A dict containing parameter values for
the input SED modules. Better not use this
unless you know exactly what you're doing.
"""
gen_cigale_in(photometry_table,zcol,infile=data_file,overwrite=True, **kwargs)
_initialise(data_file, config_file=config_file,**kwargs)
if wait_for_input:
input("Edit the generated config file {:s} and press any key to run.".format(config_file))
cigconf = Configuration(config_file)
analysis_module = get_module(cigconf.configuration['analysis_method'])
analysis_module.process(cigconf.configuration)
if plot:
try:
from pcigale_plots import sed # This modifies the backend to Agg so I hide it here
old_version = True
except ImportError:
from pcigale_plots.plot_types.sed import sed
old_version = False
if old_version:
import pcigale
#warnings.warn("You are using CIGALE version {:s}, for which support is deprecated. Please update to 2020.0 or higher.".format(pcigale.__version__))
sed(cigconf,"mJy",True)
else:
# TODO: Let the user customize the plot.
series = ['stellar_attenuated', 'stellar_unattenuated', 'dust', 'agn', 'model']
sed(cigconf,"mJy",True, (False, False), (False, False), series, "pdf", "out")
# Set back to a GUI
import matplotlib
matplotlib.use('TkAgg')
# Rename the default output directory?
if outdir != 'out':
try:
os.system("rm -rf {}".format(outdir))
os.system("mv out {:s}".format(outdir))
except:
print("Invalid output directory path. Output stored in out/")
# Move input files into outdir too
os.system("mv {:s} {:s}".format(data_file, outdir))
os.system("mv {:s} {:s}".format(config_file, outdir))
os.system("mv {:s}.spec {:s}".format(config_file, outdir))
# Compare?
if compare_obs_model:
#Generate an observation/model flux comparison table.
with Database() as base:
filters = OrderedDict([(name, base.get_filter(name))
for name in cigconf.configuration['bands']
if not (name.endswith('_err') or name.startswith('line')) ])
filters_wl = np.array([filt.pivot_wavelength
for filt in filters.values()])
mods = Table.read(outdir+'/results.fits')
try:
obs = Table.read(os.path.join(outdir, cigconf.configuration['data_file']))
        except Exception:
            print("Astropy was unable to read the observations table. Please ensure it is in the FITS format.")
            return
for model, obj in zip(mods, obs):
photo_obs_model = Table()
photo_obs_model['lambda_filter'] = [wl/1000 for wl in filters_wl]
photo_obs_model['model_flux'] = np.array([model["best."+filt] for filt in filters.keys()])
photo_obs_model['observed_flux'] = np.array([obj[filt] for filt in filters.keys()])
photo_obs_model['observed_flux_err'] = np.array([obj[filt+'_err'] for filt in filters.keys()])
photo_obs_model.write(outdir+"/photo_observed_model_"+str(model['id'])+".dat",format="ascii",overwrite=True)
#import pdb; pdb.set_trace()
return
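# End-to-end sketch (photometry_table as described in gen_cigale_in; the
# column names involved are hypothetical):
#
#   >>> run(photometry_table, 'z_spec', outdir='cigale_out',
#   ...     plot=False, compare_obs_model=True)
#
# This writes cigale_in.fits / pcigale.ini, runs the PDF analysis, moves
# everything into outdir and, with compare_obs_model=True, writes one
# photo_observed_model_<id>.dat per object.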
def host_run(host, cut_photom=None, cigale_file=None):
"""
Run CIGALE on an FRBGalaxy's photometry
and store results in a folder with the
FRBGalaxy's name.
    Args
    ----
    host (FRBGalaxy): A host galaxy. Its redshift and
        photometry are used unless cut_photom is given.
    cut_photom (astropy Table, optional): Table containing
        galaxy photometry, used in place of host.photom.
        Table columns must be in the format '<SOURCE>_<BAND>'
        and '<SOURCE>_<BAND>_err',
        e.g. SDSS_u, SDSS_u_err, Pan-STARRS_g
    cigale_file (str, optional): Name of main
        CIGALE output file. Must be in the format
        `<something>_CIGALE.fits`. No file is
        renamed if nothing is provided.
    """
cigale_tbl = Table()
cigale_tbl['z'] = [host.z]
cigale_tbl['ID'] = host.name
# Deal with photometry
if cut_photom is not None:
photom_obj = cut_photom
else:
photom_obj = host.photom
for key in photom_obj.keys():
cigale_tbl[key] = photom_obj[key]
# Run
run(cigale_tbl, 'z', outdir=host.name, compare_obs_model=True, idcol='ID')
# Rename/move
if cigale_file is not None:
os.system('cp -rp {:s}/results.fits {:s}'.format(host.name, cigale_file))
model_file = cigale_file.replace('CIGALE', 'CIGALE_model')
os.system('cp -rp {:s}/{:s}_best_model.fits {:s}'.format(host.name, host.name, model_file))
photo_file = cigale_file.replace('CIGALE.fits', 'CIGALE_photo.dat')
os.system('cp -rp {:s}/photo_observed_model_{:s}.dat {:s}'.format(host.name, host.name, photo_file))
# SFH
sfh_file = cigale_file.replace('CIGALE', 'CIGALE_SFH')
os.system('mv {:s}/{:s}_SFH.fits {:s}'.format(host.name, host.name, sfh_file))
return
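# Minimal sketch of host_run (assumes an FRBGalaxy-like object `host` with
# .name, .z and .photom already populated; how to build one is package-
# specific and omitted here):
#
#   >>> host_run(host, cigale_file='HG_CIGALE.fits')  # doctest: +SKIP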
| FRBs/FRB | frb/galaxies/cigale.py | Python | bsd-3-clause | 17,194 | [
"Galaxy"
] | c4abe595273a7a2a47fa8292bf45721c674a3ff99a23d0605ecf16398e899756 |
"""
#;+
#; NAME:
#; igmguesses
#; Version 1.0
#;
#; PURPOSE:
#; Module for LineIDs and Initial guesses in IGM spectra with QT
#; Likely only useful for lowz-IGM
#; 14-Aug-2015 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys, warnings, imp
import matplotlib.pyplot as plt
import glob
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy.units import Quantity
from astropy import units as u
from astropy.io import fits, ascii
from astropy import constants as const
from linetools.lists.linelist import LineList
from linetools.spectra.xspectrum1d import XSpectrum1D
from linetools.spectra import convolve as lsc
from linetools.spectralline import AbsLine
#from xastropy.atomic import ionization as xatomi
from xastropy.plotting import utils as xputils
from xastropy.xguis import spec_widgets as xspw
from xastropy.xguis import utils as xxgu
from xastropy.spec import voigt as xsv
from xastropy.xutils import xdebug as xdb
xa_path = imp.find_module('xastropy')[1]
#class IGMGuessesGui(QtGui.QMainWindow):
# GUI for fitting LLS in a spectrum
class IGMGuessesGui(QtGui.QMainWindow):
''' GUI to fit LLS in a given spectrum
v0.5
30-Jul-2015 by JXP
'''
def __init__(self, ispec, parent=None, lls_fit_file=None,
srch_id=True, outfil=None, fwhm=3., zqso=None,plot_residuals=True):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
lls_fit_file: str, optional
Name of the LLS fit file to input
smooth: float, optional
Number of pixels to smooth on
zqso: float, optional
Redshift of the quasar. If input, a Telfer continuum is used
plot_residuals : bool, optional
Whether to plot residuals
'''
# TODO
# 1. Fix convolve window size
# 2. Avoid sorting of wavelengths
# 3. Remove astropy.relativity
# 4. Display self.z
# 5. Recenter on 'z'
# 6. Add COS LSF
# 7. Refit component key 'F'
# 8. Plot only selected components [DEPRECATED]
# 9. Avoid shifting z of component outside its velocity range
# 10. Write Component vlim to JSON
# 11. Key to set line as some other transition (e.g. RMB in XSpec)
# 12. Mask array with points
# 13. Toggle line ID names
# 14. Add error + residual arrays [NT]
# 15. Adjust component redshift by keystroke
# 16. Input redshift value via Widget
# 17. Use Component list to jump between components (like 'S')
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Initialize
if outfil is None:
self.outfil = 'IGM_model.json'
else:
self.outfil = outfil
self.fwhm = fwhm
self.plot_residuals = plot_residuals
# Spectrum
spec, spec_fil = xxgu.read_spec(ispec)
spec.mask = np.zeros(len(spec.dispersion),dtype=int)
# Full Model
self.model = XSpectrum1D.from_tuple((
spec.dispersion,np.ones(len(spec.dispersion))))
# LineList (Grab ISM and HI as defaults)
self.llist = xxgu.set_llist('ISM')
# Load others
self.llist['HI'] = LineList('HI')
self.llist['Strong'] = LineList('Strong')
self.llist['Lists'].append('HI')
self.llist['HI']._data = self.llist['HI']._data[::-1] #invert order of Lyman series
#self.llist['show_line'] = np.arange(10) #maximum 10 to show for Lyman series
#define initial redshift
z=0.0
self.llist['z'] = z
# Grab the pieces and tie together
self.slines_widg = xspw.SelectedLinesWidget(
self.llist[self.llist['List']], parent=self,
init_select='All')
self.fiddle_widg = FiddleComponentWidget(parent=self)
self.comps_widg = ComponentListWidget([], parent=self)
self.velplot_widg = IGGVelPlotWidget(spec, z,
parent=self, llist=self.llist, fwhm=self.fwhm,plot_residuals=self.plot_residuals)
self.wq_widg = xxgu.WriteQuitWidget(parent=self)
# Setup strongest LineList
self.llist['strongest'] = LineList('ISM')
self.llist['Lists'].append('strongest')
self.update_strongest_lines()
self.slines_widg.selected = self.llist['show_line']
self.slines_widg.on_list_change(
self.llist[self.llist['List']])
# Connections (buttons are above)
#self.spec_widg.canvas.mpl_connect('key_press_event', self.on_key)
#self.abssys_widg.abslist_widget.itemSelectionChanged.connect(
# self.on_list_change)
# Layout
anly_widg = QtGui.QWidget()
anly_widg.setMaximumWidth(400)
anly_widg.setMinimumWidth(250)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.fiddle_widg)
vbox.addWidget(self.comps_widg)
vbox.addWidget(self.slines_widg)
vbox.addWidget(self.wq_widg)
anly_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.velplot_widg)
hbox.addWidget(anly_widg)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def update_strongest_lines(self):
'''Grab the strongest lines in the spectrum at the current
redshift.
'''
z = self.velplot_widg.z
wvmin = np.min(self.velplot_widg.spec.dispersion)
wvmax = np.max(self.velplot_widg.spec.dispersion)
wvlims = (wvmin/(1+z),wvmax/(1+z))
transitions = self.llist['ISM'].available_transitions(
wvlims,n_max=100, n_max_tuple=4,min_strength=5.)
if transitions is not None:
names = list(np.array(transitions['name']))
else:
names = ['HI 1215']
self.llist['strongest'].subset_lines(reset_data=True,subset=names)
self.llist['show_line'] = np.arange(len(self.llist['strongest']._data))
self.llist['List'] = 'strongest'
# self.llist['strongest'] = self.llist['ISM'].subset(names)
def on_list_change(self):
self.update_boxes()
def create_status_bar(self):
self.status_text = QtGui.QLabel("IGMGuessesGui")
self.statusBar().addWidget(self.status_text, 1)
def delete_component(self, component):
'''Remove component'''
# Component list
self.comps_widg.remove_item(component.name)
# Fiddle query (need to come back to it)
if component is self.fiddle_widg.component:
self.fiddle_widg.reset()
# Mask
for line in component.lines:
wvmnx = line.wrest*(1+component.zcomp)*(1 + component.vlim.value/3e5)
gdp = np.where((self.velplot_widg.spec.dispersion>wvmnx[0])&
(self.velplot_widg.spec.dispersion<wvmnx[1]))[0]
self.velplot_widg.spec.mask[gdp] = 0
# Delete
del component
# Update
self.velplot_widg.update_model()
self.velplot_widg.on_draw(fig_clear=True)
def updated_slines(self, selected):
self.llist['show_line'] = selected
self.velplot_widg.on_draw(fig_clear=True)
def updated_component(self):
'''Component attrib was updated. Deal with it'''
self.fiddle_widg.component.sync_lines()
self.velplot_widg.update_model()
self.velplot_widg.on_draw(fig_clear=True)
def updated_compslist(self,component):
'''Component list was updated'''
if component is None:
self.fiddle_widg.reset()
else:
self.fiddle_widg.init_component(component)
#self.velplot_widg.update_model()
#self.velplot_widg.on_draw(fig_clear=True)
def write_out(self):
import json, io
# Create dict of the components
out_dict = dict(cmps={},
spec_file=self.velplot_widg.spec.filename,
fwhm=self.fwhm)
mskp = np.where(self.velplot_widg.spec.mask == 1)[0]
if len(mskp) > 0:
out_dict['mask'] = list(mskp)
# Load
for kk,comp in enumerate(self.comps_widg.all_comp):
key = comp.name
out_dict['cmps'][key] = {}
out_dict['cmps'][key]['z'] = comp.attrib['z']
out_dict['cmps'][key]['NHI'] = comp.attrib['N']
out_dict['cmps'][key]['bval'] = comp.attrib['b'].value
out_dict['cmps'][key]['vlim'] = list(comp.vlim.value)
out_dict['cmps'][key]['Quality'] = str(comp.attrib['Quality'])
out_dict['cmps'][key]['comment'] = str(comp.comment)
# Write
print('Wrote: {:s}'.format(self.outfil))
with io.open(self.outfil, 'w', encoding='utf-8') as f:
f.write(unicode(json.dumps(out_dict, sort_keys=True, indent=4,
separators=(',', ': '))))
# Write + Quit
def write_quit(self):
self.write_out()
self.quit()
# Quit
def quit(self):
self.close()
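# Launching the GUI from an interactive session (a sketch; the spectrum
# path is a placeholder and must point to a real file):
#
#   >>> app = QtGui.QApplication([])
#   >>> gui = IGMGuessesGui('my_spectrum.fits')
#   >>> gui.show()
#   >>> app.exec_()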
######################
class IGGVelPlotWidget(QtGui.QWidget):
''' Widget for a velocity plot with interaction.
Adapted from VelPlotWidget in spec_guis
14-Aug-2015 by JXP
'''
def __init__(self, ispec, z, parent=None, llist=None, norm=True,
vmnx=[-500., 500.]*u.km/u.s, fwhm=0.,plot_residuals=True):
'''
spec = Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
'''
super(IGGVelPlotWidget, self).__init__(parent)
# Initialize
self.parent = parent
spec, spec_fil = xxgu.read_spec(ispec)
self.spec = spec
self.spec_fil = spec_fil
self.fwhm = fwhm
self.z = z
self.vmnx = vmnx
self.norm = norm
# Init
self.flag_add = False
self.flag_idlbl = False
self.flag_mask = False
self.wrest = 0.
self.avmnx = np.array([0.,0.])*u.km/u.s
self.model = XSpectrum1D.from_tuple((
spec.dispersion,np.ones(len(spec.dispersion))))
self.plot_residuals = plot_residuals
#Define arrays for plotting residuals
if self.plot_residuals:
self.residual_normalization_factor = 0.02/np.median(self.spec.sig)
self.residual_limit = self.spec.sig * self.residual_normalization_factor
self.residual = (self.spec.flux - self.model.flux) * self.residual_normalization_factor
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx.value # Too much pain to use units with this
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = xxgu.navigate(0,0,init=True)
# Status Bar?
#if not status is None:
# self.statusBar = status
# Line List
if llist is None:
self.llist = xxgu.set_llist(['HI 1215', 'HI 1025'])
else:
self.llist = llist
self.llist['z'] = self.z
# Indexing for line plotting
self.idx_line = 0
self.init_lines()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Sub_plots
self.sub_xy = [5,3]
self.subxy_state = 'Out'
self.fig.subplots_adjust(hspace=0.0, wspace=0.1,left=0.04,right=0.975)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Load them up for display
def init_lines(self):
wvmin = np.min(self.spec.dispersion)
wvmax = np.max(self.spec.dispersion)
#
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
wrest = self.llist[self.llist['List']].wrest
wvobs = (1+self.z) * wrest
gdlin = np.where( (wvobs > wvmin) & (wvobs < wvmax) )[0]
self.llist['show_line'] = gdlin
# Update GUI
self.parent.slines_widg.selected = self.llist['show_line']
self.parent.slines_widg.on_list_change(
self.llist[self.llist['List']])
# Add a component
def update_model(self):
if self.parent is None:
return
all_comp = self.parent.comps_widg.all_comp #selected_components()
if len(all_comp) == 0:
self.model.flux[:] = 1.
return
# Setup lines
wvmin = np.min(self.spec.dispersion)
wvmax = np.max(self.spec.dispersion)
gdlin = []
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
for comp in all_comp:
for line in comp.lines:
wvobs = (1+line.attrib['z'])*line.wrest
if (wvobs>wvmin) & (wvobs<wvmax):
gdlin.append(line)
# Voigt
self.model = xsv.voigt_model(self.spec.dispersion,gdlin,
fwhm=self.fwhm)#,debug=True)
#Define arrays for plotting residuals
if self.plot_residuals:
self.residual_limit = self.spec.sig * self.residual_normalization_factor
self.residual = (self.spec.flux - self.model.flux) * self.residual_normalization_factor
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Add a component
def add_component(self,wrest):
'''Generate a component and fit with Gaussian'''
#
# Center z and reset vmin/vmax
zmin,zmax = self.z + (1+self.z)*(self.avmnx.value/3e5)
vlim = self.avmnx - (self.avmnx[1]+self.avmnx[0])/2.
new_comp = Component((zmin+zmax)/2.,wrest,vlim=vlim,
linelist=self.llist['ISM'])
# Fit
#print('doing fit for {:g}'.format(wrest))
self.fit_component(new_comp)
# Mask for analysis
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
for line in new_comp.lines:
#print('masking {:g}'.format(line.wrest))
wvmnx = line.wrest*(1+new_comp.zcomp)*(1 + vlim.value/3e5)
gdp = np.where((self.spec.dispersion>wvmnx[0])&
(self.spec.dispersion<wvmnx[1]))[0]
if len(gdp) > 0:
self.spec.mask[gdp] = 1
# Add to component list and Fiddle
if self.parent is not None:
self.parent.comps_widg.add_component(new_comp)
self.parent.fiddle_widg.init_component(new_comp)
# Update model
self.current_comp = new_comp
self.update_model()
# Update model and plot
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
#,weights=1./(np.ones(len(wave))*0.1))
def fit_component(self,component):
'''Fit the component and save values'''
from astropy.modeling import fitting
# Generate Fit line
fit_line = AbsLine(component.init_wrest,
linelist=self.llist[self.llist['List']])
fit_line.analy['vlim'] = component.vlim
fit_line.analy['spec'] = self.spec
fit_line.attrib['z'] = component.zcomp
fit_line.measure_aodm()
# Guesses
fmin = np.argmin(self.spec.flux[fit_line.analy['pix']])
zguess = self.spec.dispersion[fit_line.analy['pix'][fmin]]/component.init_wrest - 1.
bguess = (component.vlim[1]-component.vlim[0])/2.
Nguess = fit_line.attrib['logN']
# Voigt model
fitvoigt = xsv.single_voigt_model(logN=Nguess,b=bguess.value,
z=zguess, wrest=component.init_wrest.value,
gamma=fit_line.data['gamma'].value,
f=fit_line.data['f'], fwhm=self.fwhm)
# Restrict z range
fitvoigt.logN.min = 10.
fitvoigt.b.min = 1.
fitvoigt.z.min = component.zcomp+component.vlim[0].value/3e5/(1+component.zcomp)
fitvoigt.z.max = component.zcomp+component.vlim[1].value/3e5/(1+component.zcomp)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Fit
fitter = fitting.LevMarLSQFitter()
parm = fitter(fitvoigt,self.spec.dispersion[fit_line.analy['pix']],
self.spec.flux[fit_line.analy['pix']].value)
# Save and sync
component.attrib['N'] = parm.logN.value
component.attrib['z'] = parm.z.value
component.attrib['b'] = parm.b.value * u.km/u.s
component.sync_lines()
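    # Note: fit_component() is driven by the 'A' (add) and 'R' (refit)
    # keystrokes in on_key(); it seeds logN from an AODM measurement,
    # b from the half-width of the velocity window, and z from the flux
    # minimum inside the window, before the least-squares Voigt fit.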
def out_of_bounds(self,coord):
'''Check for out of bounds
'''
# Default is x
if ((coord < np.min(self.spec.dispersion))
or (coord > np.max(self.spec.dispersion))):
print('Out of bounds!')
return True
else:
return False
# Key stroke
def on_key(self,event):
# Init
rescale = True
fig_clear = False
wrest = None
flg = 1
sv_idx = self.idx_line
## Change rows/columns
if event.key == 'k':
self.sub_xy[0] = max(0, self.sub_xy[0]-1)
if event.key == 'K':
self.sub_xy[0] = self.sub_xy[0]+1
if event.key == 'c':
self.sub_xy[1] = max(1, self.sub_xy[1]-1)
if event.key == 'C':
self.sub_xy[1] = max(1, self.sub_xy[1]+1)
if event.key == '(':
if self.subxy_state == 'Out':
self.sub_xy = [3,2]
self.subxy_state = 'In'
else:
self.sub_xy = [5,3]
self.subxy_state = 'Out'
## NAVIGATING
if event.key in self.psdict['nav']:
flg = xxgu.navigate(self.psdict,event)
if event.key == '-':
self.idx_line = max(0, self.idx_line-self.sub_xy[0]*self.sub_xy[1]) # Min=0
if self.idx_line == sv_idx:
print('Edge of list')
if event.key == '=':
self.idx_line = min(len(self.llist['show_line'])-self.sub_xy[0]*self.sub_xy[1],
self.idx_line + self.sub_xy[0]*self.sub_xy[1])
if self.idx_line == sv_idx:
print('Edge of list')
# Find line
try:
wrest = event.inaxes.get_gid()
except AttributeError:
return
        else:
            wvobs = wrest*(1+self.z)
## Fiddle with a Component
if event.key in ['N','n','v','V','<','>','R']:
if self.parent.fiddle_widg.component is None:
print('Need to generate a component first!')
return
if event.key == 'N':
self.parent.fiddle_widg.component.attrib['N'] += 0.05
elif event.key == 'n':
self.parent.fiddle_widg.component.attrib['N'] -= 0.05
elif event.key == 'v':
self.parent.fiddle_widg.component.attrib['b'] -= 2*u.km/u.s
elif event.key == 'V':
self.parent.fiddle_widg.component.attrib['b'] += 2*u.km/u.s
elif event.key == '<':
self.parent.fiddle_widg.component.attrib['z'] -= 2e-5 #should be a fraction of pixel size
elif event.key == '>':
self.parent.fiddle_widg.component.attrib['z'] += 2e-5
elif event.key == 'R': # Refit
self.fit_component(self.parent.fiddle_widg.component)
# Updates (this captures them all and redraws)
self.parent.fiddle_widg.update_component()
## Grab/Delete a component
if event.key in ['D','S','d']:
components = self.parent.comps_widg.all_comp
iwrest = np.array([comp.init_wrest.value for comp in components])*u.AA
mtc = np.where(wrest == iwrest)[0]
if len(mtc) == 0:
return
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
dvz = np.array([3e5*(self.z- components[mt].zcomp)/(1+self.z) for mt in mtc])
# Find minimum
mindvz = np.argmin(np.abs(dvz+event.xdata))
if event.key == 'S':
self.parent.fiddle_widg.init_component(components[mtc[mindvz]])
elif event.key == 'd': # Delete selected component
self.parent.delete_component(self.parent.fiddle_widg.component)
elif event.key == 'D': # Delete nearest component to cursor
self.parent.delete_component(components[mtc[mindvz]])
#absline = self.abs_sys.grab_line((self.z,wrest))
#kwrest = wrest.value
## Reset z
if event.key == ' ': #space to move redshift
#from xastropy.relativity import velocities
#newz = velocities.z_from_v(self.z, event.xdata)
self.z = self.z + event.xdata*(1+self.z)/3e5
#self.abs_sys.zabs = newz
# Drawing
self.psdict['xmnx'] = self.vmnx.value
if event.key == '^':
zgui = xxgu.AnsBox('Enter redshift:',float)
zgui.exec_()
self.z = zgui.value
self.psdict['xmnx'] = self.vmnx.value
# Choose line
if event.key == "%":
# GUI
self.select_line_widg = xspw.SelectLineWidget(
self.llist[self.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
#
self.z = (wvobs/wrest - 1.).value
#self.statusBar().showMessage('z = {:f}'.format(z))
self.init_lines()
# Toggle line lists
if event.key == 'H':
self.llist['List'] = 'HI'
self.init_lines()
if event.key == 'U':
self.parent.update_strongest_lines()
self.init_lines()
## Velocity limits
unit = u.km/u.s
        if event.key in ['1','2']:
            # Adjust the velocity limits of the currently selected component
            if self.parent.fiddle_widg.component is None:
                print('Need to select a component first!')
            else:
                comp = self.parent.fiddle_widg.component
                if event.key == '1':
                    comp.vlim[0] = event.xdata*unit
                if event.key == '2':
                    comp.vlim[1] = event.xdata*unit
                self.parent.fiddle_widg.update_component()
## Add component
if event.key == 'A': # Add to lines
if self.out_of_bounds(wvobs*(1+event.xdata/3e5)):
return
if self.flag_add is False:
self.vtmp = event.xdata
self.flag_add = True
self.wrest = wrest
else:
self.avmnx = np.array([np.minimum(self.vtmp,event.xdata),
np.maximum(self.vtmp,event.xdata)])*unit
self.add_component(wrest)
# Reset
self.flag_add = False
self.wrest = 0.
# Fiddle with analysis mask
if event.key in ['x','X']:
# x = Delete mask
# X = Add to mask
if self.flag_mask is False:
self.wrest = wrest
self.wtmp = wvobs*(1+event.xdata/3e5)
self.vtmp = event.xdata
self.flag_mask = True
else:
wtmp2 = wvobs*(1+event.xdata/3e5)
twvmnx = [np.minimum(self.wtmp,wtmp2), np.maximum(self.wtmp,wtmp2)]
# Modify mask
mskp = np.where((self.spec.dispersion>twvmnx[0])&
(self.spec.dispersion<twvmnx[1]))[0]
#print(twvmnx,len(mskp))
if event.key == 'x':
self.spec.mask[mskp] = 0
elif event.key == 'X':
self.spec.mask[mskp] = 1
# Reset
self.flag_mask = False
self.wrest = 0.
# Labels
if event.key == 'L': # Toggle ID lines
            self.flag_idlbl = not self.flag_idlbl
# AODM plot
if event.key == ':': #
# Grab good lines
from xastropy.xguis import spec_guis as xsgui
gdl = [iline.wrest for iline in self.abs_sys.lines
if iline.analy['do_analysis'] > 0]
# Launch AODM
if len(gdl) > 0:
gui = xsgui.XAODMGui(self.spec, self.z, gdl, vmnx=self.vmnx, norm=self.norm)
gui.exec_()
else:
print('VelPlot.AODM: No good lines to plot')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
#if wrest is not None: # Single window
# flg = 3
if event.key in ['c','C','k','K','W','!', '@', '=', '-', 'X', ' ','R']: # Redraw all
flg = 1
if event.key in ['Y']:
rescale = False
if event.key in ['c','C','k','K', 'R', '(']:
fig_clear = True
if flg==1: # Default is to redraw
self.on_draw(rescale=rescale, fig_clear=fig_clear)
elif flg==2: # Layer (no clear)
self.on_draw(replot=False, rescale=rescale)
elif flg==3: # Layer (no clear)
self.on_draw(in_wrest=wrest, rescale=rescale)
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, replot=True, in_wrest=None, rescale=True, fig_clear=False):
""" Redraws the figure
"""
#
if replot is True:
if fig_clear:
self.fig.clf()
# Title
self.fig.suptitle('z={:.5f}'.format(self.z),fontsize='large')
# Components
components = self.parent.comps_widg.all_comp
iwrest = np.array([comp.init_wrest.value for comp in components])*u.AA
# Loop on windows
all_idx = self.llist['show_line']
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Labels
if self.flag_idlbl:
line_wvobs = []
line_lbl = []
for comp in components:
if comp.attrib['Quality'] == 'None':
la = ''
else:
la = comp.attrib['Quality']
for line in comp.lines:
line_wvobs.append(line.wrest.value*(line.attrib['z']+1))
line_lbl.append(line.trans+',{:.3f}{:s}'.format(line.attrib['z'],la))
line_wvobs = np.array(line_wvobs)*u.AA
line_lbl = np.array(line_lbl)
# Subplots
nplt = self.sub_xy[0]*self.sub_xy[1]
if len(all_idx) <= nplt:
self.idx_line = 0
subp = np.arange(nplt) + 1
subp_idx = np.hstack(subp.reshape(self.sub_xy[0],self.sub_xy[1]).T)
#print('idx_l={:d}, nplt={:d}, lall={:d}'.format(self.idx_line,nplt,len(all_idx)))
#try different color per ion species
color_model = '#999966'
colors = ['#0066FF','#339933','#CC3300','#660066','#FF9900','#B20047']
color_ind = 0
#loop over individual velplot axes
for jj in range(min(nplt, len(all_idx))):
try:
idx = all_idx[jj+self.idx_line]
except IndexError:
continue # Likely too few lines
#print('jj={:d}, idx={:d}'.format(jj,idx))
# Grab line
wrest = self.llist[self.llist['List']].wrest[idx]
kwrest = wrest.value # For the Dict
#define colors for visually grouping same species
if jj > 0:
name_aux = self.llist[self.llist['List']].name[idx].split(' ')[0]
name_aux2 = self.llist[self.llist['List']].name[idx-1].split(' ')[0]
if name_aux != name_aux2:
color_ind += 1
color = colors[color_ind % len(colors)]
# Single window?
#if in_wrest is not None:
# if np.abs(wrest-in_wrest) > (1e-3*u.AA):
# continue
# Generate plot
self.ax = self.fig.add_subplot(self.sub_xy[0],self.sub_xy[1], subp_idx[jj])
self.ax.clear()
# GID for referencing
self.ax.set_gid(wrest)
# Zero velocity line
self.ax.plot( [0., 0.], [-1e9, 1e9], ':', color='gray')
# Velocity
wvobs = (1+self.z) * wrest
wvmnx = wvobs*(1 + np.array(self.psdict['xmnx'])/3e5)
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s')
# Plot
self.ax.plot(velo, self.spec.flux, '-',color=color,drawstyle='steps-mid',lw=0.5)
# Model
self.ax.plot(velo, self.model.flux, '-',color=color_model,lw=0.5)
#Error & residuals
if self.plot_residuals:
self.ax.plot(velo, self.residual_limit, 'k-',drawstyle='steps-mid',lw=0.5)
self.ax.plot(velo, -self.residual_limit, 'k-',drawstyle='steps-mid',lw=0.5)
self.ax.plot(velo, self.residual, '.',color='grey',ms=2)
#import pdb
#pdb.set_trace()
# Labels
if (((jj+1) % self.sub_xy[0]) == 0) or ((jj+1) == len(all_idx)):
self.ax.set_xlabel('Relative Velocity (km/s)')
else:
self.ax.get_xaxis().set_ticks([])
lbl = self.llist[self.llist['List']].name[idx]
self.ax.text(0.01, 0.15, lbl, color=color, transform=self.ax.transAxes,
size='x-small', ha='left',va='center',backgroundcolor='w',bbox={'pad':0,'edgecolor':'none'})
if self.flag_idlbl:
# Any lines inside?
mtw = np.where((line_wvobs > wvmnx[0]) & (line_wvobs<wvmnx[1]))[0]
for imt in mtw:
v = 3e5*(line_wvobs[imt]/wvobs - 1)
self.ax.text(v, 0.5, line_lbl[imt], color=color_model,backgroundcolor='w',
bbox={'pad':0,'edgecolor':'none'}, size='xx-small', rotation=90.,ha='center',va='center')
# Analysis regions
if np.sum(self.spec.mask) > 0.:
gdp = self.spec.mask==1
if len(gdp) > 0:
self.ax.scatter(velo[gdp],self.spec.flux[gdp],
marker='o',color=color,s=3.,alpha=0.5)
# Reset window limits
self.ax.set_ylim(self.psdict['ymnx'])
self.ax.set_xlim(self.psdict['xmnx'])
# Add line?
if self.wrest == wrest:
self.ax.plot([self.vtmp]*2,self.psdict['ymnx'], '--',
color='red')
# Components
mtc = np.where(wrest == iwrest)[0]
if len(mtc) > 0:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
for mt in mtc:
comp = components[mt]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
dvz = const.c.to('km/s')*(self.z-comp.zcomp)/(1+self.z)
if dvz.value < np.max(np.abs(self.psdict['xmnx'])):
if comp is self.parent.fiddle_widg.component:
lw = 1.5
else:
lw = 1.
# Plot
for vlim in comp.vlim:
self.ax.plot([vlim.value-dvz.value]*2,self.psdict['ymnx'],
'--', color='r',linewidth=lw)
self.ax.plot([-1.*dvz.value]*2,[1.0,1.05],
'-', color='grey',linewidth=lw)
# Fonts
xputils.set_fontsize(self.ax,6.)
# Draw
self.canvas.draw()
############
class FiddleComponentWidget(QtGui.QWidget):
''' Widget to fiddle with a given component
'''
def __init__(self, component=None, parent=None):
'''
'''
super(FiddleComponentWidget, self).__init__(parent)
self.parent = parent
#if not status is None:
# self.statusBar = status
self.label = QtGui.QLabel('Component:',self)
self.zwidget = xxgu.EditBox(-1., 'zc=', '{:0.5f}')
self.Nwidget = xxgu.EditBox(-1., 'Nc=', '{:0.2f}')
self.bwidget = xxgu.EditBox(-1., 'bc=', '{:0.1f}')
self.ddlbl = QtGui.QLabel('Quality')
self.ddlist = QtGui.QComboBox(self)
self.ddlist.addItem('None')
self.ddlist.addItem('a')
self.ddlist.addItem('b')
self.ddlist.addItem('c')
self.Cwidget = xxgu.EditBox('None', 'Comment=', '{:s}')
# Init further
if component is not None:
self.init_component(component)
else:
self.component = component
# Connect
self.ddlist.activated[str].connect(self.setQuality)
self.connect(self.Nwidget.box,
QtCore.SIGNAL('editingFinished ()'), self.setbzN)
self.connect(self.zwidget.box,
QtCore.SIGNAL('editingFinished ()'), self.setbzN)
self.connect(self.bwidget.box,
QtCore.SIGNAL('editingFinished ()'), self.setbzN)
self.connect(self.Cwidget.box,
QtCore.SIGNAL('editingFinished ()'), self.setbzN)
# Layout
zNbwidg = QtGui.QWidget()
hbox2 = QtGui.QHBoxLayout()
hbox2.addWidget(self.zwidget)
hbox2.addWidget(self.Nwidget)
hbox2.addWidget(self.bwidget)
zNbwidg.setLayout(hbox2)
ddwidg = QtGui.QWidget()
vbox1 = QtGui.QVBoxLayout()
vbox1.addWidget(self.ddlbl)
vbox1.addWidget(self.ddlist)
ddwidg.setLayout(vbox1)
commwidg = QtGui.QWidget()
hbox3 = QtGui.QHBoxLayout()
hbox3.addWidget(ddwidg)
hbox3.addWidget(self.Cwidget)
commwidg.setLayout(hbox3)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.label)
vbox.addWidget(zNbwidg)
vbox.addWidget(commwidg)
self.setLayout(vbox)
def init_component(self,component):
'''Setup Widget for the input component'''
self.component = component
# Values
self.Nwidget.set_text(self.component.attrib['N'])
self.zwidget.set_text(self.component.attrib['z'])
self.bwidget.set_text(self.component.attrib['b'].value)
self.Cwidget.set_text(self.component.comment)
# Quality
idx = self.ddlist.findText(self.component.attrib['Quality'])
self.ddlist.setCurrentIndex(idx)
# Label
self.set_label()
def setQuality(self,text):
if self.component is not None:
self.component.attrib['Quality'] = text
def reset(self):
#
self.component = None
# Values
self.Nwidget.set_text(-1.)
self.zwidget.set_text(-1.)
self.bwidget.set_text(-1.)
self.Cwidget.set_text('None')
idx = self.ddlist.findText('None')
self.ddlist.setCurrentIndex(idx)
# Label
self.set_label()
def update_component(self):
'''Values have changed'''
self.Nwidget.set_text(self.component.attrib['N'])
self.zwidget.set_text(self.component.attrib['z'])
self.bwidget.set_text(self.component.attrib['b'].value)
self.Cwidget.set_text(self.component.comment)
if self.parent is not None:
self.parent.updated_component()
def set_label(self):
'''Sets the label for the Widget'''
if self.component is not None:
self.label.setText('Component: {:s}'.format(self.component.name))
else:
self.label.setText('Component:')
def setbzN(self):
        '''Set the component N, z, b, and comment from the boxes'''
if self.component is None:
print('Need to generate a component first!')
else:
# Grab values
self.component.attrib['N'] = (float(self.Nwidget.box.text()))
self.component.attrib['z'] = (float(self.zwidget.box.text()))
self.component.attrib['b'] = (float(self.bwidget.box.text()))*u.km/u.s
self.component.comment = str(self.Cwidget.box.text())
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Update beyond
if self.parent is not None:
self.parent.updated_component()
# #####
class ComponentListWidget(QtGui.QWidget):
''' Widget to organize components on a sightline
Parameters:
-----------
components: List
List of components
16-Dec-2014 by JXP
'''
def __init__(self, components, parent=None, no_buttons=False):
'''
only_one: bool, optional
Restrict to one selection at a time? [False]
no_buttons: bool, optional
Eliminate Refine/Reload buttons?
'''
super(ComponentListWidget, self).__init__(parent)
self.parent = parent
#if not status is None:
# self.statusBar = status
self.all_comp = components # Actual components
list_label = QtGui.QLabel('Components:')
self.complist_widget = QtGui.QListWidget(self)
#self.complist_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.complist_widget.addItem('None')
#self.abslist_widget.addItem('Test')
# Lists
self.items = [] # Selected
self.all_items = [] # Names
self.complist_widget.setCurrentRow(0)
self.complist_widget.itemSelectionChanged.connect(self.on_list_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(list_label)
vbox.addWidget(self.complist_widget)
self.setLayout(vbox)
# ##
def on_list_change(self):
'''
Changed an item in the list
'''
item = self.complist_widget.selectedItems()
txt = item[0].text()
if txt == 'None':
if self.parent is not None:
self.parent.updated_compslist(None)
else:
ii = self.all_items.index(txt)
if self.parent is not None:
self.parent.updated_compslist(self.all_comp[ii])
'''
items = self.complist_widget.selectedItems()
# Empty the list
#self.abs_sys = []
if len(self.abs_sys) > 0:
for ii in range(len(self.abs_sys)-1,-1,-1):
self.abs_sys.pop(ii)
# Load up abs_sys (as need be)
new_items = []
for item in items:
txt = item.text()
# Dummy
if txt == 'None':
continue
print('Including {:s} in the list'.format(txt))
# Using LLS for now. Might change to generic
new_items.append(txt)
ii = self.all_items.index(txt)
self.abs_sys.append(self.all_abssys[ii])
# Pass back
self.items = new_items
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
'''
'''
def selected_components(self):
items = self.complist_widget.selectedItems()
selc = []
for item in items:
txt = item.text()
if txt == 'None':
continue
ii = self.all_items.index(txt)
selc.append(self.all_comp[ii])
# Return
return selc
'''
def add_component(self,component):
self.all_comp.append( component )
self.add_item(component.name)
def add_item(self,comp_name):
#
self.all_items.append(comp_name)
self.complist_widget.addItem(comp_name)
self.complist_widget.item(len(self.all_items)).setSelected(True)
def remove_item(self,comp_name):
# Delete
idx = self.all_items.index(comp_name)
del self.all_items[idx]
self.all_comp.pop(idx)
tmp = self.complist_widget.takeItem(idx+1) # 1 for None
#self.on_list_change()
class Component(object):
def __init__(self, z, wrest, vlim=[-300.,300]*u.km/u.s,
linelist=None):
# Init
self.init_wrest = wrest
self.zcomp = z
self.vlim = vlim
self.attrib = {'N': 0., 'Nsig': 0., 'flagN': 0, # Column
'b': 0.*u.km/u.s, 'bsig': 0.*u.km/u.s, # Doppler
'z': self.zcomp, 'zsig': 0.,
'Quality': 'None'}
self.comment = 'None'
#
self.linelist = linelist
self.lines = []
self.init_lines()
#
self.name = 'z{:.5f}_{:s}'.format(
self.zcomp,self.lines[0].data['name'].split(' ')[0])
#
def init_lines(self):
'''Fill up the component lines
'''
if self.linelist is None:
self.linelist = LineList('Strong')
# Get the lines
all_trans = self.linelist.all_transitions(self.init_wrest)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if isinstance(all_trans,dict):
all_trans = [all_trans]
for trans in all_trans:
self.lines.append(AbsLine(trans['wrest'],
linelist=self.linelist))
# Sync
self.sync_lines()
def sync_lines(self):
'''Synchronize attributes of the lines
'''
for line in self.lines:
line.attrib['N'] = self.attrib['N']
line.attrib['b'] = self.attrib['b']
line.attrib['z'] = self.attrib['z']
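    # Illustrative sketch (hypothetical values): setting comp.attrib['N'] = 14.5
    # and then calling comp.sync_lines() copies N, b and z onto every AbsLine
    # held in comp.lines.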
# Script to run the GUI from the command line or ipython
def run_gui(*args, **kwargs):
'''
Runs the IGMGuessesGui
Command line or from Python
Examples:
      1. python ~/xastropy/xastropy/xguis/igmguesses.py 1
      2. igmguesses.run_gui(filename)
      3. igmguesses.run_gui(spec1d)
'''
import argparse
from specutils import Spectrum1D
parser = argparse.ArgumentParser(description='Parser for XFitLLSGUI')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("in_file", type=str, help="Spectral file")
parser.add_argument("-out_file", type=str, help="Output LLS Fit file")
parser.add_argument("-smooth", type=float, help="Smoothing (pixels)")
parser.add_argument("-lls_fit_file", type=str, help="Input LLS Fit file")
parser.add_argument("-zqso", type=float, help="Use Telfer template with zqso")
if len(args) == 0:
pargs = parser.parse_args()
else: # better know what you are doing!
if isinstance(args[0],(Spectrum1D,tuple)):
app = QtGui.QApplication(sys.argv)
gui = XFitLLSGUI(args[0], **kwargs)
gui.show()
app.exec_()
return
else: # String parsing
largs = ['1'] + [iargs for iargs in args]
pargs = parser.parse_args(largs)
# Output file
try:
outfil = pargs.out_file
except AttributeError:
outfil=None
# Input LLS file
try:
lls_fit_file = pargs.lls_fit_file
except AttributeError:
lls_fit_file=None
# Smoothing parameter
try:
smooth = pargs.smooth
except AttributeError:
smooth=3.
    # QSO redshift (for the Telfer template)
try:
zqso = pargs.zqso
except AttributeError:
zqso=None
app = QtGui.QApplication(sys.argv)
gui = XFitLLSGUI(pargs.in_file,outfil=outfil,smooth=smooth,
lls_fit_file=lls_fit_file, zqso=zqso)
gui.show()
app.exec_()
# ################
if __name__ == "__main__":
import sys, os
from linetools.spectra import io as lsi
from xastropy.igm import abs_sys as xiabs
if len(sys.argv) == 1: # TESTING
flg_fig = 0
flg_fig += 2**0 # Fit LLS GUI
# LLS
if (flg_fig % 2**1) >= 2**0:
#spec_fil = '/Users/xavier/Keck/ESI/RedData/PSS0133+0400/PSS0133+0400_f.fits'
# spec_fil = os.getenv('DROPBOX_DIR')+'/Tejos_X/COS-Clusters/J1018+0546.txt'
# spec_fil = os.getenv('DROPBOX_DIR')+'/Tejos_X/COS-Filaments/q1410.fits'
spec_fil = os.getenv('DROPBOX_DIR')+'/Tejos_X/COS-Filaments/J1619+2543.fits'
spec = lsi.readspec(spec_fil)
spec.normalize()
#spec.plot()
#xdb.set_trace()
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('IGMGuesses')
main = IGMGuessesGui(spec)
main.show()
sys.exit(app.exec_())
else: # RUN A GUI
        id_gui = int(sys.argv[1]) # 1 = Fit GUI
if id_gui == 1:
run_gui()
else:
raise ValueError('Unsupported flag for spec_guis')
| nhmc/xastropy | xastropy/xguis/igmguesses.py | Python | bsd-3-clause | 47,337 | [
"Gaussian"
] | b1f2050acdd44f4287a46cb4dc4c743039026b28417f3c2e5e8e5463f590d55c |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
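# Illustrative sketch (not part of the generated script): partition is stable
# within each bucket and returns (true_list, false_list), e.g.
#   evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4])
#   # evens == [2, 4], odds == [1, 3]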
class recaptchaenterpriseCallTransformer(cst.CSTTransformer):
    CTRL_PARAMS: Tuple[str, ...] = ('retry', 'timeout', 'metadata')
    METHOD_TO_PARAMS: Dict[str, Tuple[str, ...]] = {
'annotate_assessment': ('name', 'annotation', ),
'create_assessment': ('parent', 'assessment', ),
'create_key': ('parent', 'key', ),
'delete_key': ('name', ),
'get_key': ('name', ),
'list_keys': ('parent', 'page_size', 'page_token', ),
'update_key': ('key', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
            lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
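# Illustrative sketch (hypothetical call, not from this file): the transformer
# rewrites a flattened positional call such as
#   client.create_key(parent, key, retry=retry)
# into the request-object form
#   client.create_key(request={'parent': parent, 'key': key}, retry=retry)
# while calls that already pass request=... are left untouched.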
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=recaptchaenterpriseCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
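# Minimal usage sketch (hypothetical paths): copy a tree while rewriting the
# method calls, assuming 'out/' exists and is empty:
#   fix_files(pathlib.Path('src'), pathlib.Path('out'))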
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the recaptchaenterprise client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
| googleapis/python-recaptcha-enterprise | scripts/fixup_keywords.py | Python | apache-2.0 | 6,239 | [
"VisIt"
] | 24b6aae75c7f912eb0f4ec0865d0a88b910b2903981c741dda0025290ccbf856 |
# -*- Mode: Python -*-
# vi:si:et:tw=4
"""
Capers - play at draughts
capers provides a visual interface to so-called checkers engines,
that search a given board for the 'best' move.
several drag and drop and point and click controls allow
tuning the behaviour of the program - select game types,
browse played games, etc.
it's really more an office application than a game. a game would
let the user enter a small cafe, and with the aid of a waiter,
sit down at some table, join a game, have a chat, get up, and
settle at another table, to play another opponent.
capers tries its best, to give you a pleasant time with Martin
Fierz' strong checkers engines. capers is free software: you are
invited to share and tinker. if in doubt, see accompanying file
COPYING for details on distribution and derivation.
(c) Peter Chiochetti, 2004, 2005
DISCLAIMER:
- a game, for which there is no engine, cannot be played, as capers
does not know how to take pieces and crown kings.
- unlike earlier versions of capers, this one is made with a
toolkit. the python bindings make working with gtk quite bearable.
documentation is, except in parts, very clear and accurate.
- this follows the object oriented design. still, there are just
procedures. they are organised in classes, to not have to come up
with clever names for functions and globals, and to more easily
build on the fine work others have done.
- no care has been taken to make the objects reusable outside of this
context. they all depend one on the other; though most of the work
gets done in the "game" class.
DEVEL:
- drive: all but one action are triggered from the gui, only the engine's
  moves are ticking in a gobject timeout loop.
- locking: the Main.game.lock is held, when an engine moves, and when a
game is created, and while reading a pdn
TODO:
- have engine suggest user moves, possibly in statusline
- when a game is browsed, new moves can be made and will truncate
  the game: instead it would be nice to start a variation
- have move_piece called from gobject idle too - about 4 of 5 seconds
  are spent there: the problem is that when the idle hook returns,
  the canvas isn't done yet...
- intermediate hops mouse input (wait for python-gnomecanvas grab())
- lazy loading of engines, so application starts faster - seems
no problem here, maybe will be with cake...
- network play: with central server, or p2p or both?
"""
import os
import sys
import thread
import gobject
import gtk
import gnome.canvas
import math
# =======
# G A M E
# =======
class Position:
"""known setup positions:
international, english, (italian, russian,) mafierz
in store: pseudo constants for the values of a square, a function
to return a mutable deepcopy of a position, with references to
pixmaps intact, and a function to print an ascii art board
- a position is a list of squares: 64 or 100 are allowed
- a square is a list of five values: num, col, x, y, pixbuf
"""
COL_NUM = 0
COL_VAL = 1
COL_X = 2
COL_Y = 3
COL_PIECE = 4 # pixbuf reference
# square values
EMPTY = 0
WHITE = 1
BLACK = 2
CC = 3 # change color
MAN = 4
KING = 8
FREE = 16
# convenience
def copy(self, position):
"""return mutable deepcopy of position:
simple types by value, objects by reference"""
temp = []
for i in xrange(len(position)):
temp.append([0]*len(position[i]))
for j in xrange(len(position[i])):
temp[i][j] = position[i][j]
return temp
def ascii(self, position):
"return ascii art string of <position>"
i = 0
r = "\n "
squares = "- wb WB "
        if len(position) == 64: c = 8
        elif len(position) == 100: c = 10
        else: assert False, 'position must have 64 or 100 squares'
for s in position:
i += 1
r = r + squares[s[1]] + ' '
if i % c == 0: r = r + "\n "
return r
def fen_setup(self, position, fen):
"change position to match fen setup, return turn, position"
turn, bpos, wpos = fen
fpos = {}
for num, val in bpos.iteritems():
fpos[num] = val
for num, val in wpos.iteritems():
fpos[num] = val
for s in position:
if not s[self.COL_NUM]:
continue
s[self.COL_VAL] = self.EMPTY
try:
newval = fpos[s[self.COL_NUM]]
s[self.COL_VAL] = newval
except KeyError:
continue
return turn, position
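    # Illustrative sketch (assumed values): a fen triple as consumed by
    # fen_setup is (turn, black_squares, white_squares), the last two being
    # dicts from square number to value, e.g.
    #   fen = (Position.WHITE,
    #          {1: Position.BLACK | Position.MAN},
    #          {32: Position.WHITE | Position.KING})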
# setup positions
def international(self):
"return international setup, color to move first"
pos = []
cells = 10
count = 0
for col in xrange(cells):
for row in xrange(cells):
if (row + col) % 2 == 1:
if count < 39:
pos.append((count / 2 + 1,
self.WHITE|self.MAN,
row, col, 0))
elif count > 59:
pos.append((count / 2 + 1,
self.BLACK|self.MAN,
row, col, 0))
else:
pos.append((count / 2 + 1,
self.EMPTY,
row, col, 0))
else:
pos.append((0, self.FREE,
row, col, 0))
count += 1
return (self.WHITE, self.copy(pos))
def english(self):
"return english setup, color to move first"
pos = []
cells = 8
count = 0
for col in xrange(cells):
for row in xrange(cells):
if (row + col) % 2 == 1:
if count < 24:
pos.append((count / 2 + 1,
self.BLACK|self.MAN,
cells - row - 1,
cells - col - 1, 0))
elif count > 39:
pos.append((count / 2 + 1,
self.WHITE|self.MAN,
cells - row - 1,
cells - col - 1, 0))
else:
pos.append((count / 2 + 1,
self.EMPTY,
cells - row - 1,
cells - col - 1, 0))
else:
pos.append((0, self.FREE,
cells - row - 1,
cells - col - 1, 0))
count += 1
return (self.BLACK, self.copy(pos))
def italian(self):
"return italian setup, color to move first"
Fatal('Italian setup not implemented')
def russian(self):
"return russian setup, color to move first"
Fatal('Russian setup not implemented')
def mafierz(self):
"""return mafierz setup (italian rules on russian board),
color to move first"""
pos = []
cells = 8
count = 0
for col in xrange(cells):
for row in xrange(cells):
if (row + col) % 2 == 0:
if count < 24:
pos.append((count / 2 + 1,
self.BLACK|self.MAN,
cells - row - 1,
col, 0))
elif count > 40:
pos.append((count / 2 + 1,
self.WHITE|self.MAN,
cells - row - 1,
col, 0))
else:
pos.append((count / 2 + 1,
self.EMPTY,
cells - row - 1,
col, 0))
else:
pos.append((0, self.FREE,
cells - row - 1,
col, 0))
count += 1
return (self.WHITE, self.copy(pos))
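# Illustrative sketch (values traced from the loops above): each square in a
# position is the 5-list [number, value, x, y, piece-ref]; in the english
# setup, square 1 comes out as [1, Position.BLACK | Position.MAN, 6, 7, 0].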
class Game:
"""glue together gui, engine, board and book
the game does know very little about the rules, it has to ask the
engine, if a move was legal, the game only knows the current position,
it has to ask the book for others
on entry of a move, board and book will be updated, current game,
current and last move are tracked here too, its easier than always
having to ask the book
on selecting an old game, a new setup position is created and the game
rewound, in order to update all the pixbuf references to valid ones;
a game loaded from pdn is just like a game from the treestore
all communication with board and book shall be through the game
the game also keeps the lock, while an engine is searching a move
_grey is the engine used to validate moves by human players;
_black and _white are either references to engines, or None
_game_curr, _game_last, _move_curr and _move_last are paths into
the Book treestore
"""
# gametypes
INTERNL = 20
ENGLISH = 21
ITALIAN = 22
RUSSIAN = 25
MAFIERZ = 32
gametype = 0
_color = 0 # color to move
_game_curr = _game_last = _move_curr = _move_last = -1
_position = []
_black = _white = _grey = False
engines_loop = 0
def __init__(self):
self.lock = thread.allocate_lock()
# setup
def new(self):
"setup from prefs, register with board and book, kickoff engines"
assert not self.lock.locked()
self.lock.acquire()
self.gametype = Main.prefs.getint('game', 'type')
self._timeout = Main.prefs.getint('game', 'timeout')
# connect engines
self._grey = Main.players.gt2engine(self.gametype)
if not self._grey:
            Fatal('Unsupported gametype: %s' % self.gametype)
black = Main.prefs.get('game', 'black')
white = Main.prefs.get('game', 'white')
self._black = Main.players.file2engine(black)
self._white = Main.players.file2engine(white)
blackname = whitename = Main.prefs.get('player', 'name')
if isinstance(self._black, Engine):
blackname = self._black.name
if isinstance(self._white, Engine):
whitename = self._white.name
# setup position
flip = False
if self.gametype == self.INTERNL:
self._first, self._position = Main.pos.international()
elif self.gametype == self.ENGLISH:
self._first, self._position = Main.pos.english()
elif self.gametype == self.MAFIERZ:
self._first, self._position = Main.pos.mafierz()
# flip board, so single human sees it right
if self._first == Position.WHITE \
and self._white and not self._black:
flip = True
elif self._first == Position.BLACK \
and self._black and not self._white:
flip = True
Main.board.new(self.gametype, self._position, flip)
self._color = self._first
# setup book
name = False
self._game_curr, = Main.book.new_game(name, self.gametype,
blackname, whitename, self._position, self._color)
self._game_last = self._game_curr
self._move_curr = -1
self._move_last = self._move_curr
# go
self.engines_go()
self.lock.release()
if self._color == Position.WHITE:
Main.feedback.g_push('New game: White to move')
else:
Main.feedback.g_push('New game: Black to move')
def old(self, game):
"setup from old game <game>, register with prefs, board and book"
assert not self.lock.locked()
self.lock.acquire()
self._game_curr = game
# setup position
header = Main.book.goto_game(self._game_curr)
self.gametype, black, white = \
header['gametype'], header['black'], header['white']
if self.gametype == self.INTERNL:
self._first, self._position = Main.pos.international()
if self.gametype == self.ENGLISH:
self._first, self._position = Main.pos.english()
if self.gametype == self.MAFIERZ:
self._first, self._position = Main.pos.mafierz()
# fen setup
fen = header['fen']
if fen:
self._first, self._position \
= Main.pos.fen_setup(self._position, fen)
self._color = self._first
Main.book[self._game_curr][Book.COL_TURN] = self._first
# set prefs
Main.prefs.set('game', 'type', self.gametype)
Main.prefs.set('game', 'black', Main.players.name2file(black))
Main.prefs.set('game', 'white', Main.players.name2file(white))
Main.prefs.save()
# connect engines
self._grey = Main.players.gt2engine(self.gametype)
self._black = Main.players.name2engine(black)
self._white = Main.players.name2engine(white)
# replay game
self._move_curr = -1
self._move_last = self._move_curr
# flip board, so single human sees it right
flip = False
if self._first == Position.WHITE \
and self._white and not self._black:
flip = True
elif self._first == Position.BLACK \
and self._black and not self._white:
flip = True
Main.board.new(self.gametype, self._position, flip)
movelist = Main.book.old_game(self._position)
for move in movelist:
data = self._grey.islegal(move,
self._color, self._position, None)
self.do_oldmove(move, data)
self.lock.release()
self.goto_begin()
def pdn(self, last):
"go to the first game in the book, that has num <last> games"
assert not self.lock.locked()
self._timeout = Main.prefs.getint('game', 'timeout')
self._game_last = last
self.old(0)
# editor
def start_edit(self, empty=False):
"start new game from current setup or empty board, lock game"
assert not self.lock.locked()
self.lock.acquire()
# setup book
blackname = whitename = Main.prefs.get('player', 'name')
if isinstance(self._black, Engine):
blackname = self._black.name
if isinstance(self._white, Engine):
whitename = self._white.name
name = False
self._game_curr, = Main.book.new_game(name, self.gametype,
blackname, whitename, self._position, self._color)
# setup board
if empty:
self.empty()
else:
self.clean()
self._game_last = self._game_curr
self._move_curr = -1
self._move_last = self._move_curr
Main.feedback.g_push('Edit board: click square to set piece')
def stop_edit(self):
"create fen of current position, correct book, release game lock"
bpos = {}
wpos = {}
for s in self._position:
num = s[Position.COL_NUM]
val = s[Position.COL_VAL]
if val & Position.BLACK:
bpos[num] = val
if val & Position.WHITE:
wpos[num] = val
Main.book[self._game_curr][Book.COL_HEAD]['fen'] = \
self._color, bpos, wpos
Main.book[self._game_curr][Book.COL_POS] = self._position
Main.feedback.g_push('Setup registered')
if self._color == Position.WHITE:
Main.feedback.g_push('Position set: White to move')
else:
Main.feedback.g_push('Position set: Black to move')
self.lock.release()
# transport
def goto_move(self, move):
"go to move/position <move> in current game"
if self.lock.locked():
return
move = min(self._move_last, move)
move = max(move, -1)
if move == self._move_curr:
return
self.engines_stop()
self._move_curr = move
self._position, self._color = \
Main.book.get_move(self._move_curr)
Main.board.setposition(self._position)
if self._color == Position.WHITE:
Main.feedback.g_push('Position '
+ str(move + 1) + ': White to move')
else:
Main.feedback.g_push('Position '
+ str(move + 1) + ': Black to move')
def goto_begin(self):
"go to the begin of the game"
self.goto_move(-1)
def goto_prev(self):
"go to the previous position in the game"
self.goto_move(self._move_curr - 1)
def goto_next(self):
"go to the next position in the game"
self.goto_move(self._move_curr + 1)
def goto_end(self):
"go to the last position in the game"
self.goto_move(self._move_last)
def goto_game(self, game):
"go to move/position <game> in current game"
if self.lock.locked():
return
game = min(self._game_last, game)
game = max(game, 0)
if game == self._game_curr:
return
self.engines_stop()
self.old(game)
def goto_game_prev(self):
"go to the previous game"
self.goto_game(self._game_curr - 1)
def goto_game_next(self):
"go to the next game"
self.goto_game(self._game_curr + 1)
def goto_game_move(self, path):
"""go to move/position <path> in game <path>
when game changed, only go to begin"""
if len(path) == 2:
game, move = path
else:
game, = path
move = -1
if game != self._game_curr:
self.goto_game(game)
elif move != self._move_curr:
self.goto_move(move)
# convenience
def num2val(self, num):
"return value of square <num>"
for s in self._position:
if s[Position.COL_NUM] == num:
return s[Position.COL_VAL]
assert "number off board"
def num2piece(self, num):
"return piece on square <num>"
for s in self._position:
if s[Position.COL_NUM] == num:
return s[Position.COL_PIECE]
assert "number off board"
def num2coor(self, num):
"translate number to coordinates"
for s in self._position:
if s[Position.COL_NUM] == num:
return (s[Position.COL_X], s[Position.COL_Y])
assert "number off board"
def coor2num(self, x, y):
"translate coordinates to number"
for s in self._position:
if s[Position.COL_X] == x \
and s[Position.COL_Y] == y:
return s[Position.COL_NUM]
assert "coordinates off board"
# calls to engine
def engines_stop(self):
"make engine_getmove always return False"
if self.engines_loop:
gobject.source_remove(self.engines_loop)
self.engines_loop = 0
Main.feedback.g_push('Engines stop')
def engines_go(self):
"let engine_getmove return True"
if self.engines_loop:
return
self.engines_loop = \
gobject.timeout_add(self._timeout, self.engine_getmove)
Main.feedback.g_push('Engines go')
def engine_getmove(self):
"""ask the engine for a move, called from gobject timeout, so its
in a loop, where False means break, and True means continue, but
this will never break what is not broken, only engines_stop does"""
if not self.engines_loop:
return False
if self.lock.locked():
return True
if Main.board.busy:
return True
if self._color == Position.WHITE \
and not isinstance(self._white, Engine):
return True
if self._color == Position.BLACK \
and not isinstance(self._black, Engine):
return True
maxtime = Main.prefs.getint('engines', 'maxtime')
data = [self._color, maxtime, self._position]
if self._color == Position.WHITE:
self._white.getmove(data)
return True
if self._color == Position.BLACK:
self._black.getmove(data)
return True
assert 0
def engine_islegal(self, list):
"if game not locked, target is empty and color right, ask engine"
if self.lock.locked():
return [False]
if not list[-1]:
return [False]
        if self.num2val(list[-1]) != Position.EMPTY:  # target must be empty
return [False]
if self.num2val(list[0]) & self._color:
return self._grey.islegal(list,
self._color, self._position, None)
return [False]
# book management
def save_move(self, steps, jumps):
"register move with book"
if self._move_curr < self._move_last:
Main.book.trunc_game(self._move_curr)
# name = move as string
if jumps:
            name = 'x'.join([str(num) for num in steps])
        else:
            name = '-'.join([str(num) for num in steps])
self._game_curr, self._move_curr = Main.book.new_move(name,
self._position, self._color, steps)
self._move_last = self._move_curr
def save_oldmove(self, steps, jumps):
"reregister move with book, increments move_curr"
if jumps:
            name = 'x'.join([str(num) for num in steps])
        else:
            name = '-'.join([str(num) for num in steps])
self._game_curr, self._move_curr = Main.book.old_move(
self._move_curr, name, self._position, self._color, steps)
self._move_last = self._move_curr
def set_result(self, code, move):
"game ended, push message and format result for board/pdn"
self.engines_stop()
message = 'Game ends with unknown result'
if code == Engine.DRAW:
message = "Game ends in a draw (%s)" % move
Main.book.set_result('1/2 1/2')
if code == Engine.WIN:
if self._color == Position.BLACK:
message = "Black wins (%s)" % move
if self._color == Position.WHITE:
message = "White wins (%s)" % move
if code == Engine.LOSS:
if self._color == Position.BLACK:
message = "Black looses (%s)" % move
if self._color == Position.WHITE:
message = "White looses (%s)" % move
if self._first == self._color:
if code == Engine.WIN:
Main.book.set_result('1-0')
if code == Engine.LOSS:
Main.book.set_result('0-1')
if self._first != self._color:
if code == Engine.WIN:
Main.book.set_result('0-1')
if code == Engine.LOSS:
Main.book.set_result('1-0')
# wait after engines status pushed
gobject.timeout_add(self._timeout, Main.feedback.g_push, message)
# board management
def empty(self):
"clear board of all pieces, needed for edit empty"
for s in self._position:
s[Position.COL_VAL] = Position.EMPTY
p = s[Position.COL_PIECE]
if p:
p.destroy()
s[Position.COL_PIECE] = 0
def clean(self):
"clear board of all hidden pieces, needed for edit"
for s in self._position:
p = s[Position.COL_PIECE]
v = s[Position.COL_VAL]
if p and not v:
p.destroy()
s[Position.COL_PIECE] = 0
def do_move_silent(self, a, b):
"update temp position after move, reducable"
if a == b: return b
n = m = -1 # from, to
# find squares a, b
for s in self._position:
if s[Position.COL_NUM] == a:
n = self._position.index(s)
elif s[Position.COL_NUM] == b:
m = self._position.index(s)
assert n >= 0 and m >=0
# swap values 1, 4
self._position[m][Position.COL_VAL], \
self._position[n][Position.COL_VAL] \
= self._position[n][Position.COL_VAL], \
self._position[m][Position.COL_VAL]
self._position[m][Position.COL_PIECE], \
self._position[n][Position.COL_PIECE] \
= self._position[n][Position.COL_PIECE], \
self._position[m][Position.COL_PIECE]
return b
def do_move(self, a, b):
"update temp position and board after move, reducable"
Main.board.move_piece(a, b)
return self.do_move_silent(a, b)
def take_piece(self, num):
"take piece on squares num in list, called via map()"
for s in self._position:
if s[Position.COL_NUM] != num: continue
assert s[Position.COL_PIECE]
s[Position.COL_VAL] = Position.EMPTY
Main.board.take_piece(num)
break
def take_piece_silent(self, num):
"take piece on squares num in list, called via map()"
for s in self._position:
if s[Position.COL_NUM] != num: continue
assert s[Position.COL_PIECE]
s[Position.COL_VAL] = Position.EMPTY
break
def promote(self, num, value, silent=False):
"update temp position with piece value"
for s in self._position:
if s[Position.COL_NUM] != num: continue
if not Main.board.edit:
assert s[Position.COL_PIECE]
s[Position.COL_VAL] = value
if not silent:
s[Position.COL_PIECE] = Main.board.set_piece(num, value)
break
def do_enginemove(self, data):
"""register legal engine move with current position, release game lock
always return False to remove gobject idle"""
code, steps, new, old, huffs, lock = data
if code != Engine.UNKNOWN:
if len(huffs):
                move = 'x'.join([str(num) for num in steps])
            else:
                move = '-'.join([str(num) for num in steps])
self.set_result(code, move)
lock.release()
# dont use this move
return False
self._position = Main.pos.copy(self._position)
reduce(self.do_move, steps)
map(self.take_piece, huffs)
if new != old:
self.promote(steps[-1], new)
self._color ^= Position.CC
self.save_move(steps, len(huffs))
lock.release()
if self._color == Position.WHITE:
Main.feedback.g_push('White to move')
else:
Main.feedback.g_push('Black to move')
return False
def do_usermove(self, steps, data):
"register legal user move with current position, called from board"
assert data[0] # must be legal
code, steps, new, old, huffs = data
self._position = Main.pos.copy(self._position)
reduce(self.do_move_silent, steps)
map(self.take_piece, huffs)
if new != old:
self.promote(steps[-1], new)
self._color ^= Position.CC
self.save_move(steps, len(huffs))
if self._color == Position.WHITE:
Main.feedback.g_push('White to move')
else:
Main.feedback.g_push('Black to move')
self.engines_go()
def do_oldmove(self, steps, data):
"register legal old move with current position, called from replay"
if not data[0]: # must be legal
self.save_oldmove((0, 0), 0)
return
code, steps, new, old, huffs = data
self._position = Main.pos.copy(self._position)
reduce(self.do_move_silent, steps)
map(self.take_piece_silent, huffs)
# make kings
if new != old:
self.promote(steps[-1], new, True)
self._color ^= Position.CC
self.save_oldmove(steps, len(huffs))
# =======
# B O O K
# =======
import datetime
class Book(gtk.TreeStore):
"""game history - a tree of all the games in the book
- games grow from the root
games store a name, the setup position, etc.
- moves grow from a game
moves contain the move as a string and as a list and
the position after the move
the currently active game is to be remembered between calls to
several methods, so persistant iters are required
output game headers are: gametype, black, white, result, date,
site, round, fen. other headers are not in the output pdn
the "event" header is in COL_NAME
"""
# general
COL_NAME = 0 # pdn event or move as string
COL_HEAD = 1 # game headers
# move
COL_MOVE = 2 # move as a list
COL_STREN = 3
COL_ANNO = 4
COL_POS = 5
COL_TURN = 6 # who's next
def __init__(self):
super(Book, self).__init__(
str, gobject.TYPE_PYOBJECT,
gobject.TYPE_PYOBJECT, str, str, gobject.TYPE_PYOBJECT, int,
int, str, str, str, gobject.TYPE_PYOBJECT,
gobject.TYPE_PYOBJECT)
assert self.get_flags() & gtk.TREE_MODEL_ITERS_PERSIST
def do_clear(self):
"clear book"
if hasattr(self, 'game'):
del self.game
self.clear()
def new_game(self, name, gametype, black, white, position, color):
"add a new game to the book, return path"
today = datetime.date.today()
        # append a new game node unless the current one is still empty
if not hasattr(self, 'game') or self.iter_n_children(self.game) > 0:
self.game = self.append(None)
path = self.get_path(self.game)
name = "Game " + str(path[0] + 1)
self.set(self.game, self.COL_NAME, name, self.COL_POS, position,
self.COL_TURN, color, self.COL_ANNO, '',
self.COL_HEAD, {'gametype': gametype,
'black': black, 'white': white, 'result': '*',
'date': str(today), 'site': '', 'round': '', 'fen': ''})
Main.bookview.set_cursor(path)
Main.bookview.scroll_to_cell(path, None, True)
return path
def pdn_game(self):
"add a new empty game to the book"
self.game = self.append(None)
self.set(self.game, self.COL_NAME, 'Pdn', self.COL_ANNO, '',
self.COL_HEAD, {'gametype': Game.ENGLISH,
'black': 'Black', 'white': 'White', 'result': '*',
'date': '', 'site': '', 'round': '', 'fen': ''})
def goto_game(self, num):
"set game <num> as current, return game header"
iter = self.iter_nth_child(None, num)
assert iter
self.game = iter
path = self.get_path(self.game)
Main.bookview.set_cursor(path)
Main.bookview.scroll_to_cell(path, None, True)
return (self.get_value(iter, self.COL_HEAD))
def old_game(self, position):
"replace position in current old game, return old games' movelist"
movelist = self.get_movelist()
self.set_value(self.game, self.COL_POS, position)
return movelist
def trunc_game(self, num):
"delete all moves after <num> from current game"
assert self.game
self.get_value(self.game, self.COL_HEAD)['result'] = '*'
iter = self.iter_nth_child(self.game, num + 1)
while self.remove(iter): assert iter
def new_move(self, name, position, color, move):
"append move to current game, return path"
assert self.game
iter = self.append(self.game)
self.set(iter, self.COL_NAME, name, self.COL_POS, position,
self.COL_TURN, color, self.COL_MOVE, move, self.COL_ANNO, '')
path = self.get_path(iter)
Main.bookview.expand_to_path(path)
Main.bookview.set_cursor(path)
Main.bookview.scroll_to_cell(path)
return path
def pdn_move(self, name, strength, annotation, move):
"""append move from pdn to current game, some essential
parameters are only added later on replay, ie. old_move"""
assert self.game
iter = self.append(self.game)
self.set(iter, self.COL_NAME, name, self.COL_STREN, strength,
self.COL_ANNO, annotation, self.COL_MOVE, move)
def old_move(self, num, name, position, color, move):
"replace position in move <num> in current game, return path"
assert self.game
iter = self.iter_nth_child(self.game, num + 1)
assert iter
self.set(iter, self.COL_NAME, name, self.COL_POS, position,
self.COL_TURN, color, self.COL_MOVE, move)
path = self.get_path(iter)
return path
def get_move(self, num):
"get move <num>, return saved position and turn"
assert self.game
if num < 0:
iter = self.game
else:
iter = self.iter_nth_child(self.game, num)
path = self.get_path(iter)
Main.bookview.expand_to_path(path)
Main.bookview.set_cursor(path)
#Main.bookview.scroll_to_cell(path)
return (self.get_value(iter, self.COL_POS),
self.get_value(iter, self.COL_TURN))
def get_movelist(self):
"return list of the moves in the current game"
move = self.iter_children(self.game)
movelist = []
while move:
movelist.append(self.get_value(move, self.COL_MOVE))
move = self.iter_next(move)
return movelist
def set_result(self, result):
"replace result in current game"
self.get_value(self.game, self.COL_HEAD).update({'result' : result})
def fen2str(self, fen):
"return fen as a string"
color, bpos, wpos = fen
if color == Position.BLACK:
fstr = 'B:B'
else:
fstr = 'W:B'
black = []
for num, val in bpos.iteritems():
if val & Position.KING:
black.append('K%d' % num)
else:
black.append(str(num))
fstr = fstr + ','.join(black)
fstr = fstr + ':W'
white = []
for num, val in wpos.iteritems():
if val & Position.KING:
white.append('K%d' % num)
else:
white.append(str(num))
fstr = fstr + ','.join(white)
return fstr
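    # Illustrative sketch (assumed position): kings are rendered with a 'K'
    # prefix, e.g.
    #   fen2str((Position.BLACK, {5: Position.BLACK | Position.MAN},
    #            {30: Position.WHITE | Position.KING}))
    # returns 'B:B5:WK30'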
def wrap(self, text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/148061
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
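    # Illustrative sketch: the recipe breaks on a space once a line reaches
    # <width> characters, e.g. wrap('The quick brown fox', 10) gives
    # 'The quick\nbrown fox'.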
def game2pdn(self):
"return current game as a pdn string, wrapped email friendly"
game = self.get_value(self.game, self.COL_NAME)
header = self.get_value(self.game, self.COL_HEAD)
gametype, black, white, date, site, round, result, fen = \
header['gametype'], header['black'], header['white'], \
header['date'], header['site'], header['round'], \
header['result'], header['fen']
pdn = """[Event "%s"]\n""" % game
if date:
pdn = pdn + """[Date "%s"]\n""" % date
if black != 'Black':
pdn = pdn + """[Black "%s"]\n""" % black
if white != 'White':
pdn = pdn + """[White "%s"]\n""" % white
if site:
pdn = pdn + """[Site "%s"]\n""" % site
if round:
pdn = pdn + """[Round "%s"]\n""" % round
if gametype != Game.ENGLISH:
pdn = pdn + """[Gametype "%s"]\n""" % gametype
pdn = pdn + """[Result "%s"]\n""" % result
if fen:
fstr = self.fen2str(fen)
pdn = pdn + """[FEN "%s"]\n""" % fstr
anno = self.get_value(self.game, self.COL_ANNO)
if anno:
pdn = pdn + """{%s}\n""" % self.wrap(anno, 72)
move = self.iter_children(self.game)
movelist = []
count = 2
while move:
if count % 2 == 0:
movelist.append(str(count / 2) + '.')
count += 1
name = self.get_value(move, self.COL_NAME)
stren = self.get_value(move, self.COL_STREN)
anno = self.get_value(move, self.COL_ANNO)
if stren:
name = name + stren
if anno:
name = name + ' {' + anno + '}'
movelist.append(name)
move = self.iter_next(move)
movelist = ' '.join(movelist)
pdn = pdn + self.wrap(movelist, 72)
pdn = pdn + ' %s\n' % (result)
return game, pdn
import shlex
class Pdn(shlex.shlex):
"""load games from pdn file
as the parser is quite small, its put here too. the globals game and
move are counts that match paths into the books treestore.
this gets a filename, parses the games in the file and writes them to
the book, then points the game at the first loaded game; moves are not
checked for validity here
"""
def __init__(self, stream, filename):
"init lexer in posix mode, prepare parser"
# not a new style class
#super(Pdn, self).__init__()
shlex.shlex.__init__(self, stream, filename, True)
self.wordchars = self.wordchars + """.-/'<>*!?"""
self.quotes = '"'
self.pdn_state = 'none'
self.pdn_trace = False
self.game = -1
self.move = -1
self.name = filename
def parse_fen(self, fen):
"parse fen string: setup position, return turn, bpos, wpos"
def num_val(square, color):
"return number and value of square"
if square.isdigit():
return [int(square), color|Position.MAN]
if square[0].lower() == 'k':
return [int(square[1:]), color|Position.KING]
print self.error_leader(), 'invalid square in fen: "%s"' %square
return (0, 0)
turn, pos1, pos2 = fen.split(':')
if turn.lower() == 'b':
turn = Position.BLACK
else:
turn = Position.WHITE
if pos1[0].lower() == 'b':
black = pos1[1:].split(',')
white = pos2[1:].split(',')
else:
black = pos2[1:].split(',')
white = pos1[1:].split(',')
try:
black = dict([num_val(x, Position.BLACK) for x in black])
white = dict([num_val(x, Position.WHITE) for x in white])
except ValueError:
print self.error_leader(), 'invalid fen: "%s"' %fen
return [0, 0, 0]
return turn, black, white
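    # Illustrative sketch (assumed input): parse_fen('W:B1,K3:W30') yields
    #   (Position.WHITE,
    #    {1: Position.BLACK | Position.MAN, 3: Position.BLACK | Position.KING},
    #    {30: Position.WHITE | Position.MAN})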
def parse_header(self):
"parse headers: key, value; save to book, increment game counter"
if not self.pdn_state == 'headers':
# got new game
self.pdn_state = 'headers'
self.game += 1
self.move = -1
# create empty game in book
Main.book.pdn_game()
Main.feedback.g_push('Loading game: %3i' % self.game)
# parsing may take some time
while gtk.events_pending():
gtk.main_iteration(False)
key = self.get_token()
value = self.get_token()
header = Main.book[self.game][Book.COL_HEAD]
if self.pdn_trace:
print """H:%s,: [%s "%s"]""" % (self.game, key, value)
if key.lower() == 'event':
Main.book[self.game][Book.COL_NAME] = value
elif key.lower() == 'gametype':
Main.book[self.game][Book.COL_HEAD]['gametype'] = int(value)
elif key.lower() == 'fen':
Main.book[self.game][Book.COL_HEAD]['fen'] = self.parse_fen(value)
else:
Main.book[self.game][Book.COL_HEAD][key.lower()] = value
# discard rest of line
for token in iter(self.get_token, None):
if token == ']':
return
print self.error_leader(), 'stray text in header: "%s"' %token
def parse_annotation(self):
"parse annotation, return it"
annotation = self.get_token()
# drop empty annotations
if annotation == '}': return ''
for token in iter(self.get_token, None):
if token == '}':
if self.pdn_trace:
print "A:%s,%s: {%s}" % (self.game, self.move, annotation)
return annotation
annotation = ' '.join((annotation, token))
def move_split(self, move):
"split a move, return list of integers"
if move.count('-'):
steps = move.split('-')
try:
return [int(x) for x in steps]
except ValueError:
print self.error_leader(), 'stray text in movelist: "%s"' %move
return [0, 0]
elif move.count('x'):
steps = move.split('x')
try:
return [int(x) for x in steps]
except ValueError:
print self.error_leader(), 'stray text in movelist: "%s"' %move
return [0, 0]
else:
return [False]
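    # Illustrative sketch: move_split('11-15') -> [11, 15] and
    # move_split('12x19x26') -> [12, 19, 26]; a token that is neither a slide
    # nor a jump comes back as [False].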
def parse_move(self):
"parse single move: hops, strength, annotation; return them"
steps = []
move = strength = annotation = ''
for token in iter(self.get_token, None):
# move
if not steps and token[0] in '1234567890':
# strength
if token[-1] in '*!?':
strength = token[-1]
steps = self.move_split(token[:-1])
else:
steps = self.move_split(token)
# eat move numbers
if len(steps) < 2:
steps = []
move = strength = annotation = ''
continue
self.move += 1
move = token
continue
#annotation
if token == '{':
annotation = self.parse_annotation()
continue
# push back the rest
self.push_token(token)
break
if self.pdn_trace:
print "M:%s,%s: %s" %(self.game, self.move, steps)
return (move, steps, strength, annotation)
def parse_moves(self):
"parse movelist: moves, result; save them to book"
def set_result(result):
"set result in book"
if self.game < 0: return
Main.book[self.game][Book.COL_HEAD]['result'] = result
self.pdn_state = 'none'
result = ''
for token in iter(self.get_token, None):
# result: unknown
if token == '*':
self.pdn_state = 'none'
continue
# result: first wins
elif token == '1-0':
set_result(token)
continue
# result: second wins
elif token == '0-1':
set_result(token)
continue
# result: draw
elif token == '1/2-1/2':
set_result(token)
continue
# shift move
elif token[0] in '1234567890':
if self.game < 0:
continue
self.push_token(token)
data = self.parse_move()
move, steps, strength, annotation = data
if move and len(steps) > 1:
Main.book.pdn_move(move, strength, annotation, steps)
continue
if token == '[':
self.push_token(token)
break
# warn about others
print self.error_leader(), 'stray text in movelist: "%s"' %token
break
def parse(self):
"parse book: headers, annotation, movelist; hold game.lock"
Main.game.lock.acquire()
Main.book.do_clear()
Main.bookview.connect_model(False)
for token in iter(self.get_token, None):
# header
if token == '[':
self.parse_header()
continue
# annotation
elif token == '{':
if self.game < 0: continue
annotation = self.parse_annotation()
Main.book[self.game][Book.COL_ANNO] = annotation
continue
# shift movelist
elif token[0] in '1234567890*':
if not self.pdn_state == 'moves':
self.pdn_state = 'moves'
self.push_token(token)
self.parse_moves()
continue
# warn about others
print self.error_leader(), 'stray text: "%s"' %token
Main.bookview.connect_model(True)
Main.game.lock.release()
Main.feedback.g_push('read %d games from %s'
%(self.game + 1, self.name))
if self.game < 0:
Main.game.new()
Main.feedback.g_push('No games found')
return False
Main.game.pdn(self.game)
return True
import pango
class CellRendererWrap(gtk.GenericCellRenderer):
"""a cell renderer, that wraps long text
the first on_get_size will not be given a cell_area, probably,
because the treeview widget is not yet realized. as no width can
be set for the wordwrap, pango will not wrap at all and the cell
will be as wide as the longest single line in the model.
one way around this is to set an initial width. this has a severe
drawback: a cell will never shrink below its initial width and its
initial height will neither shrink nor grow. so if the set width is
smaller than the actual width, when the cell is realized, there will
be a blank padding on the bottom. if the set width is larger than
the actual width, some of the text will be cut off.
another way might be to pack the cell into the column after view and
column have been realized. still, the first calls will not sport a
cell_area.
this always sets the marked up text
"""
__gproperties__ = {
'text': (gobject.TYPE_STRING, 'markup', 'markup displayed by the cell',
'', gobject.PARAM_READWRITE),
'markup': (gobject.TYPE_STRING, 'markup', 'markup displayed by the cell',
'', gobject.PARAM_READWRITE),
}
def __init__(self):
gobject.GObject.__init__(self)
#self.set_property('mode', gtk.CELL_RENDERER_MODE_EDITABLE)
self.set_property('xalign', 0.0)
self.set_property('yalign', 0.5)
self.set_property('xpad', 2)
self.set_property('ypad', 2)
self.markup = self.text = ''
def do_get_property(self, property):
if property.name == 'text' or property.name == 'markup':
return self.markup
else:
raise TypeError('No property named %s' % (property.name,))
def do_set_property(self, property, value):
if property.name == 'text' or property.name == 'markup':
self.markup = value
else:
raise TypeError('No property named %s' % (property.name,))
def _render(self, widget, cell_area, xpad):
"call pango to render the text"
layout = widget.create_pango_layout(self.text)
layout.set_markup(self.markup)
layout.set_wrap(pango.WRAP_WORD)
if cell_area:
width = cell_area.width
else:
width = self.get_property('width')
width -= 2 * xpad
layout.set_width(width * pango.SCALE)
return layout
def _get_size(self, widget, cell_area, layout=False):
"only call to pango, if there is no layout"
xpad = self.get_property('xpad')
ypad = self.get_property('ypad')
xalign = self.get_property('xalign')
yalign = self.get_property('yalign')
if not layout:
layout = self._render(widget, cell_area, xpad)
width, height = layout.get_pixel_size()
calc_width = 2 * xpad + width
calc_height = 2 * ypad + height
if cell_area:
x_offset = xalign * (cell_area.width - calc_width)
x_offset = max(x_offset, 0)
y_offset = yalign * (cell_area.height - calc_height)
y_offset = max(y_offset, 0)
else:
x_offset = 0
y_offset = 0
return int(x_offset), int(y_offset), calc_width, calc_height
def on_get_size(self, widget, cell_area):
"tell the treeview the cell's size"
return self._get_size(widget, cell_area)
def on_render(self, window, widget, background_area,
cell_area, expose_area, flags):
"paint the cell"
xpad = self.get_property('xpad')
ypad = self.get_property('ypad')
layout = self._render(widget, cell_area, xpad)
x_offset, y_offset, width, height = self._get_size(widget, cell_area, layout)
width -= 2*xpad
height -= 2*ypad
widget.style.paint_layout(window,
gtk.STATE_NORMAL, True,
cell_area, widget, "text",
cell_area.x + x_offset + xpad,
cell_area.y + y_offset + ypad,
layout)
gobject.type_register(CellRendererWrap) # make widget available to bookview
class BookView(gtk.TreeView):
"""display of book: games and moves
the book view displays data from several columns of the book model
in a single column, collection is done via a celldata function
to speed up loading of games, the model is disconnected then
it might have been preferable to show the black and white move on
the same line, but that seems quite hard and doesnt mix well with
annotations
"""
_book = None # for disconnect/reconnect
def __init__(self):
"never called, libglade doesnt"
gobject.GObject.__init__(self)
#assert 0 # obviously, now its called
def edit_game(self, model, path):
"edit game headers and dialog"
game = model.get_iter(path)
event, header, annotation = model.get(game,
Book.COL_NAME, Book.COL_HEAD, Book.COL_ANNO)
editor = Main.gui['Edit game...']
ege = Main.gui['EG event']
egd = Main.gui['EG date']
egb = Main.gui['EG black']
egw = Main.gui['EG white']
egs = Main.gui['EG site']
egn = Main.gui['EG round']
egr = Main.gui['EG result']
ega = Main.gui['EG annotation'].get_buffer()
ege.set_text(event)
egd.set_text(header['date'])
egb.set_text(header['black'])
egw.set_text(header['white'])
egs.set_text(header['site'])
egn.set_text(header['round'])
egr.set_text(header['result'])
ega.set_text(annotation)
if editor.run() == gtk.RESPONSE_OK:
model.set(game,
Book.COL_NAME, ege.get_text(),
Book.COL_ANNO, ega.get_text(ega.get_start_iter(),
ega.get_end_iter()))
header['date'] = egd.get_text()
header['black'] = egb.get_text()
header['white'] = egw.get_text()
header['site'] = egs.get_text()
header['round'] = egn.get_text()
header['result'] = egr.get_text()
header['date'] = egd.get_text()
Main.feedback.g_push('Game headers set')
else:
Main.feedback.g_push('Edit game cancelled')
editor.hide()
def edit_move(self, model, path):
"edit move info and dialog"
move = model.get_iter(path)
stren, annotation = model.get(move,
Book.COL_STREN, Book.COL_ANNO)
editor = Main.gui['Edit move...']
emsn = Main.gui['EM strength']
emso = Main.gui['EM strong']
emsa = Main.gui['EM star']
emsw = Main.gui['EM weak']
ema = Main.gui['EM annotation'].get_buffer()
if stren == '!': emso.set_active(1)
elif stren == '*': emsa.set_active(1)
elif stren == '?': emsw.set_active(1)
else: emsn.set_active(1)
ema.set_text(annotation)
if editor.run() == gtk.RESPONSE_OK:
if emso.state: stren = '!'
elif emsa.state: stren = '*'
elif emsw.state: stren = '?'
else: stren = ''
model.set(move, Book.COL_STREN, stren)
model.set(move, Book.COL_ANNO,
ema.get_text(ema.get_start_iter(), ema.get_end_iter()))
Main.feedback.g_push('Move info set')
else:
Main.feedback.g_push('Edit move cancelled')
editor.hide()
def on_activate_row(self, treeview, path, column):
"on double click on row edit game or move"
if Main.board.busy or Main.board.edit: return False
if len(path) == 1:
Main.feedback.g_push('Edit game headers')
self.edit_game(treeview.get_model(), path)
else:
Main.feedback.g_push('Edit move info')
self.edit_move(treeview.get_model(), path)
return True
def on_change_selection(self, selection):
"on selction change goto selected game or move"
if Main.game.lock.locked(): return False
if Main.board.busy or Main.board.edit: return False
model, iter = selection.get_selected()
if not iter: return False
Main.game.goto_game_move(model.get_path(iter))
return True
def open(self, book):
"connect to the model, install callbacks"
self._book = book
self.set_model(book)
column = gtk.TreeViewColumn('Book')
column.set_fixed_width(210)
self.append_column(column)
cell = CellRendererWrap()
column.pack_start(cell, False)
w = column.get_fixed_width()
cell.set_property('width', w - 20)
column.set_cell_data_func(cell, self.do_markup)
self.connect('row-activated', self.on_activate_row)
selection = self.get_selection()
selection.connect('changed', self.on_change_selection)
def do_markup (self, column, cell, model, iter):
"add annotation and headers to cells markup"
name = model.get_value(iter, model.COL_NAME)
        name = name.replace('&', '&amp;')  # escape for pango markup
if model.get_value(iter, model.COL_TURN) == Position.WHITE:
norm = '<span foreground="#C0000C">%s</span>'
bold = '<b>%s</b>'
else:
norm = '%s'
bold = '<b>%s</b>'
header = model.get_value(iter, model.COL_HEAD)
if header:
black, white, result = \
header['black'], header['white'], header['result']
markup = bold % name + '\nBlack: %s' % black \
+ '\nWhite: %s' % white + '\nResult: %s' % result
else:
markup = norm % name
stren = model.get_value(iter, model.COL_STREN)
if stren:
markup = markup + stren
anno = model.get_value(iter, model.COL_ANNO)
if anno:
            anno = anno.replace('&', '&amp;')  # escape for pango markup
markup = markup + '\n' + anno
cell.set_property('markup', markup)
def connect_model(self, toggle=True):
"toggle/connect from book"
if toggle == False:
self.set_model(None)
else:
self.set_model(self._book)
gobject.type_register(BookView) # make widget available to libglade
# ===========
# E N G I N E
# ===========
from ctypes import cdll, c_int, c_double, c_char_p, c_buffer, Structure
from ctypes import sizeof, byref
class CBcoor(Structure):
"cb api coordinates structure"
_fields_ = [("x", c_int), ("y", c_int)]
class CBmove(Structure):
"cb api move structure"
_fields_ = [("jumps", c_int),
("newpiece", c_int),
("oldpiece", c_int),
("mfrom", CBcoor),
("mto", CBcoor),
("path", CBcoor * 12),
("mdel", CBcoor * 12),
("delpiece", c_int * 12)]
class Engine (gobject.GObject):
"""interface to the checkers engine dll
on init this class loads an engine. it also translates between
the different data types of the C-api and the python game
unlike "enginecommand()" and "islegal()", which return in an
instant, the "getmove()" function needs to be in a thread: when
its done, it will install game.do_enginemove as an idle task
in gtk.
getmove will change the game.lock, so feedback can know, when
the engine stopped. gtk seems to cycle thread locks, so not
deleting it, should not leak.
there is only ever one engine active at a time, as the CBapi
does not support thinking on the opponents time. so its ok to
lock the game, not the engine thread.
engines have to conform to the CheckerBoard API by Martin Fierz.
engines are written eg. in C, and can be loaded and removed at
runtime
"""
# game result codes
DRAW = 0
WIN = 1
LOSS = 2
UNKNOWN = 3
def __init__(self, enginefile):
"load a dll, set globals: name, gametype, about, help"
gobject.GObject.__init__(self)
try:
self.engine = cdll.LoadLibrary(enginefile)
except OSError:
Fatal('Invalid engine, please remove:\n\n'
+ enginefile)
self.name = self.about = self.help = ''
res = self.enginecommand('name')
if res[0]:
self.name = res[1]
res = self.enginecommand('about')
if res[0]:
self.about = res[1]
res = self.enginecommand('help')
if res[0]:
self.help = res[1]
def get(self, key):
"shorthand enginecommand get: gametype..."
res = self.enginecommand('get ' + key)
if res[0]:
return res[1]
return False
def cbcoor2num(self, cbcoor):
"convert CBapi coordinate structure to board number"
x = cbcoor.x
y = cbcoor.y
if Main.game.gametype == Main.game.ENGLISH:
y = 8 - y - 1
elif Main.game.gametype == Main.game.MAFIERZ:
x = 8 - x - 1
return Main.game.coor2num(x, y)
def pos2cbboard(self, position, board):
"""copy position to CBapi board
in CBapi, origin is SW, in capers origin is NW"""
cells = 8
if Main.game.gametype == Main.game.INTERNL: cells = 10
if Main.game.gametype == Main.game.ENGLISH:
for s in position:
v, x, y = s[Position.COL_VAL], \
s[Position.COL_X], s[Position.COL_Y]
if v:
board[x][cells-y-1] = v
elif Main.game.gametype == Main.game.MAFIERZ:
for s in position:
v, x, y = s[Position.COL_VAL], \
s[Position.COL_X], s[Position.COL_Y]
if v:
board[cells-x-1][y] = v
else:
assert 0 # gametype not supported
if 0: # print board
squares = "- wb WB "
for x in range(cells):
print ' '.join([squares[num] for num in board[x]])
print
# int enginecommand(char str[256], char reply[256]);
def enginecommand(self, command):
"mostly 'get gametype' and 'name'"
res = 0
buf = c_buffer(256) # create_string_buffer
        # C signature, informational only: a bare local assignment does not
        # register argtypes with ctypes
        argtypes = [c_char_p, c_char_p]
res = self.engine.enginecommand(command, buf)
return res, buf.value
# int islegal(int b[8][8], int color, int from, int to,
# struct CBmove *move);
def islegal(self, list, color, position, cbmove):
"check move in list, return False if illegal, else return list"
cells = 8
if len(position) == 100: cells = 10
board = ((c_int * cells) * cells)()
color = c_int(color)
mfrom = c_int(list[0])
mto = c_int(list[-1])
cbmove = CBmove()
self.pos2cbboard(position, board)
        # call engine (argtypes list is informational only, see enginecommand)
        argtypes = [(c_int * cells) * cells, c_int, c_int, c_int, CBmove]
res = self.engine.islegal(board, color, mfrom, mto, byref(cbmove))
steps = [self.cbcoor2num(cbmove.mfrom)]
for i in range(1, cbmove.jumps):
steps.append(self.cbcoor2num(cbmove.path[i]))
steps.append(self.cbcoor2num(cbmove.mto))
# collapse duplicate fields in movelist
s = steps[-1]
for i in range(len(steps)-2, -1, -1):
if steps[i] == s: del steps[i]
else: s = steps[i]
huffs = []
for i in range(cbmove.jumps):
huffs.append(self.cbcoor2num(cbmove.mdel[i]))
return [res, steps, cbmove.newpiece, cbmove.oldpiece, huffs]
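    # Illustrative sketch (hypothetical move): for a legal double jump islegal
    # returns something like
    #   [1, [12, 19, 26], newpiece, oldpiece, [16, 23]]
    # i.e. result flag, squares visited, piece codes, captured squares.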
def showbuf(self, lock, buf):
"engine feedback, in timeout, stop when lock lost/released"
if not lock.locked():
return False
if lock != Main.game.lock:
return False
Main.feedback.e_push(buf.value)
return lock.locked()
def playnow(self, lock, playnow):
"engine break, in timeout, stop when break or lock lost/released"
if not Main.game.engines_loop:
playnow.value = 1
return False
if lock != Main.game.lock:
return False
return lock.locked()
# int getmove(int b[8][8],int color, double maxtime, char str[255],
# int *playnow, int info, int unused, struct CBmove *move);
def getmove(self, data):
"wraps the threaded call to the engine, always return False"
assert not Main.game.lock.locked()
Main.game.lock = thread.allocate_lock()
Main.game.lock.acquire()
        # keep the thread start out of the assert: under python -O the
        # assert (and its side effect) would be stripped
        tid = thread.start_new_thread(
            self.getmove_thread, (Main.game.lock, data))
        assert tid
def getmove_thread(self, lock, data):
"start searching a move; in a thread"
color, maxtime, position = data
cells = 8
if len(position) == 100: cells = 10
board = ((c_int * cells) * cells)()
self.pos2cbboard(position, board)
color = c_int(color)
maxtime = c_double(maxtime)
buf = c_buffer(1024) # create_string_buffer
playnow = c_int(0)
info = c_int(0)
moreinfo = c_int(0)
cbmove = CBmove()
# let main thread handle feedback, break
gobject.timeout_add(200, self.showbuf, lock, buf)
gobject.timeout_add(100, self.playnow, lock, playnow)
        # informational only, argtypes not registered with ctypes
        argtypes = [(c_int * cells) * cells, c_double, c_char_p,
            c_int, c_int, c_int, CBmove]
res = self.engine.getmove(board, color, maxtime, buf,
byref(playnow), info, moreinfo, byref(cbmove))
steps = [self.cbcoor2num(cbmove.mfrom)]
for i in range(1, cbmove.jumps):
steps.append(self.cbcoor2num(cbmove.path[i]))
steps.append(self.cbcoor2num(cbmove.mto))
# collapse duplicate fields in movelist
s = steps[-1]
for i in range(len(steps)-2, -1, -1):
if steps[i] == s: del steps[i]
else: s = steps[i]
huffs = []
for i in range(cbmove.jumps):
huffs.append(self.cbcoor2num(cbmove.mdel[i]))
# let main thread handle the move
gobject.idle_add(Main.game.do_enginemove,
(res, steps, cbmove.newpiece, cbmove.oldpiece, huffs, lock))
gobject.type_register(Engine) # make widget available to Player ListStore
# =========
# B O A R D
# =========
class CheckerBoard(gnome.canvas.Canvas):
"""present the game visually
- the user may drag pieces (event handler)
- the game may move, take, crown pieces (public method)
the board does not know the rules, as soon as a piece is dropped,
the board will ask the game if the move was legal, and if it
    wasn't, will put the piece back to the start; otherwise, it will
pass the result of the query on to the game
board coordinates start in the top-left corner (NORTHWEST), the
other classes send and get calibrated coordinates (0-[79])
the board also does not know the numbering scheme and where the
double corner lies. this information is included in the position
passed when a new board is setup
the board resizes itself to match the setup, and flips, if asked
self.busy is true, while a piece is moved by user or engine
"""
# private globals
_grid_width = 0
_board_width = 0
_temp_move = []
_gametype = 0
_flipped = False # True when upsidedown
_x1 = _y1 = 0 # drag start point
busy = False # True while move_piece
edit = False # True while editing
    def __init__(self):
"never called, should libglade do that?"
assert 0
def new(self, gametype, setup, flip):
"create board backdrop, pieces, numbers from setup"
self._gametype = gametype
self._flipped = False
# grid_width is derived from loaded pixmap
scenefile = Main.prefs.get('look', 'scene')
pb = gtk.gdk.pixbuf_new_from_file(scenefile)
gw = pb.get_height()
self._grid_width = gw
# board_width is derived from passed setup
assert(len(setup) in (64, 100))
if (len(setup) == 64):
bw = gw * 8
elif (len(setup) == 100):
bw = gw * 10
self._board_width = bw
if self.get_size() != (bw, bw):
self.set_size_request(bw, bw)
self.set_scroll_region(0, 0, bw, bw)
# delete previous canvas items
        if hasattr(self, 'background'):
            self.background.destroy()
            del self.background
        if hasattr(self, 'numbers'):
            self.numbers.destroy()
            del self.numbers
        if hasattr(self, 'pieces'):
            self.pieces.destroy()
            del self.pieces
# all new, scene pixmap might have changed
self.background = self.root().add(gnome.canvas.CanvasPixbuf)
self.numbers = self.root().add(gnome.canvas.CanvasGroup)
self.pieces = self.root().add(gnome.canvas.CanvasGroup)
bg = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, bw, bw)
bm = pb.subpixbuf(gw*2, 0, gw, gw)
wm = pb.subpixbuf(gw*3, 0, gw, gw)
bk = pb.subpixbuf(gw*4, 0, gw, gw)
wk = pb.subpixbuf(gw*5, 0, gw, gw)
# map squares e=0, f=16
# map pieces w=5, b=6, W=9, B=10
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
squares = [1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
figures = [0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
pixbufs = [0, 0, 0, 0, 0,wm,bm, 0, 0,wk,bk, 0, 0, 0, 0, 0, 0]
self.pixbufs = pixbufs
i = 0
for s in setup:
n, v, x, y = s[Position.COL_NUM], s[Position.COL_VAL], \
s[Position.COL_X], s[Position.COL_Y]
# background
pb.copy_area(squares[v] * gw,
0, gw, gw, bg, x * gw, y * gw)
# numbers
if squares[v]:
m = self.numbers.add(gnome.canvas.CanvasText,
anchor=gtk.ANCHOR_SOUTH_EAST)
m.set(text=n, x=x * gw + gw, y=y * gw + gw)
# pieces
if figures[v]:
p = self.pieces.add(gnome.canvas.CanvasPixbuf)
p.set(pixbuf=pixbufs[v], x=x * gw, y=y * gw)
p.connect('event', self.on_piece_event)
setup[i][Position.COL_PIECE] = p
i += 1
self.connect('event', self.on_board_event)
self.background.set(pixbuf=bg)
del pb, bg, squares, figures, m, p
self.flip(flip)
def flip(self, flip='flip'):
"quickly rotate board"
if flip == self._flipped:
return self._flipped
# if called from action
if flip == 'flip':
self._flipped = not self._flipped
else:
self._flipped = flip
gw = self._grid_width
bw = self._board_width
for n in self.numbers.item_list:
a, b, x, y = n.get_bounds()
x, y = n.i2w(x, y)
n.set(x=bw-x+gw, y=bw-y+gw)
for p in self.pieces.item_list:
a, b, x, y = p.get_bounds()
x, y = p.i2w(x, y)
p.set(x=bw-x, y=bw-y)
def setposition(self, setup):
"quickly switch to setup position, only same game"
figures = [0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0]
gw = self._grid_width
bw = self._board_width
for s in setup:
n, v, x, y, p = s[Position.COL_NUM], s[Position.COL_VAL], \
s[Position.COL_X], s[Position.COL_Y], s[Position.COL_PIECE]
if figures[v]:
if self._flipped:
x, y = p.w2i(bw - x * gw - gw, bw - y * gw - gw)
else:
x, y = p.w2i(x * gw, y * gw)
p.set(pixbuf=self.pixbufs[v], x=x, y=y)
p.show()
elif p:
p.hide()
#self.update_now()
def on_board_event(self, num, event):
"handle events on board, ie. menu and edit mode"
if event.type != gtk.gdk.BUTTON_PRESS: return False
if self.busy: return False
# context menu
if event.button == 3:
menu = Main.gui['Menu']
            menu.popup(None, None, None, event.button, event.time)
return True
if not self.edit: return False
gw = self._grid_width
bw = self._board_width
cells = bw / gw
x = math.floor(event.x / gw)
y = math.floor(event.y / gw)
num = Main.game.coor2num(x, y)
if not num: return False
# promotion cycle: empty, bm, bk, wm, wk, empty
value = Main.game.num2val(num)
if not value:
value = Position.BLACK | Position.MAN
elif value == Position.WHITE | Position.KING:
value = 0
elif value & Position.MAN:
value ^= Position.MAN
value |= Position.KING
elif value & Position.KING:
value ^= Position.KING
value |= Position.MAN
value ^= Position.CC
Main.game.promote(num, value)
return True
def on_piece_event(self, piece, event):
"handle events on pieces, ie. dragndrop"
if self.edit: return True
if Main.game.lock.locked(): return True
gw = self._grid_width
bw = self._board_width
cells = bw / gw
# drag
if event.type == gtk.gdk.BUTTON_PRESS:
if self.busy: return False
x = math.floor(event.x / gw)
y = math.floor(event.y / gw)
if self._flipped:
num = Main.game.coor2num(cells - x - 1, cells - y - 1)
else:
num = Main.game.coor2num(x, y)
assert num
# intermediate square
if self._x1:
self._temp_move.append(num)
return True
# starting square
self._x1 = self._x = event.x
self._y1 = self._y = event.y
"""
################
# TODO item.grab
################
"""
piece.raise_to_top()
self._temp_move = [num]
self.busy = True
return True
# piece dropped
if self._x1 == 0:
self.busy = False
return True
# drop
if event.type == gtk.gdk.BUTTON_RELEASE:
x = math.floor(event.x / gw)
y = math.floor(event.y / gw)
if self._flipped:
num = Main.game.coor2num(cells - x - 1, cells - y - 1)
else:
num = Main.game.coor2num(x, y)
# ending square
self._temp_move.append(num)
legal = Main.game.engine_islegal(self._temp_move)
# not legal, return to start
if not legal[0]:
u = self._x1 - self._x
v = self._y1 - self._y
piece.move(u, v)
self._x1 = self._y1 = 0
self._temp_move = []
self.busy = False
return True
# legal, snap piece to grid
x *= gw
y *= gw
x, y = piece.w2i(x, y)
piece.set(x=x, y=y)
self._x1 = self._y1 = 0
Main.game.do_usermove(self._temp_move, legal)
self.busy = False
return True
# ignore other events
if event.type != gtk.gdk.MOTION_NOTIFY:
return True
# off board, return to start
if event.x < 0 or event.y < 0 or event.x > bw or event.y > bw:
u = self._x1 - self._x
v = self._y1 - self._y
piece.move(u, v)
self._x1 = self._y1 = 0
self.busy = False
return False
u = event.x - self._x
v = event.y - self._y
piece.move(u, v)
self._x = event.x
self._y = event.y
return True
def take_piece(self, num):
"hide piece on square <num>"
piece = Main.game.num2piece(num)
assert piece
piece.hide()
self.update_now()
def set_piece(self, num, value):
"make piece on square <num> of <value>, return new piece"
piece = Main.game.num2piece(num)
if not self.edit:
assert piece
if not value:
piece.destroy()
return 0
elif not piece:
piece = self.pieces.add(gnome.canvas.CanvasPixbuf)
x, y = Main.game.num2coor(num)
gw = self._grid_width
piece.set(pixbuf=self.pixbufs[value], x=x * gw, y=y * gw)
piece.connect('event', self.on_piece_event)
else:
piece.set(pixbuf=self.pixbufs[value])
return piece
    def move_piece(self, a, b):
"animate move, set board to 'busy'"
def step(x, y):
"diagonal path"
u = v = 1
if x < 0: u = -1
if y < 0: v = -1
for i in xrange(max(abs(x),abs(y))):
if i > abs(x): u = 0
if i > abs(y): v = 0
yield((u, v))
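        # step() yields per-pixel unit offsets along the (roughly) diagonal
        # path between squares, dropping to a straight line once one axis
        # is exhausted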
piece = Main.game.num2piece(a)
assert piece
self.busy = True
piece.raise_to_top()
gw = self._grid_width
bw = self._board_width
x1, y1 = Main.game.num2coor(a)
x2, y2 = Main.game.num2coor(b)
if self._flipped:
x1, y1 = bw - x1, bw - y1
x2, y2 = bw - x2, bw - y2
for u, v in step((x2 - x1) * gw, (y2 - y1) * gw):
piece.move(u, v)
self.update_now()
self.busy = False
def move_piece2(self, a, b):
"animate move in gtk.idle, set board to 'busy'"
def step(x, y):
"diagonal path"
u = v = 1
if x < 0: u = -1
if y < 0: v = -1
for i in xrange(max(abs(x),abs(y))):
if i > abs(x): u = 0
if i > abs(y): v = 0
piece.move(u, v)
yield True
self.busy = False
yield False
def wait(x, y):
if self.busy:
yield True
self.busy = True
gobject.idle_add(step(x, y).next)
yield False
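        # the generators double as idle callbacks: gobject.idle_add invokes
        # .next() until it yields False, so wait() defers until the previous
        # animation releases 'busy', then step() moves the piece one pixel
        # per idle iteration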
piece = Main.game.num2piece(a)
assert piece
piece.raise_to_top()
gw = self._grid_width
x1, y1 = Main.game.num2coor(a)
x2, y2 = Main.game.num2coor(b)
gobject.idle_add(wait((x2 - x1) * gw, (y2 - y1) * gw).next)
gobject.type_register(CheckerBoard) # make widget available to libglade
# =============
# P L A Y E R S
# =============
import glob
class Players(gtk.ListStore):
"""list the possible opponents in a game
human is always first, then comes the list of available engines
human gets a gametype of 0, to be distinguishable from engines
subclasses liststore, to be suitable as a model for the engines
popup in the "New..." dialogue
"""
COL_FILE = 0
COL_NAME = 1
COL_GAMETYPE = 2
COL_GAMENAME = 3
COL_ABOUT = 4
COL_HELP = 5
COL_ENGINE = 6
def __init__(self):
"human is first, then load engines, ask them for gametype etc."
super(Players, self).__init__(str, str, int, str, str, str, Engine)
# human player, aka user
name = Main.prefs.get('player', 'name')
self.append(['human', name, 0, 'any', 'Human', 'None', None])
# engines
libdir = Main.prefs.get('paths', 'engines')
if os.name == 'nt':
search = os.path.join(libdir, '*.dll')
else:
search = os.path.join(libdir, '*.so')
engines = glob.glob(search)
for fn in engines:
engine = Engine(fn)
try:
name, about, help = engine.name, \
engine.about, engine.help
gametype = int(engine.get('gametype'))
except:
del engine
continue
            gamename = None
            if gametype == Main.game.INTERNL:
gamename = 'International'
elif gametype == Main.game.ENGLISH:
gamename = 'English'
elif gametype == Main.game.ITALIAN:
gamename = 'Italian'
elif gametype == Main.game.RUSSIAN:
gamename = 'Russian'
elif gametype == Main.game.MAFIERZ:
gamename = 'Italian1'
assert gamename
self.append([fn, name, gametype, gamename,
about, help, engine])
# convenience
def gt2engine(self, gametype):
"return an engine for a gametype, the first one found"
iter = self.get_iter_first()
while iter:
if self.get_value(iter, self.COL_GAMETYPE) == gametype:
return self.get_value(iter, self.COL_ENGINE)
iter = self.iter_next(iter)
assert 0
def file2engine(self, filename):
"return an engine for a filename"
iter = self.get_iter_first()
while iter:
if self.get_value(iter, self.COL_FILE) == filename:
return self.get_value(iter, self.COL_ENGINE)
iter = self.iter_next(iter)
assert 0
def file2name(self, file):
"return the name of the engine corresponding to <file>"
iter = self.get_iter_first()
while iter:
if self.get_value(iter, self.COL_FILE) == file:
return self.get_value(iter, self.COL_NAME)
iter = self.iter_next(iter)
assert 0
def name2engine(self, name):
"return the name of the engine corresponding to <file> or None"
iter = self.get_iter_first()
while iter:
if self.get_value(iter, self.COL_NAME) == name:
return self.get_value(iter, self.COL_ENGINE)
iter = self.iter_next(iter)
return None
def name2file(self, name):
"return the name of the engine corresponding to <file> or human"
iter = self.get_iter_first()
while iter:
if self.get_value(iter, self.COL_NAME) == name:
return self.get_value(iter, self.COL_FILE)
iter = self.iter_next(iter)
return 'human'
# =====
# G U I
# =====
class NewDialog(gtk.Dialog):
"""Start new game with options
Options are: gametype, black and white players
The selection of players depends on the gametype, as engines are
specifically adapted to one; only the human player can play all
the different types, the exclude filter takes that into account.
    the unique filter's types list is preset with 0, so as not to show
    the human's "any" gametype
"""
_gametype = 0
_gametypes = [0]
def gt_exclude(self, model, iter):
"exclude entries by global gametype"
gt = model.get_value(iter, Main.players.COL_GAMETYPE)
if gt == 0:
return True
if gt == self._gametype:
return True
return False
def gt_unique(self, model, iter):
"only one entry per gametype"
gt = model.get_value(iter, Main.players.COL_GAMETYPE)
if gt in self._gametypes:
return False
self._gametypes.append(gt)
return True
def on_gt_change(self, gbox, xfil):
"change filter for players"
iter = gbox.get_active_iter()
model = gbox.get_model()
self._gametype = model.get_value(iter, Main.players.COL_GAMETYPE)
xfil.refilter()
bbox = Main.gui['NewBlackBox']
iter = xfil.get_iter_first()
bbox.set_active_iter(iter)
wbox = Main.gui['NewWhiteBox']
iter = xfil.get_iter_first()
wbox.set_active_iter(iter)
def reset(self):
"change filter for players to prefs, called from gui"
if not self._gametype:
self.prepare()
self._gametype = Main.prefs.getint('game', 'type')
# gametypes
gbox = Main.gui['NewGametypeBox']
iter = self.ufil.get_iter_first()
while iter:
if self.ufil.get_value(iter, Main.players.COL_GAMETYPE) \
== self._gametype:
gbox.set_active_iter(iter)
break
iter = self.ufil.iter_next(iter)
else:
assert 0
# players
bbox = Main.gui['NewBlackBox']
wbox = Main.gui['NewWhiteBox']
iter = self.xfil.get_iter_first()
bbox.set_active_iter(iter)
wbox.set_active_iter(iter)
black = Main.prefs.get('game', 'black')
white = Main.prefs.get('game', 'white')
while iter:
file = self.xfil.get_value(iter, Main.players.COL_FILE)
if file == black:
bbox.set_active_iter(iter)
if file == white:
wbox.set_active_iter(iter)
iter = self.xfil.iter_next(iter)
def save(self):
"write current selection to prefs"
bbox = Main.gui['NewBlackBox']
wbox = Main.gui['NewWhiteBox']
model = bbox.get_model()
iter = bbox.get_active_iter()
assert iter
black = self.xfil.get_value(iter, Main.players.COL_FILE)
iter = wbox.get_active_iter()
assert iter
white = self.xfil.get_value(iter, Main.players.COL_FILE)
Main.prefs.set('game', 'type', self._gametype)
Main.prefs.set('game', 'black', black)
Main.prefs.set('game', 'white', white)
Main.prefs.save()
def prepare(self):
self._gametype = Main.prefs.getint('game', 'type')
black = Main.prefs.get('game', 'black')
white = Main.prefs.get('game', 'white')
# exclusive gametype filter
self.xfil = Main.players.filter_new()
self.xfil.set_visible_func(self.gt_exclude)
# unique gametypes filter
self.ufil = Main.players.filter_new()
self.ufil.set_visible_func(self.gt_unique)
# gametypes
gbox = Main.gui['NewGametypeBox']
cell = gtk.CellRendererText()
gbox.pack_start(cell, True)
gbox.add_attribute(cell, 'text', Main.players.COL_GAMENAME)
gbox.set_model(self.ufil)
gbox.connect('changed', self.on_gt_change, self.xfil)
# players
bbox = Main.gui['NewBlackBox']
cell = gtk.CellRendererText()
bbox.pack_start(cell, True)
bbox.add_attribute(cell, 'text', Main.players.COL_NAME)
bbox.set_model(self.xfil)
wbox = Main.gui['NewWhiteBox']
cell = gtk.CellRendererText()
wbox.pack_start(cell, True)
wbox.add_attribute(cell, 'text', Main.players.COL_NAME)
wbox.set_model(self.xfil)
self.reset()
gobject.type_register(NewDialog) # make widget available to libglade
class Feedback(gtk.Statusbar):
"""the statusbar displays mostly what the engine thinks it does"""
def prepare(self):
"setup statusbar contexts: engine, game"
self.e_con = self.get_context_id('Engine')
self.g_con = self.get_context_id('Game')
def e_push(self, message):
"write a message to the engine context, pop old one"
self.pop(self.e_con)
self.push(self.e_con, message)
return False
def g_push(self, message):
"write a message to the game context, pop old one"
self.pop(self.g_con)
self.push(self.g_con, message)
return False
gobject.type_register(Feedback) # make widget available to libglade
class GladeGui:
"""interface with the libglade runtime
the layout of the application window is described in a file
that libglade loads at runtime. signal handlers are installed
in this class' dictionary. widgets are made available as
attributes of this class
display of pieces and numbers can be toggled
"""
types = dict(GnomeCanvas=CheckerBoard,
GtkTreeView=BookView,
GtkDialog=NewDialog,
GtkStatusbar=Feedback)
def __getitem__(self, key):
"Make widgets available as attributes of this class"
return self.tree.get_widget(key)
def __init__(self, gladefile):
"""Load the interface descripton and connect the signals
setup up actions and accelerators, create clipboard"""
self.tree = gtk.glade.XML(gladefile, typedict=self.types)
self.tree.signal_autoconnect(GladeGui.__dict__)
self.editempty = False
# clipboard
self.clipboard = gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD)
# menu
menu = self['Menu']
# action definitions
accelgroup = gtk.AccelGroup()
self['Capers'].add_accel_group(accelgroup)
actiongroup = gtk.ActionGroup('Capers')
self.actiongroup = actiongroup
action = gtk.Action('flip', 'F_lip board', 'Flip board', None)
action.connect('activate', self.flip_cb)
actiongroup.add_action_with_accel(action, '<control>l')
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.ToggleAction('edit', '_Edit board', 'Edit board', None)
action.connect('activate', self.edit_cb)
actiongroup.add_action_with_accel(action, '<control>e')
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('editempty', '_Edit empty board', \
'Edit empty board', None)
action.connect('activate', self.editempty_cb)
actiongroup.add_action_with_accel(action, '<control><shift>e')
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
sep = gtk.SeparatorMenuItem()
sep.show()
menu.append(sep)
action = gtk.Action('copy', '_Copy game', \
'Copy game', gtk.STOCK_COPY)
action.connect('activate', self.copy_cb)
actiongroup.add_action_with_accel(action, None)
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('paste', '_Paste game', \
'Paste game', gtk.STOCK_PASTE)
action.connect('activate', self.paste_cb)
actiongroup.add_action_with_accel(action, None)
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('open', '_Open book', 'Open book', gtk.STOCK_OPEN)
action.connect('activate', self.open_cb)
actiongroup.add_action_with_accel(action, None)
action.connect_proxy(self['Open book'])
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('save', '_Save game', 'Save game', gtk.STOCK_SAVE)
action.connect('activate', self.save_cb)
actiongroup.add_action_with_accel(action, None)
action.connect_proxy(self['Save game'])
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
sep = gtk.SeparatorMenuItem()
sep.show()
menu.append(sep)
action = gtk.Action('new', '_New game', 'New game', gtk.STOCK_NEW)
action.connect('activate', self.new_cb)
actiongroup.add_action_with_accel(action, None)
action.connect_proxy(self['New game'])
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('nwo', '_New game with options',
'New game with options', gtk.STOCK_PROPERTIES)
action.connect('activate', self.nwo_cb)
actiongroup.add_action_with_accel(action, '<control><shift>n')
action.connect_proxy(self['New game with options'])
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
action = gtk.Action('quit', '_Quit', 'Quit program', gtk.STOCK_QUIT)
action.connect('activate', self.on_wm_quit)
actiongroup.add_action_with_accel(action, None)
action.set_accel_group(accelgroup)
action.connect_accelerator()
menu.append(action.create_menu_item())
# action callbacks
def quit_cb(self, *args):
gtk.main_quit()
def new_cb(self, *args):
"new same game"
if not Main.game.lock.locked():
Main.game.new()
def nwo_cb(self, *args):
"new game with options"
if Main.game.lock.locked():
Main.feedback.g_push('Function locked!')
return
Main.feedback.g_push('Select options for new game...')
nwo = Main.gui['New...']
nwo.reset()
nwo.connect("close", lambda w: nwo.hide()) # esc key
if nwo.run() == gtk.RESPONSE_OK:
nwo.save()
Main.game.new()
else:
Main.feedback.g_push('New game cancelled')
nwo.hide()
def open_cb(self, *args):
"load book from pdn file"
if Main.game.lock.locked():
return
Main.feedback.g_push('Select book to open...')
fc = gtk.FileChooserDialog(title='Open...',
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
fc.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("Checkers Books")
filter.add_pattern("*.pdn")
filter.add_mime_type("text/*")
fc.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All Files")
filter.add_pattern("*")
fc.add_filter(filter)
opendir = Main.prefs.get('paths', 'opengame')
fc.set_current_folder(opendir)
if fc.run() == gtk.RESPONSE_OK:
fn = fc.get_filename()
try:
f = open(fn, 'r')
except:
Main.feedback.g_push('Could not open file')
fc.destroy()
return
pdn = Pdn(f, fn)
pdn.parse()
f.close()
# save path
opendir = os.path.dirname(fn)
Main.prefs.set('paths', 'opengame', opendir)
Main.prefs.save()
else:
Main.feedback.g_push('Open book cancelled')
fc.destroy()
def save_cb(self, *args):
"save current game as pdn"
if Main.game.lock.locked():
return
Main.feedback.g_push('Select file for saving...')
fc = gtk.FileChooserDialog(title='Save as...',
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
fc.set_default_response(gtk.RESPONSE_OK)
filter = gtk.FileFilter()
filter.set_name("Checkers Books")
filter.add_pattern("*.pdn")
filter.add_mime_type("text/*")
fc.add_filter(filter)
filter = gtk.FileFilter()
filter.set_name("All Files")
filter.add_pattern("*")
fc.add_filter(filter)
filename, text = Main.book.game2pdn()
filename = filename + '.pdn'
fc.set_current_name(filename)
savedir = Main.prefs.get('paths', 'savegame')
fc.set_current_folder(savedir)
if fc.run() == gtk.RESPONSE_OK:
fn = fc.get_filename()
f = open(fn, 'w')
f.write(text)
            f.close()
# save path
savedir = os.path.dirname(fn)
Main.prefs.set('paths', 'savegame', savedir)
Main.prefs.save()
Main.feedback.g_push('Game saved: ' + fn)
else:
Main.feedback.g_push('Save game cancelled')
fc.destroy()
def copy_cb(self, *args):
"copy game"
if Main.game.lock.locked():
return
Main.feedback.g_push('Copy current game to clipboard')
filename, text = Main.book.game2pdn()
self.clipboard.set_text(text)
Main.feedback.g_push('Copied current game to clipboard')
def paste_cb(self, *args):
"paste game"
if Main.game.lock.locked():
return
Main.feedback.g_push('Paste book from clipboard')
text = self.clipboard.wait_for_text()
if not text or text == '':
Main.feedback.g_push('Clipboard empty')
return
pdn = Pdn(text, 'clipboard')
pdn.parse()
def edit_cb(self, *args):
"edit board toggle"
action = self.actiongroup.get_action('edit')
if Main.board.edit:
Main.game.stop_edit()
Main.board.edit = False
action.set_active(False)
else:
Main.game.start_edit(self.editempty)
Main.board.edit = True
action.set_active(True)
assert action.get_active() == Main.board.edit
self.editempty = False
def editempty_cb(self, *args):
"edit empty board toggle, set global, then call edit board"
action = self.actiongroup.get_action('edit')
self.editempty = True
if Main.board.edit:
action.set_active(False)
else:
action.set_active(True)
def flip_cb(self, *args):
"flip board"
if Main.game.lock.locked():
return
Main.board.flip()
# glade handlers
def on_wm_quit(widget, event):
Main.gui.quit_cb()
def on_button_begin(widget):
Main.game.goto_begin()
def on_button_prev(widget):
Main.game.goto_prev()
def on_button_next(widget):
Main.game.goto_next()
def on_button_end(widget):
Main.game.goto_end()
def on_button_game_prev(widget):
Main.game.goto_game_prev()
def on_button_game_next(widget):
Main.game.goto_game_next()
def on_button_go(widget):
Main.game.engines_go()
def on_button_stop(widget):
Main.game.engines_stop()
# =========
# P R E F S
# =========
import ConfigParser
class Prefs(ConfigParser.RawConfigParser):
"""read, manage and save settings
- the player has a name
- the game has a gametype, a black and a white player
players may be "human" or the path to an engine
- the look has a scene and a glade file
    this is just the last game played; no checking is done that
    engine and gametype match; prefs are synced from "New..."
preferences are saved in a file "prefs" in
$XDG_CONFIG_HOME/capers or in
$HOME/.config/capers or in $PWD
"""
def pmkdir(self, newdir):
"works like <mkdir -p>"
if os.path.isdir(newdir):
return
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
self.pmkdir(head)
if tail:
os.mkdir(newdir)
def find(self):
"find or invent the preferences file, return filename"
prefsdir = os.getenv('XDG_CONFIG_HOME')
if not prefsdir:
prefsdir = os.getenv('HOME')
if prefsdir:
prefsdir = os.path.join(prefsdir, '.config')
prefsdir = os.path.join(prefsdir, 'capers')
else:
prefsdir = os.getcwd()
self.pmkdir(prefsdir)
return os.path.join(prefsdir, 'prefs')
def save(self):
"save preferences to default location"
assert self.prefsfile
file = open(self.prefsfile, 'w')
self.write(file)
        file.close()
def __init__(self):
"load settings, make defaults"
# not a new style class
#super(Prefs, self).__init__()
ConfigParser.RawConfigParser.__init__(self)
self.prefsfile = self.find()
self.read(self.prefsfile)
# installed dir
cwd = sys.path[0].rstrip("/bin")
# player
username = False
try:
username = self.get('player', 'name')
except ConfigParser.NoSectionError:
self.add_section('player')
except ConfigParser.NoOptionError:
pass
if not username:
username = os.getenv('LOGNAME')
if not username:
username = os.getenv('USERNAME')
if not username:
username = 'Player'
self.set('player', 'name', username)
# paths
libdir = False
try:
libdir = self.get('paths', 'engines')
except ConfigParser.NoSectionError:
self.add_section('paths')
except ConfigParser.NoOptionError:
pass
if not libdir or not os.path.isdir(libdir):
libdir = os.path.join(cwd, 'lib')
#self.pmkdir(libdir)
self.set('paths', 'engines', libdir)
opendir = False
try:
opendir = self.get('paths', 'opengame')
except ConfigParser.NoSectionError:
self.add_section('paths')
except ConfigParser.NoOptionError:
pass
if not opendir or not os.path.isdir(opendir):
opendir = os.path.join(cwd, 'games')
self.set('paths', 'opengame', opendir)
savedir = False
try:
savedir = self.get('paths', 'savegame')
except ConfigParser.NoSectionError:
self.add_section('paths')
except ConfigParser.NoOptionError:
pass
if not savedir or not os.path.isdir(savedir):
savedir = os.getenv('XDG_DATA_HOME')
if savedir and os.path.isdir(savedir):
savedir = os.path.join(savedir, 'capers')
self.pmkdir(savedir)
else:
savedir = os.getenv('HOME')
assert(savedir)
self.set('paths', 'savegame', savedir)
# engines
enginefile = False
try:
enginefile = self.get('engines', 'default')
except ConfigParser.NoSectionError:
self.add_section('engines')
except ConfigParser.NoOptionError:
pass
if not enginefile or not os.path.isfile(enginefile):
enginefile = os.path.join(libdir, 'simplech')
if os.name == 'nt':
enginefile = enginefile + '.dll'
else:
enginefile = enginefile + '.so'
if not os.access(enginefile, os.F_OK | os.R_OK):
Fatal('Default checkers engine not found:\n\n'
+ enginefile)
self.set('engines', 'default', enginefile)
maxtime = 2
try:
maxtime = self.getint('engines', 'maxtime')
except ConfigParser.NoSectionError:
self.add_section('engines')
self.set('engines', 'maxtime', 2)
except ConfigParser.NoOptionError:
self.set('engines', 'maxtime', 2)
if maxtime < 1:
self.set('engines', 'maxtime', 1)
# game
gametype = False
try:
gametype = self.getint('game', 'type')
except ConfigParser.NoSectionError:
self.add_section('game')
except ConfigParser.NoOptionError:
pass
if not gametype or gametype not in (Main.game.INTERNL,
Main.game.ENGLISH, Main.game.ITALIAN,
Main.game.RUSSIAN, Main.game.MAFIERZ):
gametype = Main.game.ENGLISH
self.set('game', 'type', gametype)
black = False
try:
black = self.get('game', 'black')
except ConfigParser.NoSectionError:
self.add_section('game')
self.set('game', 'black', 'human')
except ConfigParser.NoOptionError:
self.set('game', 'black', 'human')
black = self.get('game', 'black')
if black != 'human' and not os.access(black, os.F_OK | os.R_OK):
self.set('game', 'black', 'human')
white = False
try:
white = self.get('game', 'white')
except ConfigParser.NoSectionError:
self.add_section('game')
self.set('game', 'white', self.get('engines', 'default'))
except ConfigParser.NoOptionError:
self.set('game', 'white', self.get('engines', 'default'))
white = self.get('game', 'white')
if white != 'human' and not os.access(white, os.F_OK | os.R_OK):
self.set('game', 'white', 'human')
timeout = 500
try:
timeout = self.getint('game', 'timeout')
except ConfigParser.NoSectionError:
self.add_section('game')
self.set('game', 'timeout', 500)
except ConfigParser.NoOptionError:
self.set('game', 'timeout', 500)
if timeout < 100:
self.set('game', 'timeout', 100)
# look
scenefile = False
try:
scenefile = self.get('look', 'scene')
except ConfigParser.NoSectionError:
self.add_section('look')
except ConfigParser.NoOptionError:
pass
if not scenefile or not os.path.isfile(scenefile):
scenefile = os.path.join(cwd, 'share', 'scene.xpm')
if not os.access(scenefile, os.F_OK | os.R_OK):
Fatal('Scene pixmap not found:\n\n'
+ scenefile)
self.set('look', 'scene', scenefile)
gladefile = False
try:
gladefile = self.get('look', 'glade')
except ConfigParser.NoSectionError:
self.add_section('look')
except ConfigParser.NoOptionError:
pass
if not gladefile or not os.path.isfile(gladefile):
gladefile = os.path.join(cwd, 'share', 'capers.glade')
if not os.access(gladefile, os.F_OK | os.R_OK):
Fatal('User interface description not found:\n\n'
+ gladefile)
self.set('look', 'glade', gladefile)
self.save()
# =======
# M A I N
# =======
class Fatal(gtk.Window):
"the program cannot continue, display dialog telling the reason"
def __init__(self, text):
gobject.GObject.__init__(self)
message = gtk.MessageDialog(None, gtk.DIALOG_MODAL,
gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, text)
response = message.run()
message.destroy()
sys.exit(1)
class Main:
"""create instances of the classes, map these to Main namespace
Position(), Game(), Book(), Prefs(), Players(), GladeGui(),
Feedback(), BookView(), CheckerBoard()
is this a singleton?
"""
def __init__(self):
try:
gobject.threads_init()
except:
Fatal('No threads in pygtk')
Main.pos = Position()
Main.game = Game()
Main.book = Book()
Main.prefs = Prefs()
Main.players = Players()
Main.gui = GladeGui(Main.prefs.get('look', 'glade'))
Main.feedback = Main.gui['Feedback']
Main.feedback.prepare()
Main.bookview = Main.gui['Book']
Main.bookview.open(Main.book)
Main.board = Main.gui['Board']
if len(sys.argv) > 1:
fn = sys.argv[-1]
try:
f = open(fn, 'r')
except:
Fatal('No such file: %s' % fn)
pdn = Pdn(f, fn)
pdn.parse()
f.close()
del pdn
else:
Main.game.new()
gtk.main()
| hungerburg/capers | share/capers.py | Python | gpl-2.0 | 86,514 | [
"TINKER"
] | aa35fe096dca00a5938fa46f0378443f407394af3f7ff39c1c6f5f470961071e |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# statpath - emulate the un*x stat function on user-home paths
# Copyright (C) 2003-2011 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Emulate the un*x function with the same name"""
import os
import glob
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.parseflags import verbose
from shared.validstring import valid_user_path
def signature():
"""Signature of the main function"""
defaults = {'path': REJECT_UNSET, 'flags': ['']}
return ['stats', defaults]
def stat_path(real_path, logger):
"""Call OS stat on provided path"""
try:
stat_info = os.stat(real_path)
except Exception, err:
# Don't give away FS information - only log full failure reason
        logger.warning('statpath failed to stat %s: %s' % (real_path, err))
return (False, 'Internal error: stat failed!')
stat = {}
try:
stat['device'] = stat_info.st_dev
stat['inode'] = stat_info.st_ino
stat['mode'] = stat_info.st_mode
stat['nlink'] = stat_info.st_nlink
stat['uid'] = stat_info.st_uid
stat['gid'] = stat_info.st_gid
stat['rdev'] = stat_info.st_rdev
stat['size'] = stat_info.st_size
stat['atime'] = stat_info.st_atime
stat['mtime'] = stat_info.st_mtime
stat['ctime'] = stat_info.st_ctime
except Exception, exc:
return (False, 'Could not get all stat info: %s' % exc)
return (True, stat)
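# On success stat_path returns (True, <dict of stat fields>); on failure it
# returns (False, <generic error string>), so callers can branch on the flag
# without leaking file system details to the client.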
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
client_dir = client_id_dir(client_id)
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
flags = accepted['flags']
patterns = accepted['path']
# Please note that base_dir must end in slash to avoid access to other
# user dirs when own name is a prefix of another user name
base_dir = os.path.abspath(os.path.join(configuration.user_home,
client_dir)) + os.sep
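    # Example: for client_dir 'alice', base_dir ends in '.../alice/'; without
    # the trailing os.sep a sibling user 'alicesmith' would also pass a naive
    # prefix check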
status = returnvalues.OK
if verbose(flags):
for flag in flags:
output_objects.append({'object_type': 'text', 'text'
: '%s using flag: %s' % (op_name,
flag)})
for pattern in patterns:
# Check directory traversal attempts before actual handling to avoid
# leaking information about file system layout while allowing
# consistent error messages
unfiltered_match = glob.glob(base_dir + pattern)
match = []
for server_path in unfiltered_match:
real_path = os.path.abspath(server_path)
if not valid_user_path(real_path, base_dir, True):
logger.warning('%s tried to %s restricted path %s ! (%s)'
% (client_id, op_name, real_path, pattern))
continue
match.append(real_path)
# Now actually treat list of allowed matchings and notify if no
# (allowed) match
if not match:
output_objects.append({'object_type': 'file_not_found',
'name': pattern})
status = returnvalues.FILE_NOT_FOUND
stats = []
for real_path in match:
relative_path = real_path.replace(base_dir, '')
try:
(stat_status, stat) = stat_path(real_path, logger)
if stat_status:
if verbose(flags):
stat['name'] = relative_path
stat['object_type'] = 'stat'
stats.append(stat)
else:
output_objects.append({'object_type': 'error_text',
'text': stat})
status = returnvalues.SYSTEM_ERROR
except Exception, exc:
output_objects.append({'object_type': 'error_text',
'text': "%s: '%s': %s" % (op_name,
relative_path, exc)})
logger.error("%s: failed on '%s': %s" % (op_name,
relative_path, exc))
status = returnvalues.SYSTEM_ERROR
continue
output_objects.append({'object_type': 'stats', 'stats'
: stats})
return (output_objects, status)
| heromod/migrid | mig/shared/functionality/statpath.py | Python | gpl-2.0 | 5,631 | [
"Brian"
] | 56e8c78c7f4fca9c31112a7a1a427c2fa7a09175e92476cabe06cb4de0483dfb |
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Common functions for topology building --- :mod:`MDAnalysis.topology.core`
==========================================================================
The various topology parsers make use of functions and classes in this
module. They are mostly of use to developers.
See Also
--------
:mod:`MDAnalysis.topology.tables`
for some hard-coded atom information that is used by functions such as
:func:`guess_atom_type` and :func:`guess_atom_mass`.
"""
# Global imports
import os.path
import numpy as np
from collections import defaultdict
# Local imports
from . import tables
from .guessers import (
guess_atom_element, guess_atom_type,
get_atom_mass, guess_atom_mass, guess_atom_charge,
guess_bonds, guess_angles, guess_dihedrals, guess_improper_dihedrals,
)
from ..core._get_readers import get_parser_for
from ..lib.util import cached
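# Usage sketch (results depend on the bundled tables): topology parsers
# typically call guess_atom_element(name) on a raw atom name and then
# get_atom_mass(element) to fill in fields the file format does not provide.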
#tumbleweed
| MDAnalysis/mdanalysis | package/MDAnalysis/topology/core.py | Python | gpl-2.0 | 1,924 | [
"MDAnalysis"
] | fb7d9def6cf3f67e86bd3dfde1c3480a6f79024ce224c81d12759511bd0ad295 |
"""
pyNEAT
Copyright (C) 2007-2008 Brian Greer
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import math
import random
from Configuration import *
from Mutator import *
from Organism import *
from Genome import *
from Crossover import *
from random_utils import *
class Species:
def __init__(self, id, novel=False):
self.id = id
self.age = 1
self.aveFitness = 0.0
self.maxFitness = 0
self.overallMaxFitness = 0
self.expectedOffspring = 0
self.novel = novel
self.ageOfLastImprovement = 0
self.organisms = []
# descending order of fitness
def __cmp__(self, other):
if len(self.organisms) > 0:
if len(other.organisms) > 0:
return self.organisms[0].__cmp__(other.organisms[0])
else:
return 1
elif len(other.organisms) > 0:
return -1
else:
return 0
def addOrganism(self, organism):
self.organisms.append(organism)
organism.species = self
def removeOrganism(self, organism):
if organism in self.organisms:
self.organisms.remove(organism)
def adjustFitness(self):
ageDebt = self.age - self.ageOfLastImprovement + 1 - Configuration.dropoffAge
if ageDebt == 0:
ageDebt = 1
numOrganisms = len(self.organisms)
for organism in self.organisms:
organism.originalFitness = organism.fitness
if ageDebt >= 1:
# possible graded dropoff
#organism.fitness = organism.fitness * math.atan(ageDebt)
organism.fitness = organism.fitness * 0.01
if self.age <= 10:
organism.fitness *= Configuration.ageSignificance
if organism.fitness < 0.0:
organism.fitness = 0.0001
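            # dividing by the species size is NEAT's explicit fitness
            # sharing: members of a large species share their fitness so
            # no single species can take over the population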
organism.fitness = organism.fitness / numOrganisms
self.organisms.sort()
if self.organisms[0].originalFitness > self.overallMaxFitness:
self.ageOfLastImprovement = self.age
self.overallMaxFitness = self.organisms[0].originalFitness
self.organisms[0].speciesChampion = True
numParents = int(math.floor(Configuration.survivalThreshold * numOrganisms + 1.0))
if numParents < numOrganisms:
for i in range(numParents + 1, numOrganisms):
self.organisms[i].eliminated = True
def calcFitnessStats(self):
self.maxFitness = 0.0
total = 0.0
for organism in self.organisms:
total += organism.fitness
if organism.fitness > self.maxFitness:
self.maxFitness = organism.fitness
self.aveFitness = total / len(self.organisms)
def lastImproved(self):
return self.age - self.ageOfLastImprovement
def reproduce(self, generation, population, species):
poolSize = len(self.organisms) - 1
if poolSize >= 0:
champion = self.organisms[0]
championDone = False
mutationPower = Configuration.weightMutationPower
for i in range(int(self.expectedOffspring)):
mom = None
baby = None
if champion.superChampionOffspring > 0:
mom = champion
newGenome = mom.genome.makeCopy(i)
if champion.superChampionOffspring > 1:
if randfloat() < 0.8 or Configuration.mutateAddSynapseProbability == 0.0:
newGenome.mutateSynapseWeights(Mutator.GAUSSIAN, mutationPower, 1.0)
else:
newGenome.genesis(generation)
newGenome.mutateAddSynapse(population, Configuration.synapseAddTries)
baby = Organism(0.0, newGenome, generation)
if champion.superChampionOffspring == 1:
if champion.populationChampion:
baby.populationChampionChild = True
baby.maxFitness = mom.originalFitness
champion.superChampionOffspring -= 1
elif not championDone and self.expectedOffspring > 5:
mom = champion
newGenome = mom.genome.makeCopy(i)
baby = Organism(0.0, newGenome, generation)
championDone = True
elif poolSize == 0 or randfloat() < Configuration.mutateOnlyProbability:
mom = self.organisms[random.randint(0, poolSize)]
newGenome = mom.genome.makeCopy(i)
if randfloat() < Configuration.mutateAddNeuronProbability:
newGenome.mutateAddNeuron(population)
elif randfloat() < Configuration.mutateAddSynapseProbability:
newGenome.genesis(generation)
newGenome.mutateAddSynapse(population, Configuration.synapseAddTries)
else:
newGenome.tryAllMutations(mutationPower)
baby = Organism(0.0, newGenome, generation)
else:
mom = self.organisms[random.randint(0, poolSize)]
dad = None
if randfloat() > Configuration.interspeciesMateRate:
dad = self.organisms[random.randint(0, poolSize)]
else:
otherSpecies = None
for tries in range(5):
randMultiplier = random.gauss(0, 1) / 4.0
if randMultiplier > 1.0:
randMultiplier = 1.0
speciesIndex = int(math.floor((randMultiplier * (len(species) - 1.0)) + 0.5))
otherSpecies = species[speciesIndex]
if self != otherSpecies:
break
else:
otherSpecies = None
if otherSpecies is not None:
dad = otherSpecies.organisms[0]
else:
dad = mom
if randfloat() < Configuration.mateMultipointProbability:
newGenome = mom.genome.crossover(Crossover.MULTIPOINT, dad.genome, i, mom.originalFitness, dad.originalFitness)
elif randfloat() < (Configuration.mateMultipointAveProbability / (Configuration.mateMultipointAveProbability + Configuration.mateSinglepointProbability)):
newGenome = mom.genome.crossover(Crossover.MULTIPOINT_AVG, dad.genome, i, mom.originalFitness, dad.originalFitness)
else:
newGenome = mom.genome.crossover(Crossover.SINGLEPOINT, dad.genome, i)
if randfloat() > Configuration.mateOnlyProbability or \
dad.genome.id == mom.genome.id or \
dad.genome.getCompatibility(mom.genome) == 0.0:
if randfloat() < Configuration.mutateAddNeuronProbability:
newGenome.mutateAddNeuron(population)
elif randfloat() < Configuration.mutateAddSynapseProbability:
newGenome.genesis(generation)
newGenome.mutateAddSynapse(population, Configuration.synapseAddTries)
else:
newGenome.tryAllMutations(mutationPower)
baby = Organism(0.0, newGenome, generation)
# try mom's species first, then iterate through all other species
# optimization proposed in the NEAT FAQ
if baby.isCompatibleWith(mom):
mom.species.addOrganism(baby)
baby.species = mom.species
else:
found = False
for specie in population.species:
for organism in specie.organisms:
if baby.isCompatibleWith(organism):
specie.addOrganism(baby)
baby.species = specie
found = True
break
if found:
break
if not found:
newSpecies = Species(len(population.species) + 1, novel=True)
newSpecies.addOrganism(baby)
baby.species = newSpecies
population.species.append(newSpecies)
def countOffspring(self, skim):
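        # carry fractional expected offspring across organisms ("skim") so
        # the fractions are not lost to truncation; once the accumulated
        # skim exceeds one, award a whole extra offspring, keep the remainder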
self.expectedOffspring = 0
for organism in self.organisms:
eoWhole = math.floor(organism.expectedOffspring)
eoFractional = math.fmod(organism.expectedOffspring, 1.0)
self.expectedOffspring += eoWhole
skim += eoFractional
if skim > 1.0:
skimWhole = math.floor(skim)
self.expectedOffspring += skimWhole
skim -= skimWhole
return skim
| liquidkarma/pyneat | pyNEAT/Species.py | Python | gpl-2.0 | 9,353 | [
"Brian",
"Gaussian"
] | e36c9da3dc25aae9bf3eac2432abaade529c13ebacc055ed2212095aa0020b63 |
# coding: utf-8
# # risklearning demo 2
#
# In the second risklearning demo, we consider multidimensional non-stationary Poisson processes with dependence structure given by a Gaussian copula.
# In[3]:
import risklearning.learning_frequency as rlf
reload(rlf)
# In[4]:
import pandas as pd
import numpy as np
import scipy.stats as stats
import math
import matplotlib.style
matplotlib.style.use('ggplot')
import ggplot as gg
#get_ipython().magic(u'matplotlib inline')
# ## Set up frequency distribution to generate samples
# In[35]:
# Read in Poisson parameters used to simulate loss counts
lambdas_df = pd.read_csv('data/lambdas_gauss_3d.csv')
lambda_start = lambdas_df.head(1).iloc[0]
lambda_end = lambdas_df.tail(1).iloc[0]
print 'Initial lambdas:\n{}'.format(lambda_start)
print '\nFinal lambdas: \n{}'.format(lambda_end)
#%%
lambda_start = lambdas_df['TCEM'][0]
lambda_end = lambdas_df['TCEM'].tail(1).iloc[0]
print('Lambda start value: {}, lambda end value: {}'.format(lambda_start, lambda_end))
lambda_ts = lambdas_df['TCEM']
# Read in simulated loss counts
counts_sim_df = pd.read_csv('data/tcem_1d.csv')
# EDPM: Execution, Delivery and Process Management
# TCEM: Transaction Capture, Execution and Maintenance--think fat-finger mistake
counts_sim_df.head()
# In[52]:
#%% Do MLE (simple average) for the Poisson process
t_start = np.min(counts_sim_df['t'])
t_end = np.max(counts_sim_df['t'])
n_tenors_train = -t_start
n_tenors_test = t_end
counts_train = (counts_sim_df[counts_sim_df.t < 0]).groupby('L2_cat').sum()
counts_test = (counts_sim_df[counts_sim_df.t >= 0]).groupby('L2_cat').sum()
# ## MLE for training data
#
# For the Poisson distribution, the MLE of the intensity (here lambda) is just the average of the counts per model horizon. In practice, OpRisk models sometimes take a weighted average, with the weight linearly decreasing over a period of years (see e.g. "LDA at Work" by Aue and Kalkbrener).
# In[31]:
lambdas_train = counts_train['counts']/n_tenors_train
lambdas_test = counts_test['counts']/n_tenors_test
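# Sketch of the weighted-average variant mentioned above (illustrative only,
# not used below); weights decline linearly going back in time:
#   w = np.linspace(0.5, 1.0, n_tenors_train)   # oldest tenor weighted least
#   lambda_weighted = (counts_per_tenor * w).sum() / w.sum()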
bin_tops = [1,2,3,4,5,6,7,8,9,10,15,101]
# Recall that digitize (used later) defines bins by lower <= x < upper
count_tops =[count - 1 for count in bin_tops]
# Calculate bin probabilities from MLE poisson
poi_mle = stats.poisson(lambdas_train)
poi_bins = rlf.bin_probs(poi_mle, bin_tops)
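# Assumption about rlf.bin_probs: each entry is the probability mass the
# fitted distribution puts on a bin [lower, upper), e.g. for the bin [1, 2)
# that is poi_mle.cdf(1) - poi_mle.cdf(0); the open-ended last bin takes the tail.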
mle_probs = pd.DataFrame({'Count Top': count_tops, 'Probs': poi_bins})
# For later comparison
mle_probs_vals = list(mle_probs.Probs)
# ## Prep simulated losses for neural network
#
# For example
#
# * Use one-hot-encoding for L1 and L2 categories (this will make more sense once we look at multiple dependent categories)
# * Bin count data
# * Normalize tenors (i.e. scale so that first tenor maps to -1 with 0 preserved)
# * Export as numpy arrays to feed into keras / tensorflow
# In[32]:
import warnings
warnings.filterwarnings('ignore') # TODO: improve slicing to avoid warnings
x_train, y_train, x_test, y_test = rlf.prep_count_data(counts_sim_df, bin_tops)
## With tensorflow
import tensorflow as tf
layer_width = [y_train.shape[1]]
x = tf.placeholder(tf.float32, [None, x_train.shape[1]])
W = tf.Variable(tf.zeros([x_train.shape[1], layer_width[0]]))
b = tf.Variable(tf.zeros([layer_width[0]]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, y_train.shape[1]])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# minimal session setup for the scratch graph above; the demo below trains
# with keras rather than this raw TensorFlow graph
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# ## Set up the network architecture and train
#
# We use keras with TensorFlow backend. Later we will look at optimizing metaparameters.
#
# In[33]:
#from keras.optimizers import SGD
#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# rl_train_net is a wrapper for standard keras functionality that
# makes it easier below to optimize hyperparameters
rl_net = rlf.rl_train_net(x_train, y_train, x_test, y_test, [150], n_epoch = 300, optimizer = 'adagrad')
proba = rl_net['probs_nn']
# ## Evaluating the neural network
# Let's see now how the neural network tracks the true distribution over time, and compare with the MLE fitted distribution.
#
# We do this both numerically (Kullback-Leibler divergance) and graphically.
# In[43]:
#% Convert proba from wide to long and append to other probs
mle_probs_vals = list(mle_probs.Probs)
# TODO: Missing last tenor in nn proba (already in x_test, y_test)
probs_list = []
kl_mle_list = []
kl_nn_list = []
for t in range(proba.shape[0]):
nn_probs_t = proba[t]
true_bins_t = rlf.bin_probs(stats.poisson(lambda_ts[-t_start+t]), bin_tops)
probs_t = pd.DataFrame({'Tenor': t, 'Count Top': count_tops, 'Probs True': true_bins_t, 'Probs NN': nn_probs_t, 'Probs MLE': mle_probs_vals}, index = range(t*len(count_tops), t*len(count_tops) + len(count_tops)))
probs_list.append(probs_t)
# Calculate KL divergences
kl_mle_list.append(stats.entropy(true_bins_t, mle_probs_vals))
kl_nn_list.append(stats.entropy(true_bins_t, nn_probs_t))
probs = pd.concat(probs_list)
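# stats.entropy(p, q) with two arguments is the Kullback-Leibler divergence
# KL(p || q) = sum_i p_i * log(p_i / q_i), here measuring how far the MLE and
# NN forecasts sit from the true binned distribution at each tenor.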
# In[44]:
probs_tail = probs[probs.Tenor > 360 ]
gg.ggplot(probs_tail, gg.aes(x='Count Top',weight='Probs True')) + gg.facet_grid('Tenor') + gg.geom_bar() + gg.geom_step(gg.aes(y='Probs MLE', color = 'red')) + gg.geom_step(gg.aes(y='Probs NN', color = 'blue')) + gg.scale_x_continuous(limits = (0,len(count_tops)))
# In[57]:
# KL divergences
kl_df = pd.DataFrame({'Tenor': range(0, t_end+1), 'KL MLE': kl_mle_list, 'KL NN': kl_nn_list})
print kl_df.head()
print kl_df.tail()
#%
# Plot KL divergences
gg.ggplot(kl_df, gg.aes(x='Tenor')) + gg.geom_step(gg.aes(y='KL MLE', color = 'red')) + gg.geom_step(gg.aes(y='KL NN', color = 'blue'))
# # Optimizing network architecture
# In[61]:
# More systematically with NN architecture
# Loop over different architectures, create panel plot
neurons_list = [10, 20,50,100, 150, 200]
#neurons_list = [10, 20,50]
depths_list = [1,2,3]
optimizer = 'adagrad'
#%%
kl_df_list = []
for depth in depths_list:
for n_neurons in neurons_list:
nn_arch = [n_neurons]*depth
print("Training " + str(depth) + " layer(s) of " + str(n_neurons) + " neurons")
rl_net = rlf.rl_train_net(x_train, y_train, x_test, y_test, nn_arch, n_epoch = 300, optimizer = optimizer)
proba = rl_net['probs_nn']
print("\nPredicting with " + str(depth) + " layer(s) of " + str(n_neurons) + " neurons")
probs_kl_dict = rlf.probs_kl(proba, lambda_ts, t_start, t_end+1, bin_tops, mle_probs_vals)
probs = probs_kl_dict['Probs']
kl_df_n = probs_kl_dict['KL df']
kl_df_n['Hidden layers'] = depth
kl_df_n['Neurons per layer'] = n_neurons
kl_df_n['Architecture'] = str(depth) + '_layers_of_' + str(n_neurons) + '_neurons'
kl_df_list.append(kl_df_n)
#%%
kl_df_hyper = pd.concat(kl_df_list)
# In[62]:
# Plot
kl_mle = kl_df_n['KL MLE'] # These values are constant over the above loops (KL between MLE and true distribution)
for depth in depths_list:
    kl_df_depth = kl_df_hyper[kl_df_hyper['Hidden layers'] == depth]
kl_depth_vals = kl_df_depth.pivot(index = 'Tenor', columns = 'Neurons per layer', values = 'KL NN')
kl_depth_vals['KL MLE'] = kl_mle
kl_depth_vals.plot(title = 'Kullback-Leibler divergences from true distribution \n for ' + str(depth) + ' hidden layer(s)', figsize = (16,10))
# In[65]:
# Try again, but now with RMSprop
neurons_list = [10, 20,50]
#neurons_list = [50]
depths_list = [2,3]
optimizer = 'RMSprop'
#%%
kl_df_list = []
for depth in depths_list:
for n_neurons in neurons_list:
nn_arch = [n_neurons]*depth
print("Training " + str(depth) + " layer(s) of " + str(n_neurons) + " neurons")
rl_net = rlf.rl_train_net(x_train, y_train, x_test, y_test, nn_arch, n_epoch = 300, optimizer = optimizer)
proba = rl_net['probs_nn']
print("\nPredicting with " + str(depth) + " layer(s) of " + str(n_neurons) + " neurons")
probs_kl_dict = rlf.probs_kl(proba, lambda_ts, t_start, t_end+1, bin_tops, mle_probs_vals)
probs = probs_kl_dict['Probs']
kl_df_n = probs_kl_dict['KL df']
kl_df_n['Hidden layers'] = depth
kl_df_n['Neurons per layer'] = n_neurons
kl_df_n['Architecture'] = str(depth) + '_layers_of_' + str(n_neurons) + '_neurons'
kl_df_list.append(kl_df_n)
#%%
kl_df_hyper = pd.concat(kl_df_list)
# Plot
kl_mle = kl_df_n['KL MLE'] # These values are constant over the above loops (KL between MLE and true distribution)
for depth in depths_list:
    kl_df_depth = kl_df_hyper[kl_df_hyper['Hidden layers'] == depth]
kl_depth_vals = kl_df_depth.pivot(index = 'Tenor', columns = 'Neurons per layer', values = 'KL NN')
kl_depth_vals['KL MLE'] = kl_mle
kl_depth_vals.plot(title = 'Kullback-Leibler divergences from true distribution \n for ' + str(depth) + ' hidden layer(s)', figsize = (16,10))
# Note that with 50 nodes per layer, the KL error for the RMSprop-trained networks is worse than MLE once we are more than 100 tenors (here, days) from the beginning of the test sample. With more nodes per layer, the results are even worse, though we do not show them here.
# ## Summary and next steps
#
# We can see from the nn_probs data frame that the probability mass of the neural network shifts to the right, as does the underlying Poisson process, with its intensity starting at 1 event per tenor / day at -5 yrs and ending at 4 events per tenor / day at +1 yr.
#
# Next steps:
#
# * Simulate multiple, correlated Poisson processes
# * Test different optimizers
# * Test non-linear non-stationarities
# * Try recurrent neural network (?)
# * Try convolution network (?)
#
#
| munichpavel/risklearning | risklearning_demo_lowLevel.py | Python | mit | 10,292 | [
"Gaussian"
] | fcab6339e66a4e35a7e421f4db064e0b58e1f6935179b27fd7f715f5f1c47113 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes # type: ignore
import sys
from nixnet import errors
class PlatformUnsupportedError(errors.Error):
def __init__(self, platform):
message = '{0} is unsupported by this package.'.format(platform)
super(PlatformUnsupportedError, self).__init__(message, platform)
class XnetNotFoundError(errors.Error):
def __init__(self, *args):
message = (
'Could not find an installation of NI-XNET. Please '
'ensure that NI-XNET is installed on this machine or '
'contact National Instruments for support.')
super(XnetNotFoundError, self).__init__(message, *args)
class XnetFunctionNotSupportedError(errors.Error):
def __init__(self, function):
message = (
'The NI-XNET function "{0}" is not supported in this '
'version of NI-XNET. Visit ni.com/downloads to upgrade your '
'version of NI-XNET.'.format(function))
super(XnetFunctionNotSupportedError, self).__init__(message, function)
class XnetLibrary(object):
"""Proxy Library to consolidate nixnet-specific logic."""
def __init__(self, library):
self._library = library
def __getattr__(self, function):
try:
return getattr(self._library, function)
except AttributeError:
raise XnetFunctionNotSupportedError(function)
def _import_win_lib():
lib_name = "nixnet"
try:
cdll = ctypes.cdll.LoadLibrary(lib_name)
except OSError:
raise XnetNotFoundError()
return XnetLibrary(cdll)
def _import_unsupported():
raise PlatformUnsupportedError(sys.platform)
if sys.platform.startswith('win') or sys.platform.startswith('cli'):
import_lib = _import_win_lib
else:
import_lib = _import_unsupported
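# Usage sketch: callers resolve the C library once via import_lib() and fetch
# entry points lazily; a missing symbol surfaces as
# XnetFunctionNotSupportedError only on first access (the entry point name
# below is illustrative):
#
#   cfunc = import_lib().nxSomeFunction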
| epage/nixnet-python | nixnet/_lib.py | Python | mit | 1,898 | [
"VisIt"
] | 7b59ce6597f23d35158831571310447835d2ad60093680ad1dc9cb38f0dae321 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from six.moves import range
import re
import requests
from ....kodion.utils import FunctionCache
from .json_script_engine import JsonScriptEngine
class Cipher(object):
def __init__(self, context, javascript_url):
self._context = context
self._verify = context.get_settings().verify_ssl()
self._javascript_url = javascript_url
self._object_cache = {}
def get_signature(self, signature):
function_cache = self._context.get_function_cache()
json_script = function_cache.get_cached_only(self._load_json_script, self._javascript_url)
if not json_script:
json_script = function_cache.get(FunctionCache.ONE_DAY, self._load_json_script, self._javascript_url)
if json_script:
json_script_engine = JsonScriptEngine(json_script)
return json_script_engine.execute(signature)
return u''
def _load_json_script(self, javascript_url):
headers = {'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'DNT': '1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'}
url = javascript_url
if not url.startswith('http'):
url = ''.join(['http://', url])
result = requests.get(url, headers=headers, verify=self._verify, allow_redirects=True)
javascript = result.text
return self._load_javascript(javascript)
def _load_javascript(self, javascript):
function_name = self._find_signature_function_name(javascript)
if not function_name:
raise Exception('Signature function not found')
_function = self._find_function_body(function_name, javascript)
function_parameter = _function[0].replace('\n', '').split(',')
function_body = _function[1].replace('\n', '').split(';')
json_script = {'actions': []}
for line in function_body:
# list of characters
split_match = re.match(r'%s\s?=\s?%s.split\(""\)' % (function_parameter[0], function_parameter[0]), line)
if split_match:
json_script['actions'].append({'func': 'list',
'params': ['%SIG%']})
# return
return_match = re.match(r'return\s+%s.join\(""\)' % function_parameter[0], line)
if return_match:
json_script['actions'].append({'func': 'join',
'params': ['%SIG%']})
# real object functions
cipher_match = re.match(
r'(?P<object_name>[$a-zA-Z0-9]+)\.?\[?"?(?P<function_name>[$a-zA-Z0-9]+)"?\]?\((?P<parameter>[^)]+)\)',
line)
if cipher_match:
object_name = cipher_match.group('object_name')
function_name = cipher_match.group('function_name')
parameter = cipher_match.group('parameter').split(',')
for i in range(len(parameter)):
param = parameter[i].strip()
if i == 0:
param = '%SIG%'
else:
param = int(param)
parameter[i] = param
# get function from object
_function = self._get_object_function(object_name, function_name, javascript)
# try to find known functions and convert them to our json_script
slice_match = re.match(r'[a-zA-Z]+.slice\((?P<a>\d+),[a-zA-Z]+\)', _function['body'][0])
if slice_match:
a = int(slice_match.group('a'))
params = ['%SIG%', a, parameter[1]]
json_script['actions'].append({'func': 'slice',
'params': params})
splice_match = re.match(r'[a-zA-Z]+.splice\((?P<a>\d+),[a-zA-Z]+\)', _function['body'][0])
if splice_match:
a = int(splice_match.group('a'))
params = ['%SIG%', a, parameter[1]]
json_script['actions'].append({'func': 'splice',
'params': params})
swap_match = re.match(r'var\s?[a-zA-Z]+=\s?[a-zA-Z]+\[0\]', _function['body'][0])
if swap_match:
params = ['%SIG%', parameter[1]]
json_script['actions'].append({'func': 'swap',
'params': params})
reverse_match = re.match(r'[a-zA-Z].reverse\(\)', _function['body'][0])
if reverse_match:
params = ['%SIG%']
json_script['actions'].append({'func': 'reverse',
'params': params})
return json_script
@staticmethod
def _find_signature_function_name(javascript):
# match_patterns source is youtube-dl
# https://github.com/ytdl-org/youtube-dl/blob/master/youtube_dl/extractor/youtube.py#L1344
# LICENSE: The Unlicense
match_patterns = [
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'(?:\b|[^a-zA-Z0-9$])(?P<name>[a-zA-Z0-9$]{2})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(?P<name>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
r'(["\'])signature\1\s*,\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\.sig\|\|(?P<name>[a-zA-Z0-9$]+)\(',
r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*'
r'(?P<name>[a-zA-Z0-9$]+)\(',
r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<name>[a-zA-Z0-9$]+)\(',
r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<name>[a-zA-Z0-9$]+)\('
]
for pattern in match_patterns:
match = re.search(pattern, javascript)
if match:
return match.group('name')
return ''
@staticmethod
def _find_function_body(function_name, javascript):
# normalize function name
function_name = function_name.replace('$', '\\$')
match = re.search(r'\s?%s=function\((?P<parameter>[^)]+)\)\s?{\s?(?P<body>[^}]+)\s?\}' % function_name, javascript)
if match:
return match.group('parameter'), match.group('body')
return '', ''
@staticmethod
def _find_object_body(object_name, javascript):
object_name = object_name.replace('$', '\\$')
match = re.search(r'var %s={(?P<object_body>.*?})};' % object_name, javascript, re.S)
if match:
return match.group('object_body')
return ''
def _get_object_function(self, object_name, function_name, javascript):
if object_name not in self._object_cache:
self._object_cache[object_name] = {}
else:
if function_name in self._object_cache[object_name]:
return self._object_cache[object_name][function_name]
_object_body = self._find_object_body(object_name, javascript)
_object_body = _object_body.split('},')
for _function in _object_body:
if not _function.endswith('}'):
_function = ''.join([_function, '}'])
_function = _function.strip()
match = re.match(r'(?P<name>[^:]*):function\((?P<parameter>[^)]*)\){(?P<body>[^}]+)}', _function)
if match:
name = match.group('name').replace('"', '')
parameter = match.group('parameter')
body = match.group('body').split(';')
self._object_cache[object_name][name] = {'name': name,
'body': body,
'params': parameter}
return self._object_cache[object_name][function_name]
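
# A toy illustration (hypothetical script text, not a real YouTube player)
# of what the extraction helpers above return:
#
#     js = 'xy=function(a){a=a.split("");a.reverse();return a.join("")}'
#     Cipher._find_signature_function_name(js)  # -> 'xy'
#     Cipher._find_function_body('xy', js)
#     # -> ('a', 'a=a.split("");a.reverse();return a.join("")')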
| jdf76/plugin.video.youtube | resources/lib/youtube_plugin/youtube/helper/signature/cipher.py | Python | gpl-2.0 | 8,937 | [
"ADF"
] | fd9a8d8700b67554d73dd59b479c7100d318b3c10cfb81e051f406ee85521923 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dialogflow_v2beta1.services.documents import DocumentsAsyncClient
from google.cloud.dialogflow_v2beta1.services.documents import DocumentsClient
from google.cloud.dialogflow_v2beta1.services.documents import pagers
from google.cloud.dialogflow_v2beta1.services.documents import transports
from google.cloud.dialogflow_v2beta1.types import document
from google.cloud.dialogflow_v2beta1.types import document as gcd_document
from google.cloud.dialogflow_v2beta1.types import gcs
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert DocumentsClient._get_default_mtls_endpoint(None) is None
assert DocumentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert (
DocumentsClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
DocumentsClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
DocumentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert DocumentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [DocumentsClient, DocumentsAsyncClient,])
def test_documents_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.DocumentsGrpcTransport, "grpc"),
(transports.DocumentsGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_documents_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [DocumentsClient, DocumentsAsyncClient,])
def test_documents_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_documents_client_get_transport_class():
transport = DocumentsClient.get_transport_class()
available_transports = [
transports.DocumentsGrpcTransport,
]
assert transport in available_transports
transport = DocumentsClient.get_transport_class("grpc")
assert transport == transports.DocumentsGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc"),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
DocumentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentsClient)
)
@mock.patch.object(
DocumentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentsAsyncClient),
)
def test_documents_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(DocumentsClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(DocumentsClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc", "true"),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc", "false"),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DocumentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentsClient)
)
@mock.patch.object(
DocumentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentsAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_documents_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [DocumentsClient, DocumentsAsyncClient])
@mock.patch.object(
DocumentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DocumentsClient)
)
@mock.patch.object(
DocumentsAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DocumentsAsyncClient),
)
def test_documents_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc"),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_documents_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc", grpc_helpers),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_documents_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_documents_client_client_options_from_dict():
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.documents.transports.DocumentsGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DocumentsClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DocumentsClient, transports.DocumentsGrpcTransport, "grpc", grpc_helpers),
(
DocumentsAsyncClient,
transports.DocumentsGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_documents_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=None,
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("request_type", [document.ListDocumentsRequest, dict,])
def test_list_documents(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document.ListDocumentsResponse(
next_page_token="next_page_token_value",
)
response = client.list_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document.ListDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDocumentsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_documents_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
client.list_documents()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document.ListDocumentsRequest()
@pytest.mark.asyncio
async def test_list_documents_async(
transport: str = "grpc_asyncio", request_type=document.ListDocumentsRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document.ListDocumentsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document.ListDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDocumentsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_documents_async_from_dict():
await test_list_documents_async(request_type=dict)
def test_list_documents_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ListDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
call.return_value = document.ListDocumentsResponse()
client.list_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_documents_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ListDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document.ListDocumentsResponse()
)
await client.list_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_documents_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document.ListDocumentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_documents(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
def test_list_documents_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_documents(
document.ListDocumentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_documents_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            document.ListDocumentsResponse()
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_documents(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_list_documents_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_documents(
document.ListDocumentsRequest(), parent="parent_value",
)
def test_list_documents_pager(transport_name: str = "grpc"):
client = DocumentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
document.ListDocumentsResponse(
documents=[
document.Document(),
document.Document(),
document.Document(),
],
next_page_token="abc",
),
document.ListDocumentsResponse(documents=[], next_page_token="def",),
document.ListDocumentsResponse(
documents=[document.Document(),], next_page_token="ghi",
),
document.ListDocumentsResponse(
documents=[document.Document(), document.Document(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_documents(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, document.Document) for i in results)
def test_list_documents_pages(transport_name: str = "grpc"):
client = DocumentsClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_documents), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
document.ListDocumentsResponse(
documents=[
document.Document(),
document.Document(),
document.Document(),
],
next_page_token="abc",
),
document.ListDocumentsResponse(documents=[], next_page_token="def",),
document.ListDocumentsResponse(
documents=[document.Document(),], next_page_token="ghi",
),
document.ListDocumentsResponse(
documents=[document.Document(), document.Document(),],
),
RuntimeError,
)
pages = list(client.list_documents(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_documents_async_pager():
    client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_documents), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
document.ListDocumentsResponse(
documents=[
document.Document(),
document.Document(),
document.Document(),
],
next_page_token="abc",
),
document.ListDocumentsResponse(documents=[], next_page_token="def",),
document.ListDocumentsResponse(
documents=[document.Document(),], next_page_token="ghi",
),
document.ListDocumentsResponse(
documents=[document.Document(), document.Document(),],
),
RuntimeError,
)
async_pager = await client.list_documents(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, document.Document) for i in responses)
@pytest.mark.asyncio
async def test_list_documents_async_pages():
    client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_documents), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
document.ListDocumentsResponse(
documents=[
document.Document(),
document.Document(),
document.Document(),
],
next_page_token="abc",
),
document.ListDocumentsResponse(documents=[], next_page_token="def",),
document.ListDocumentsResponse(
documents=[document.Document(),], next_page_token="ghi",
),
document.ListDocumentsResponse(
documents=[document.Document(), document.Document(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_documents(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [document.GetDocumentRequest, dict,])
def test_get_document(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document.Document(
name="name_value",
display_name="display_name_value",
mime_type="mime_type_value",
knowledge_types=[document.Document.KnowledgeType.FAQ],
enable_auto_reload=True,
content_uri="content_uri_value",
)
response = client.get_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document.GetDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document.Document)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.mime_type == "mime_type_value"
assert response.knowledge_types == [document.Document.KnowledgeType.FAQ]
assert response.enable_auto_reload is True
def test_get_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
client.get_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document.GetDocumentRequest()
@pytest.mark.asyncio
async def test_get_document_async(
transport: str = "grpc_asyncio", request_type=document.GetDocumentRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
document.Document(
name="name_value",
display_name="display_name_value",
mime_type="mime_type_value",
knowledge_types=[document.Document.KnowledgeType.FAQ],
enable_auto_reload=True,
)
)
response = await client.get_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document.GetDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, document.Document)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.mime_type == "mime_type_value"
assert response.knowledge_types == [document.Document.KnowledgeType.FAQ]
assert response.enable_auto_reload is True
@pytest.mark.asyncio
async def test_get_document_async_from_dict():
await test_get_document_async(request_type=dict)
def test_get_document_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.GetDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
call.return_value = document.Document()
client.get_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_document_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.GetDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document())
await client.get_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_document_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = document.Document()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_get_document_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_document(
document.GetDocumentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_document_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_document), "__call__") as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(document.Document())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_get_document_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_document(
document.GetDocumentRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [gcd_document.CreateDocumentRequest, dict,])
def test_create_document(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.CreateDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
client.create_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.CreateDocumentRequest()
@pytest.mark.asyncio
async def test_create_document_async(
transport: str = "grpc_asyncio", request_type=gcd_document.CreateDocumentRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.CreateDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_document_async_from_dict():
await test_create_document_async(request_type=dict)
def test_create_document_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_document.CreateDocumentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_document_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_document.CreateDocumentRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_document_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_document(
parent="parent_value", document=gcd_document.Document(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].document
mock_val = gcd_document.Document(name="name_value")
assert arg == mock_val
def test_create_document_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_document(
gcd_document.CreateDocumentRequest(),
parent="parent_value",
document=gcd_document.Document(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_document_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_document(
parent="parent_value", document=gcd_document.Document(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].document
mock_val = gcd_document.Document(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_document_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_document(
gcd_document.CreateDocumentRequest(),
parent="parent_value",
document=gcd_document.Document(name="name_value"),
)
@pytest.mark.parametrize("request_type", [document.ImportDocumentsRequest, dict,])
def test_import_documents(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.import_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document.ImportDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_import_documents_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
client.import_documents()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document.ImportDocumentsRequest()
@pytest.mark.asyncio
async def test_import_documents_async(
transport: str = "grpc_asyncio", request_type=document.ImportDocumentsRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.import_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document.ImportDocumentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_import_documents_async_from_dict():
await test_import_documents_async(request_type=dict)
def test_import_documents_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ImportDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_import_documents_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ImportDocumentsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.import_documents), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.import_documents(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.parametrize("request_type", [document.DeleteDocumentRequest, dict,])
def test_delete_document(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document.DeleteDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
client.delete_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document.DeleteDocumentRequest()
@pytest.mark.asyncio
async def test_delete_document_async(
transport: str = "grpc_asyncio", request_type=document.DeleteDocumentRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document.DeleteDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_document_async_from_dict():
await test_delete_document_async(request_type=dict)
def test_delete_document_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.DeleteDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_document_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.DeleteDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_document_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
def test_delete_document_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_document(
document.DeleteDocumentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_document_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_document(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_document_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_document(
document.DeleteDocumentRequest(), name="name_value",
)
@pytest.mark.parametrize("request_type", [gcd_document.UpdateDocumentRequest, dict,])
def test_update_document(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.update_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.UpdateDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
client.update_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.UpdateDocumentRequest()
@pytest.mark.asyncio
async def test_update_document_async(
transport: str = "grpc_asyncio", request_type=gcd_document.UpdateDocumentRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.update_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == gcd_document.UpdateDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_document_async_from_dict():
await test_update_document_async(request_type=dict)
def test_update_document_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_document.UpdateDocumentRequest()
request.document.name = "document.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.update_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "document.name=document.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_document_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = gcd_document.UpdateDocumentRequest()
request.document.name = "document.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.update_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "document.name=document.name/value",) in kw[
"metadata"
]
def test_update_document_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_document(
document=gcd_document.Document(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].document
mock_val = gcd_document.Document(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
def test_update_document_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_document(
gcd_document.UpdateDocumentRequest(),
document=gcd_document.Document(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_document_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_document(
document=gcd_document.Document(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].document
mock_val = gcd_document.Document(name="name_value")
assert arg == mock_val
arg = args[0].update_mask
mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
assert arg == mock_val
@pytest.mark.asyncio
async def test_update_document_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_document(
gcd_document.UpdateDocumentRequest(),
document=gcd_document.Document(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.parametrize("request_type", [document.ReloadDocumentRequest, dict,])
def test_reload_document(request_type, transport: str = "grpc"):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.reload_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == document.ReloadDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_reload_document_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
client.reload_document()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == document.ReloadDocumentRequest()
@pytest.mark.asyncio
async def test_reload_document_async(
transport: str = "grpc_asyncio", request_type=document.ReloadDocumentRequest
):
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.reload_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == document.ReloadDocumentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_reload_document_async_from_dict():
await test_reload_document_async(request_type=dict)
def test_reload_document_field_headers():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ReloadDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.reload_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_reload_document_field_headers_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = document.ReloadDocumentRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.reload_document(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_reload_document_flattened():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.reload_document(
name="name_value", gcs_source=gcs.GcsSource(uri="uri_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert args[0].gcs_source == gcs.GcsSource(uri="uri_value")
def test_reload_document_flattened_error():
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.reload_document(
document.ReloadDocumentRequest(),
name="name_value",
gcs_source=gcs.GcsSource(uri="uri_value"),
)
@pytest.mark.asyncio
async def test_reload_document_flattened_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.reload_document), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.reload_document(
name="name_value", gcs_source=gcs.GcsSource(uri="uri_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].name
mock_val = "name_value"
assert arg == mock_val
assert args[0].gcs_source == gcs.GcsSource(uri="uri_value")
@pytest.mark.asyncio
async def test_reload_document_flattened_error_async():
client = DocumentsAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.reload_document(
document.ReloadDocumentRequest(),
name="name_value",
gcs_source=gcs.GcsSource(uri="uri_value"),
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentsClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DocumentsClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = DocumentsClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = DocumentsClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = DocumentsClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.DocumentsGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.DocumentsGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[transports.DocumentsGrpcTransport, transports.DocumentsGrpcAsyncIOTransport,],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = DocumentsClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.DocumentsGrpcTransport,)
def test_documents_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.DocumentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_documents_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.dialogflow_v2beta1.services.documents.transports.DocumentsTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.DocumentsTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"list_documents",
"get_document",
"create_document",
"import_documents",
"delete_document",
"update_document",
"reload_document",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
def test_documents_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.dialogflow_v2beta1.services.documents.transports.DocumentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DocumentsTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
def test_documents_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.dialogflow_v2beta1.services.documents.transports.DocumentsTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.DocumentsTransport()
adc.assert_called_once()
def test_documents_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
DocumentsClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[transports.DocumentsGrpcTransport, transports.DocumentsGrpcAsyncIOTransport,],
)
def test_documents_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.DocumentsGrpcTransport, grpc_helpers),
(transports.DocumentsGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_documents_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"dialogflow.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
),
scopes=["1", "2"],
default_host="dialogflow.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[transports.DocumentsGrpcTransport, transports.DocumentsGrpcAsyncIOTransport],
)
def test_documents_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_documents_host_no_port():
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com"
),
)
assert client.transport._host == "dialogflow.googleapis.com:443"
def test_documents_host_with_port():
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="dialogflow.googleapis.com:8000"
),
)
assert client.transport._host == "dialogflow.googleapis.com:8000"
def test_documents_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentsGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_documents_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.DocumentsGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DocumentsGrpcTransport, transports.DocumentsGrpcAsyncIOTransport],
)
def test_documents_transport_channel_mtls_with_client_cert_source(transport_class):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[transports.DocumentsGrpcTransport, transports.DocumentsGrpcAsyncIOTransport],
)
def test_documents_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_documents_grpc_lro_client():
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_documents_grpc_lro_async_client():
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_document_path():
project = "squid"
knowledge_base = "clam"
document = "whelk"
expected = "projects/{project}/knowledgeBases/{knowledge_base}/documents/{document}".format(
project=project, knowledge_base=knowledge_base, document=document,
)
actual = DocumentsClient.document_path(project, knowledge_base, document)
assert expected == actual
def test_parse_document_path():
expected = {
"project": "octopus",
"knowledge_base": "oyster",
"document": "nudibranch",
}
path = DocumentsClient.document_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_document_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = DocumentsClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = DocumentsClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = DocumentsClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = DocumentsClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = DocumentsClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = DocumentsClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = DocumentsClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = DocumentsClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = DocumentsClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = DocumentsClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = DocumentsClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.DocumentsTransport, "_prep_wrapped_messages"
) as prep:
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.DocumentsTransport, "_prep_wrapped_messages"
) as prep:
transport_class = DocumentsClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = DocumentsAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
with mock.patch.object(
type(getattr(client.transport, "grpc_channel")), "close"
) as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"grpc",
]
for transport in transports:
client = DocumentsClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[
(DocumentsClient, transports.DocumentsGrpcTransport),
(DocumentsAsyncClient, transports.DocumentsGrpcAsyncIOTransport),
],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-dialogflow | tests/unit/gapic/dialogflow_v2beta1/test_documents.py | Python | apache-2.0 | 108,344 | [
"Octopus"
] | 0274060e24e9f82fd9096117c97b131629a63b4a7c2e35db2f755b96b75b7444 |
import graphviz as gv
from stargate import visit, value
from random import choice, seed
__author__ = 'Lai Tash'
class GraphBuilder(visit.Visitor):
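    """Render a stargate node tree as a Graphviz digraph.

    Compound nodes become clusters, leaf nodes become shaped (and, when
    active, green-filled) graph nodes, and signal connections become
    colored edges.
    """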
arrow_colors = [
'black', 'green', 'red', '#770099', 'blue', 'magenta',
'#77aa00', '#007799', 'darkgreen'
]
styles = {
'Node': {
'shape': 'box',
},
'SwitchXOR': {
'shape': 'hexagon',
},
'SwitchOR': {
'shape': 'triangle',
},
'OR': {
'shape': 'triangle',
},
'SwitchAND': {
'shape': 'invtriangle',
},
'AND': {
'shape': 'invtriangle',
},
'SwitchNOT': {
'shape': 'doubleoctagon',
},
'NOT': {
'shape': 'doubleoctagon',
},
'Compound': {
'color': 'black',
},
'Button': {
'shape': 'circle',
},
'PersistentSwitch': {
'shape': 'signature',
},
'Informer': {
'shape': 'Mcircle',
},
'Switch': {
'shape': 'component'
},
'Timer': {
'shape': 'doublecircle',
},
'Transmitter': {
'shape': 'rarrow',
'color': 'blue',
}
}
    def __init__(self):
        self.cluster_count = 0
        # Bind the shared class-level style table on the instance; swap in
        # dict(self.styles) here if per-instance customization is ever needed.
        self.styles = self.styles
def pick_style(self, node):
for cls in node.__class__.mro():
style = self.styles.get(cls.__name__)
if style is not None:
return style
return {}
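    # Illustrative sketch (hypothetical subclass, not part of this module):
    # given class Button(Node) and b = Button(...), pick_style(b) walks
    # Button -> Node -> ... along the MRO and returns styles['Button'] if
    # present, otherwise falls back to styles['Node'], and finally to {}.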
def visit_Node(self, node, graph):
style = self.pick_style(node).copy()
style['color'] = 'green' if value(node) else style.get('color')
graph.node(node.uuid, label=node.name,
style='filled',
**style)
for input in node.inputs:
for signal in input.inputs:
label = input._name
if label == 'input':
label = None
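                # Seed the RNG with the label's hash so every edge sharing a
                # label gets the same color within a run; str hashes are
                # salted per process in Python 3, so colors may differ across
                # runs unless PYTHONHASHSEED is fixed.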
seed(hash(label))
self.root_graph.edge(signal.node.uuid, node.uuid, label=label,
color=choice(self.arrow_colors))
def visit_Compound(self, node, graph=None):
parent = graph
        graph = gv.Digraph(name='cluster_%i' % self.cluster_count, format='png')
if not parent:
self.root_graph = graph
self.cluster_count += 1
graph.graph_attr.update(self.pick_style(node))
graph.graph_attr.update({'label': node.name})
if value(node):
graph.graph_attr.update({
'style': 'filled',
'color': 'darkgreen'
})
#graph.graph_attr.update({'bgcolor': '#%i33333' % random.randint(0, 99)})
for child in node.children.values():
self.visit(child, graph)
if parent:
parent.subgraph(graph)
return graph
| LaiTash/starschematic | stargate/exports/graphviz.py | Python | gpl-3.0 | 2,984 | [
"VisIt"
] | c81d3187719d565fab4ad7d2eaa61c69a2582770a7410b2ab1a34d33df5f21a6 |
#
# Copyright 2015 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# Numina is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Numina is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Numina. If not, see <http://www.gnu.org/licenses/>.
#
'''Constants for the numina package.'''
# Factor to obtain FWHM in Gaussian profile from sigma
FWHM_G = 2.3548200450309493
# Square root of two pi
M_SQRT_2_PI = 2.5066282746310005024157652848110452530069867406099383166299
| Pica4x6/numina | numina/constants.py | Python | gpl-3.0 | 930 | [
"Gaussian"
] | cec56c516e4432a9e2e59da2d7e14adfc19d0436c5e5c2ec640883306aa9c7c5 |
from __future__ import unicode_literals
import json
from tests import TestCase, with_settings, logged_in
from nose.tools import eq_
from catsnap import Client
from catsnap.table.image import Image
from catsnap.table.tag import Tag
from catsnap.table.image_tag import ImageTag
from catsnap.table.album import Album
from unittest.case import SkipTest
class TestUpdateImage(TestCase):
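    # These tests exercise the PATCH /image/<id>.json endpoint: attribute
    # updates, read-only/unknown-attribute errors, album_id validation and
    # clearing, tag add/remove, and the login requirement.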
@logged_in
@with_settings(bucket='snapcats')
def test_update_an_image(self):
session = Client().session()
album = Album(name='cow shots')
session.add(album)
image = Image(filename='deadbeef',
description='one time I saw a dead cow',
title='dead beef')
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'album_id': album.album_id,
})
body = json.loads(response.data)
eq_(body, {
'status': 'ok',
'image': {
'title': 'dead beef',
'description': 'one time I saw a dead cow',
'album_id': str(album.album_id),
'caption': 'dead beef',
'tags': [],
}
})
del image
image = session.query(Image).one()
eq_(image.album_id, album.album_id)
@logged_in
def test_unknown_attributes_generate_an_error(self):
session = Client().session()
image = Image(filename='deadbeef')
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'rochambeau': 'fleur de lis',
})
body = json.loads(response.data)
eq_(body['status'], 'error')
eq_(body['error'], "No such attribute 'rochambeau'")
eq_(response.status_code, 400)
@logged_in
def test_illegal_attributes_generate_an_error(self):
session = Client().session()
image = Image(filename='deadbeef')
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'filename': 'something evil',
})
body = json.loads(response.data)
eq_(body['status'], 'error')
eq_(body['error'], "'filename' is read-only")
@logged_in
def test_invalid_album_id_generates_an_error(self):
session = Client().session()
image = Image(filename='deadbeef')
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'album_id': 5,
})
body = json.loads(response.data)
eq_(body['status'], 'error')
eq_(body['error'], "No such album_id '5'")
@logged_in
def test_clear_album_id(self):
session = Client().session()
album = Album(name='wolves')
session.add(album)
session.flush()
image = Image(filename='01f5', album_id=album.album_id)
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'album_id': '',
})
body = json.loads(response.data)
eq_(body['status'], 'ok')
image = session.query(Image).one()
eq_(image.album_id, None)
def test_login_is_required(self):
response = self.app.patch('/image/1.json', data={
'title': 'BUTTFARTS',
})
eq_(response.status_code, 401)
@logged_in
def test_add_a_tag(self):
session = Client().session()
image = Image(filename='deadbeef')
session.add(image)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'add_tag': 'cow',
})
eq_(response.status_code, 200)
body = json.loads(response.data)
eq_(body['status'], 'ok')
tag = session.query(Tag).filter(Tag.name == 'cow').one()
image_tag = session.query(ImageTag).\
filter(ImageTag.tag_id == tag.tag_id).\
one()
eq_(image_tag.image_id, image.image_id)
@logged_in
def test_remove_a_tag(self):
session = Client().session()
image = Image(filename='deadbeef')
tag = Tag(name='cow')
session.add(image)
session.add(tag)
session.flush()
image_tag = ImageTag(image_id=image.image_id, tag_id=tag.tag_id)
session.add(image_tag)
session.flush()
response = self.app.patch('/image/%d.json' % image.image_id, data={
'remove_tag': 'cow',
})
eq_(response.status_code, 200)
body = json.loads(response.data)
eq_(body['status'], 'ok')
image_tags = session.query(ImageTag).all()
eq_(image_tags, [])
| ErinCall/catsnap | tests/web/test_edit_image.py | Python | mit | 4,857 | [
"FLEUR"
] | 169bf785f2fb71291b0f58835e7d87762bc46b7c48d6bb56fc7b9582028b1723 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2022, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from .plot import lorentzian, gaussian, Plot
| exa-analytics/exatomic | exatomic/plotter/__init__.py | Python | apache-2.0 | 183 | [
"Gaussian"
] | 980877b56e4dfd93a8046105e419c888b52b21ac4091be329d4e98d6ccbe7b7e |
""" This is a test of the chain
FileCatalogClient -> FileCatalogHandler -> FileCatalogDB
    It assumes that the DB is present and that the service is running
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import csv
import filecmp
import os
import unittest
import tempfile
import sys
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.Core.Security.Properties import FC_MANAGEMENT
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
seName = "mySE"
testUser = 'atsareg'
testGroup = 'dirac_user'
testDir = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir'
parentDir = '/vo.formation.idgrilles.fr/user/a/atsareg'
parentparentDir = '/vo.formation.idgrilles.fr/user/a'
nonExistingDir = "/I/Dont/exist/dir"
testFile = '/vo.formation.idgrilles.fr/user/a/atsareg/testdir/testfile'
nonExistingFile = "/I/Dont/exist"
isAdmin = False
proxyUser = 'anon'
proxyGroup = 'anon'
class DFCTestCase(unittest.TestCase):
def setUp(self):
# gLogger.setLevel( "DEBUG" )
self.dfc = FileCatalogClient("DataManagement/FileCatalog")
class UserGroupCase(DFCTestCase):
def test_userOperations(self):
"""Testing the user related operations
If you are an admin, you should be allowed to, if not, it should fail
CAUTION : THEY ARE DESIGNED FOR THE SecurityManager VOMSPolicy
"""
expectedRes = None
if isAdmin:
print("Running UserTest in admin mode")
expectedRes = True
else:
print("Running UserTest in non admin mode")
expectedRes = False
# Add the user
result = self.dfc.addUser(testUser)
self.assertEqual(result['OK'], expectedRes, "AddUser failed when adding new user: %s" % result)
# Add an existing user
result = self.dfc.addUser(testUser)
self.assertEqual(
result['OK'],
expectedRes,
"AddUser failed when adding existing user: %s" %
result)
# Fetch the list of user
result = self.dfc.getUsers()
self.assertEqual(result['OK'], expectedRes, "getUsers failed: %s" % result)
if isAdmin:
# Check if our user is present
self.assertTrue(testUser in result['Value'], "getUsers failed: %s" % result)
# remove the user we created
result = self.dfc.deleteUser(testUser)
self.assertEqual(result['OK'], expectedRes, "deleteUser failed: %s" % result)
def test_groupOperations(self):
"""Testing the group related operations
If you are an admin, you should be allowed to, if not, it should fail
CAUTION : THEY ARE DESIGNED FOR THE SecurityManager DirectorySecurityManagerWithDelete or VOMSPolicy
"""
expectedRes = None
if isAdmin:
print("Running UserTest in admin mode")
expectedRes = True
else:
print("Running UserTest in non admin mode")
expectedRes = False
# Create new group
result = self.dfc.addGroup(testGroup)
self.assertEqual(result['OK'], expectedRes, "AddGroup failed when adding new user: %s" % result)
result = self.dfc.addGroup(testGroup)
self.assertEqual(
result['OK'],
expectedRes,
"AddGroup failed when adding existing user: %s" %
result)
result = self.dfc.getGroups()
self.assertEqual(result['OK'], expectedRes, "getGroups failed: %s" % result)
if isAdmin:
self.assertTrue(testGroup in result['Value'])
result = self.dfc.deleteGroup(testGroup)
self.assertEqual(result['OK'], expectedRes, "deleteGroup failed: %s" % result)
class FileCase(DFCTestCase):
def test_fileOperations(self):
"""
        Tests the file-related operations.
        CAUTION: THESE TESTS ARE DESIGNED FOR THE SecurityManager DirectorySecurityManagerWithDelete or VOMSPolicy
"""
if isAdmin:
print("Running UserTest in admin mode")
else:
print("Running UserTest in non admin mode")
# Adding a new file
result = self.dfc.addFile({testFile: {'PFN': 'testfilePFN',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}})
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
self.assertTrue(testFile in result['Value'].get('Successful', {}), result)
result = self.dfc.exists(testFile)
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile) should be the same lfn %s" % result)
result = self.dfc.exists({testFile: '1000'})
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
result = self.dfc.exists({testFile: {'GUID': '1000', 'PFN': 'blabla'}})
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1000) should be the same lfn %s" % result)
# In fact, we don't check if the GUID is correct...
result = self.dfc.exists({testFile: '1001'})
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile),
testFile, "exists( testFile : 1001) should be the same lfn %s" % result)
result = self.dfc.exists({testFile + '2': '1000'})
self.assertTrue(result['OK'])
self.assertEqual(result['Value'].get('Successful', {}).get(testFile + '2'),
testFile, "exists( testFile2 : 1000) should return testFile %s" % result)
# Re-adding the same file
result = self.dfc.addFile({testFile: {'PFN': 'testfilePFN',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}})
self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"addFile failed: it should be possible to add an existing lfn with the same attributes %s" %
result)
# Re-adding the same file
result = self.dfc.addFile({testFile: {'PFN': 'testfilePFN',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '1'}})
self.assertTrue(result["OK"], "addFile failed when adding existing file %s" % result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"addFile failed: it should not be possible to add an existing lfn with the different attributes %s" %
result)
# Re-adding the different LFN but same GUID
result = self.dfc.addFile({testFile + '2': {'PFN': 'testfilePFN',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}})
self.assertTrue(
result["OK"],
"addFile failed when adding non existing file with existing GUID %s" %
result)
self.assertTrue(
testFile +
'2' in result["Value"]["Failed"],
"addFile failed: it should not be possible to add an existing GUID %s" %
result)
##################################################################################
# Setting existing status of existing file
result = self.dfc.setFileStatus({testFile: "AprioriGood"})
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setFileStatus failed: %s should be in successful (%s)" %
(testFile,
result))
        # Setting a non-existing status of an existing file
result = self.dfc.setFileStatus({testFile: "Happy"})
self.assertTrue(
result["OK"],
"setFileStatus failed when setting un-existing status of existing file %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setFileStatus should have failed %s" %
result)
# Setting existing status of non-existing file
result = self.dfc.setFileStatus({nonExistingFile: "Trash"})
self.assertTrue(
result["OK"],
"setFileStatus failed when setting existing status of non-existing file %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setFileStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
##################################################################################
result = self.dfc.isFile([testFile, nonExistingFile])
self.assertTrue(result["OK"], "isFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"isFile : %s should be seen as a file %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Successful"],
"isFile : %s should be in Successful %s" %
(nonExistingFile,
result))
self.assertTrue(result["Value"]["Successful"][nonExistingFile] is False,
"isFile : %s should be seen as a file %s" % (nonExistingFile, result))
result = self.dfc.changePathOwner({testFile: "toto", nonExistingFile: "tata"})
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
# Only admin can change path owner
if isAdmin:
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(testFile,
result))
else:
self.assertTrue(
testFile in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(nonExistingFile,
result))
# Only admin can change path group
result = self.dfc.changePathGroup({testFile: "toto", nonExistingFile: "tata"})
self.assertTrue(result["OK"], "changePathGroup failed: %s" % result)
if isAdmin:
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(testFile,
result))
else:
self.assertTrue(
testFile in result["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.dfc.changePathMode({testFile: 0o44, nonExistingFile: 0o44})
self.assertTrue(result["OK"], "changePathMode failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"changePathMode : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.dfc.getFileSize([testFile, nonExistingFile])
self.assertTrue(result["OK"], "getFileSize failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileSize : %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile],
123,
"getFileSize got incorrect file size %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileSize : %s should be in Failed %s" %
(nonExistingFile,
result))
result = self.dfc.getFileMetadata([testFile, nonExistingFile])
self.assertTrue(result["OK"], "getFileMetadata failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getFileMetadata : %s should be in Successful %s" %
(testFile,
result))
# The owner changed only if we are admin
if isAdmin:
self.assertEqual(
result["Value"]["Successful"][testFile]["Owner"],
"toto",
"getFileMetadata got incorrect Owner %s" %
result)
self.assertEqual(
result["Value"]["Successful"][testFile]["Status"],
"AprioriGood",
"getFileMetadata got incorrect status %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getFileMetadata : %s should be in Failed %s" %
(nonExistingFile,
result))
# getSEDump writes its output to a file,
# so we first dump the expected content to a file for comparison
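# (Assumption for this check: getSEDump produces one '|'-separated line
# per file on the SE, in the order lfn|checksum|size.)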
_, expectedDumpFn = tempfile.mkstemp()
with open(expectedDumpFn, 'w') as expectedDumpFd:
csvWriter = csv.writer(expectedDumpFd, delimiter='|')
csvWriter.writerow([testFile, '0', 123])
actualDumpFn = expectedDumpFn + 'real'
result = self.dfc.getSEDump('testSE', actualDumpFn)
self.assertTrue(result['OK'], "Error when getting SE dump %s" % result)
self.assertTrue(filecmp.cmp(expectedDumpFn, actualDumpFn), "Did not get the expected SE Dump")
os.remove(expectedDumpFn)
os.remove(actualDumpFn)
result = self.dfc.removeFile([testFile, nonExistingFile])
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"removeFile : %s should be in Successful %s" %
(testFile,
result))
self.assertTrue(
result["Value"]["Successful"][testFile],
"removeFile : %s should be True %s" % (testFile, result))
self.assertTrue(
result["Value"]["Successful"][nonExistingFile],
"removeFile : %s should be True %s" % (nonExistingFile, result))
class ReplicaCase(DFCTestCase):
def test_replicaOperations(self):
"""
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new file
result = self.dfc.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}})
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Adding new replica
result = self.dfc.addReplica({testFile: {"PFN": "testFilePFN", "SE": "otherSE"}})
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding the same replica
result = self.dfc.addReplica({testFile: {"PFN": "testFilePFN", "SE": "otherSE"}})
self.assertTrue(result['OK'], "addReplica failed when adding new Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"addReplica failed when adding new Replica %s" %
result)
# Adding replica of a non existing file
result = self.dfc.addReplica({nonExistingFile: {"PFN": "IdontexistPFN", "SE": "otherSE"}})
self.assertTrue(
result['OK'],
"addReplica failed when adding Replica to non existing Replica %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Failed"],
"addReplica for non existing file should go in Failed %s" %
result)
# Setting existing status of existing Replica
result = self.dfc.setReplicaStatus({testFile: {"Status": "Trash", "SE": "otherSE"}})
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"setReplicaStatus failed: %s should be in successful (%s)" %
(testFile,
result))
# Setting non existing status of existing Replica
result = self.dfc.setReplicaStatus({testFile: {"Status": "randomStatus", "SE": "otherSE"}})
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting non-existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing Replica
result = self.dfc.setReplicaStatus({testFile: {"Status": "Trash", "SE": "nonExistingSe"}})
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(testFile,
result))
# Setting existing status of non-existing File
result = self.dfc.setReplicaStatus(
{nonExistingFile: {"Status": "Trash", "SE": "nonExistingSe"}})
self.assertTrue(
result["OK"],
"setReplicaStatus failed when setting existing status of non-existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"setReplicaStatus failed: %s should be in Failed (%s)" %
(nonExistingFile,
result))
# Getting existing status of existing Replica
result = self.dfc.getReplicaStatus({testFile: "testSE"})
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting existing status of existing Replica but not visible
result = self.dfc.getReplicaStatus({testFile: "otherSE"})
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting existing status of existing Replica but not visible %s" %
result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicaStatus failed: %s should be in Successful (%s)" %
(testFile,
result))
# Getting status of non-existing File
result = self.dfc.getReplicaStatus({nonExistingFile: "testSE"})
self.assertTrue(
result["OK"],
"getReplicaStatus failed when getting status of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicaStatus failed: %s should be in failed (%s)" %
(nonExistingFile,
result))
# Getting replicas of existing File and non existing file, seeing all replicas
result = self.dfc.getReplicas([testFile, nonExistingFile], allStatus=True)
self.assertTrue(result["OK"], "getReplicas failed %s" % result)
self.assertTrue(
testFile in result["Value"]["Successful"],
"getReplicas failed, %s should be in Successful %s" %
(testFile,
result))
self.assertEqual(
result["Value"]["Successful"][testFile], {
"otherSE": testFile, "testSE": testFile}, "getReplicas failed, %s should be in Successful %s" %
(testFile, result))
self.assertTrue(
nonExistingFile in result["Value"]["Failed"],
"getReplicas failed, %s should be in Failed %s" %
(nonExistingFile,
result))
expectedSize = {'PhysicalSize': {'TotalSize': 246,
'otherSE': {'Files': 1, 'Size': 123},
'TotalFiles': 2,
'testSE': {'Files': 1, 'Size': 123}},
'LogicalFiles': 1, 'LogicalDirectories': 0, 'LogicalSize': 123}
result = self.dfc.getDirectorySize([testDir], True, False)
self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(
result["Value"]["Successful"][testDir],
expectedSize,
"getDirectorySize got incorrect directory size %s" %
result)
result = self.dfc.getDirectorySize([testDir], True, True)
self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize (calc): %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(
result["Value"]["Successful"][testDir],
expectedSize,
"getDirectorySize got incorrect directory size %s" %
result)
# removing master replica
result = self.dfc.removeReplica({testFile: {"SE": "testSE"}})
self.assertTrue(result['OK'], "removeReplica failed when removing master Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing master Replica %s" %
result)
# removing non existing replica of existing File
result = self.dfc.removeReplica({testFile: {"SE": "nonExistingSe2"}})
self.assertTrue(
result['OK'],
"removeReplica failed when removing non existing Replica %s" %
result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing new Replica %s" %
result)
# removing non existing replica of non existing file
result = self.dfc.removeReplica({nonExistingFile: {"SE": "nonExistingSe3"}})
self.assertTrue(
result['OK'],
"removeReplica failed when removing replica of non existing File %s" %
result)
self.assertTrue(
nonExistingFile in result['Value']["Successful"],
"removeReplica of non existing file, %s should be in Successful %s" %
(nonExistingFile,
result))
# removing last replica
result = self.dfc.removeReplica({testFile: {"SE": "otherSE"}})
self.assertTrue(result['OK'], "removeReplica failed when removing last Replica %s" % result)
self.assertTrue(
testFile in result['Value']["Successful"],
"removeReplica failed when removing last Replica %s" %
result)
# Cleaning after us
result = self.dfc.removeFile(testFile)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
class DirectoryCase(DFCTestCase):
def test_directoryOperations(self):
"""
Tests the Directory related Operations
this test requires the SE to be properly defined in the CS -> NO IT DOES NOT!!
"""
# Adding a new directory
result = self.dfc.createDirectory(testDir)
self.assertTrue(result['OK'], "addDirectory failed when adding new directory %s" % result)
result = self.dfc.addFile({testFile: {'PFN': 'testfile',
'SE': 'testSE',
'Size': 123,
'GUID': '1000',
'Checksum': '0'}})
self.assertTrue(result['OK'], "addFile failed when adding new file %s" % result)
# Re-adding the same directory (CAUTION, different from addFile)
result = self.dfc.createDirectory(testDir)
self.assertTrue(result["OK"], "addDirectory failed when adding existing directory %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"addDirectory failed: it should be possible to add an existing lfn %s" %
result)
result = self.dfc.isDirectory([testDir, nonExistingDir])
self.assertTrue(result["OK"], "isDirectory failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertTrue(
result["Value"]["Successful"][testDir],
"isDirectory : %s should be seen as a directory %s" %
(testDir,
result))
self.assertTrue(
nonExistingDir in result["Value"]["Successful"],
"isDirectory : %s should be in Successful %s" %
(nonExistingDir,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingDir] is False,
"isDirectory : %s should not be seen as a directory %s" % (nonExistingDir, result))
result = self.dfc.getDirectorySize([testDir, nonExistingDir], False, False)
self.assertTrue(result["OK"], "getDirectorySize failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.dfc.getDirectorySize([testDir, nonExistingDir], False, True)
self.assertTrue(result["OK"], "getDirectorySize (calc) failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"getDirectorySize (calc): %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(result["Value"]["Successful"][testDir],
{'LogicalFiles': 1,
'LogicalDirectories': 0,
'LogicalSize': 123},
"getDirectorySize got incorrect directory size %s" % result)
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"getDirectorySize (calc) : %s should be in Failed %s" %
(nonExistingDir,
result))
result = self.dfc.listDirectory([parentDir, testDir, nonExistingDir])
self.assertTrue(result["OK"], "listDirectory failed: %s" % result)
self.assertTrue(
parentDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(list(result["Value"]["Successful"][parentDir]["SubDirs"]), [testDir],
"listDir : incorrect content for %s (%s)" % (parentDir, result))
self.assertTrue(
testDir in result["Value"]["Successful"],
"listDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertEqual(list(result["Value"]["Successful"][testDir]["Files"]), [testFile],
"listDir : incorrect content for %s (%s)" % (testDir, result))
self.assertTrue(
nonExistingDir in result["Value"]["Failed"],
"listDirectory : %s should be in Failed %s" %
(nonExistingDir,
result))
# We do it two times to make sure that
# updating something to the same value
# still returns a success when it is allowed
for attempt in range(2):
print("Attempt %s" % (attempt + 1))
# Only admin can change path group
resultG = self.dfc.changePathGroup({parentDir: "toto"})
resultM = self.dfc.changePathMode({parentDir: 0o777})
result = self.dfc.changePathOwner({parentDir: "toto"})
result2 = self.dfc.getDirectoryMetadata([parentDir, testDir])
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" %
(parentDir,
resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Mode'),
0o775,
"testDir should not have changed %s" %
result2)
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'),
'toto',
"parentDir should belong to toto %s" % result2)
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed %s" %
result2)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'),
'toto',
"parentDir should belong to group toto %s" % result2)
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed %s" %
result2)
else:
self.assertTrue(
parentDir in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'),
proxyUser,
"parentDir should not have changed Owner from %s ==> %s" % (proxyUser, result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed Owner from %s ==> %s" %
(proxyUser,
result2))
self.assertTrue(
parentDir in resultG["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
proxyGroup,
"parentDir should not have changed OwnerGroup from %s ==> %s" %
(proxyGroup,
result2))
self.assertEqual(
result2['Value'].get('Successful', {}).get(testDir, {}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed OwnerGroup from %s ==> %s" % (proxyGroup, result2))
# Do it recursively now
# Only admin can change path group
resultM = self.dfc.changePathMode({parentDir: 0o777}, True)
resultG = self.dfc.changePathGroup({parentDir: "toto"}, True)
result = self.dfc.changePathOwner({parentDir: "toto"}, True)
result2 = self.dfc.getDirectoryMetadata([parentDir, testDir])
result3 = self.dfc.getFileMetadata(testFile)
self.assertTrue(result["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultG["OK"], "changePathOwner failed: %s" % result)
self.assertTrue(resultM["OK"], "changePathMode failed: %s" % result)
self.assertTrue(result2["OK"], "getDirectoryMetadata failed: %s" % result)
self.assertTrue(result3["OK"], "getFileMetadata failed: %s" % result)
# Since we were the owner we should have been able to do it in any case, admin or not
self.assertTrue(
parentDir in resultM["Value"]["Successful"],
"changePathMode : %s should be in Successful %s" % (parentDir, resultM))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Mode'),
0o777,
"parentDir should have mode %s %s" %
(0o777,
result2))
self.assertEqual(
result2['Value'].get(
'Successful', {}).get(
testDir, {}).get('Mode'), 0o777, "testDir should have mode %s %s" %
(0o777, result2))
self.assertEqual(
result3['Value'].get(
'Successful', {}).get(
testFile, {}).get('Mode'), 0o777, "testFile should have mode %s %s" %
(0o777, result3))
if isAdmin:
self.assertTrue(
parentDir in result["Value"]["Successful"],
"changePathOwner : %s should be in Successful %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('Owner'),
'toto',
"parentDir should belong to toto %s" % result2)
self.assertEqual(
result2['Value'].get('Successful', {}).get(testDir, {}).get('Owner'),
'toto',
"testDir should belong to toto %s" % result2)
self.assertEqual(
result3['Value'].get('Successful', {}).get(testFile, {}).get('Owner'),
'toto',
"testFile should belong to toto %s" % result3)
self.assertTrue(
parentDir in resultG["Value"]["Successful"],
"changePathGroup : %s should be in Successful %s" % (parentDir, resultG))
self.assertEqual(
result2['Value'].get('Successful', {}).get(parentDir, {}).get('OwnerGroup'),
'toto',
"parentDir should belong to group toto %s" % result2)
self.assertEqual(
result2['Value'].get('Successful', {}).get(testDir, {}).get('OwnerGroup'),
'toto',
"testDir should belong to group toto %s" % result2)
self.assertEqual(
result3['Value'].get('Successful', {}).get(testFile, {}).get('OwnerGroup'),
'toto',
"testFile should belong to group toto %s" % result3)
else:
self.assertTrue(
parentDir in result["Value"]["Failed"],
"changePathOwner : %s should be in Failed %s" %
(parentDir,
result))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('Owner'),
proxyUser,
"parentDir should not have changed from %s ==> %s" %
(proxyUser,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('Owner'),
proxyUser,
"testDir should not have changed from %s ==> %s" %
(proxyUser,
result2))
self.assertEqual(
result3['Value'].get(
'Successful',
{}).get(
testFile,
{}).get('Owner'),
proxyUser,
"testFile should not have changed from %s ==> %s" %
(proxyUser,
result3))
self.assertTrue(
parentDir in resultG["Value"]["Failed"],
"changePathGroup : %s should be in Failed %s" %
(parentDir,
resultG))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
parentDir,
{}).get('OwnerGroup'),
proxyGroup,
"parentDir should not have changed from %s ==> %s" %
(proxyGroup,
result2))
self.assertEqual(
result2['Value'].get(
'Successful',
{}).get(
testDir,
{}).get('OwnerGroup'),
proxyGroup,
"testDir should not have changed from %s ==> %s" %
(proxyGroup,
result2))
self.assertEqual(
result3['Value'].get('Successful', {}).get(testFile, {}).get('OwnerGroup'),
proxyGroup,
"testFile should not have changed OwnerGroup from %s ==> %s" % (proxyGroup, result3))
# Cleaning after us
result = self.dfc.removeFile(testFile)
self.assertTrue(result["OK"], "removeFile failed: %s" % result)
result = self.dfc.removeDirectory([testDir, nonExistingDir])
self.assertTrue(result["OK"], "removeDirectory failed: %s" % result)
self.assertTrue(
testDir in result["Value"]["Successful"],
"removeDirectory : %s should be in Successful %s" %
(testDir,
result))
self.assertTrue(
result["Value"]["Successful"][testDir],
"removeDirectory : %s should be True %s" % (testDir, result))
self.assertTrue(
nonExistingDir in result["Value"]["Successful"],
"removeDirectory : %s should be in Successful %s" %
(nonExistingDir,
result))
self.assertTrue(
result["Value"]["Successful"][nonExistingDir],
"removeDirectory : %s should be True %s" % (nonExistingDir, result))
if __name__ == '__main__':
res = getProxyInfo()
if not res['OK']:
sys.exit(1)
res = res['Value']
proxyUser = res.get('username', 'anon')
proxyGroup = res.get('group', 'anon')
properties = res.get('properties', [])
properties.extend(res.get('groupProperties', []))
isAdmin = FC_MANAGEMENT in properties
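# FC_MANAGEMENT is the DIRAC property that grants catalog-administrator
# rights; the ownership and group assertions in the tests branch on it
# via the isAdmin flag.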
print("Running test with admin privileges : ", isAdmin)
suite = unittest.defaultTestLoader.loadTestsFromTestCase(UserGroupCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(ReplicaCase))
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryCase))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
| yujikato/DIRAC | tests/Integration/DataManagementSystem/Test_Client_DFC.py | Python | gpl-3.0 | 40,265 | [
"DIRAC"
] | 76534b1031a2d5ab863779e2003e982e9eacba787d207419be32104d57175614 |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"line",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color` array are
all positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmin`
must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
`marker.color` is set to a numerical array. Value should have
the same units as in `marker.color` and if set, `marker.cmax`
must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# color
# -----
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A number that will be interpreted as a color
according to scattergl.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see:
https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format.
We add two
items to d3's date formatter: "%h" for half of
the year as a decimal number as well as "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of
:class:`plotly.graph_objects.scattergl.marker.colorbar.Tickformatstop`
instances or dicts with compatible properties
tickformatstopdefaults
When used in a template (as
layout.template.data.scattergl.marker.colorbar.tickformatstopdefaults),
sets the default property values to use for
elements of scattergl.marker.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn relative
to the ticks. Left and right options are used
when `orientation` is "h", top and bottom when
`orientation` is "v".
ticklabelstep
Sets the spacing between tick labels as
compared to the spacing between ticks. A value
of 1 (default) means each tick gets a label. A
value of 2 means every 2nd label is shown. A
larger value n means only every nth tick is
labeled. `tick0` determines which labels are
shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is
"array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for `ticktext`.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for `tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scattergl.marker.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use
scattergl.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scattergl.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction). Defaults to 1.02 when `orientation`
is "v" and 0.5 when `orientation` is "h".
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar. Defaults to "left" when `orientation` is
"v" and "center" when `orientation` is "h".
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction). Defaults to 0.5 when `orientation`
is "v" and 1.02 when `orientation` is "h".
yanchor
Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.scattergl.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Blackbody,
Bluered, Blues, Cividis, Earth, Electric, Greens, Greys, Hot, Jet,
Picnic, Portland, Rainbow, RdBu, Reds, Viridis, YlGnBu, YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergl.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.line.colorscale`. Has an
effect only if `marker.line.color` is set to
a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.line.color`) or the bounds set in
`marker.line.cmin` and `marker.line.cmax`. Has
an effect only if `marker.line.color` is set
to a numerical array. Defaults to `false` when
`marker.line.cmin` and `marker.line.cmax` are
set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.line.cmin` and/or
`marker.line.cmax` to be equidistant to this
point. Has an effect only if
`marker.line.color` is set to a numerical array.
Value should have the same units as in
`marker.line.color`. Has no effect when
`marker.line.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.line.color` is set
to a numerical array. Value should have the
same units as in `marker.line.color` and if
set, `marker.line.cmax` must be set as well.
color
Sets the marker.line color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.line.cmin` and `marker.line.cmax` if
set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorscale
Sets the colorscale. Has an effect only if
`marker.line.color` is set to a numerical array.
The colorscale must be an array containing
arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.line.cmin` and
`marker.line.cmax`. Alternatively, `colorscale`
may be a palette name string of the following
list: Blackbody, Bluered, Blues, Cividis, Earth,
Electric, Greens, Greys, Hot, Jet, Picnic, Portland,
Rainbow, RdBu, Reds, Viridis, YlGnBu, YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.line.color` is set to
a numerical array. If true, `marker.line.cmin`
will correspond to the last color in the array
and `marker.line.cmax` will correspond to the
first color.
width
Sets the width (in px) of the lines bounding
the marker points.
widthsrc
Sets the source reference on Chart Studio Cloud
for `width`.
Returns
-------
plotly.graph_objs.scattergl.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# opacitysrc
# ----------
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# size
# ----
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizemin
# -------
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
# sizemode
# --------
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
# sizeref
# -------
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# symbol
# ------
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
# symbolsrc
# ---------
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `symbol`.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattergl.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values is required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.scattergl.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific color
or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattergl.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values is required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.scattergl.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.Marker`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("cauto", None)
_v = cauto if cauto is not None else _v
if _v is not None:
self["cauto"] = _v
_v = arg.pop("cmax", None)
_v = cmax if cmax is not None else _v
if _v is not None:
self["cmax"] = _v
_v = arg.pop("cmid", None)
_v = cmid if cmid is not None else _v
if _v is not None:
self["cmid"] = _v
_v = arg.pop("cmin", None)
_v = cmin if cmin is not None else _v
if _v is not None:
self["cmin"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("colorsrc", None)
_v = colorsrc if colorsrc is not None else _v
if _v is not None:
self["colorsrc"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("opacitysrc", None)
_v = opacitysrc if opacitysrc is not None else _v
if _v is not None:
self["opacitysrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("sizemin", None)
_v = sizemin if sizemin is not None else _v
if _v is not None:
self["sizemin"] = _v
_v = arg.pop("sizemode", None)
_v = sizemode if sizemode is not None else _v
if _v is not None:
self["sizemode"] = _v
_v = arg.pop("sizeref", None)
_v = sizeref if sizeref is not None else _v
if _v is not None:
self["sizeref"] = _v
_v = arg.pop("sizesrc", None)
_v = sizesrc if sizesrc is not None else _v
if _v is not None:
self["sizesrc"] = _v
_v = arg.pop("symbol", None)
_v = symbol if symbol is not None else _v
if _v is not None:
self["symbol"] = _v
_v = arg.pop("symbolsrc", None)
_v = symbolsrc if symbolsrc is not None else _v
if _v is not None:
self["symbolsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
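# --- Editor's usage sketch (hedged; not part of the generated module above) ---
# Minimal illustration of the numeric symbol-modifier scheme documented in the
# Marker docstring, assuming the public plotly.graph_objects API:
# 137 == 37 ("y-up") + 100, i.e. the same as the string "y-up-open".
import plotly.graph_objects as go

_fig = go.Figure(go.Scattergl(x=[0, 1, 2], y=[2, 1, 3], mode="markers",
                              marker=dict(symbol=137, size=12)))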
| plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattergl/_marker.py | Python | mit | 59,088 | [
"Bowtie"
] | 6e05345ee6fcd5f0c80b1746f9aac786ba48ba0e5119579e1fc0b56b8b31312f |
from ddsc.core.util import ProjectWalker, KindType
from ddsc.core.ddsapi import DataServiceAuth, DataServiceApi
from ddsc.core.fileuploader import FileUploader, FileUploadOperations, ParentData, ParallelChunkProcessor
from ddsc.core.parallel import TaskRunner
class UploadSettings(object):
"""
Settings used to upload a project
"""
def __init__(self, config, data_service, watcher, project_name_or_id, file_upload_post_processor):
"""
:param config: ddsc.config.Config user configuration settings from YAML file/environment
:param data_service: DataServiceApi: where we will upload to
:param watcher: ProgressPrinter we notify of our progress
:param project_name_or_id: ProjectNameOrId: name or id of the project so we can create it if necessary
:param file_upload_post_processor: object: has run(data_service, file_response) method to run after upload
"""
self.config = config
self.data_service = data_service
self.watcher = watcher
self.project_name_or_id = project_name_or_id
self.project_id = None
self.file_upload_post_processor = file_upload_post_processor
def get_data_service_auth_data(self):
"""
Serialize data_service setup into something that can be passed to another process.
:return: tuple of data service settings
"""
return self.data_service.auth.get_auth_data()
@staticmethod
def rebuild_data_service(config, data_service_auth_data):
"""
Deserialize value into DataServiceApi object.
:param config: ddsc.config.Config: user configuration settings
:param data_service_auth_data: tuple: auth settings from get_data_service_auth_data()
:return: DataServiceApi: data service rebuilt with the deserialized auth
"""
auth = DataServiceAuth(config)
auth.set_auth_data(data_service_auth_data)
return DataServiceApi(auth, config.url)
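# --- Editor's sketch (hedged): the auth round-trip the two methods above enable.
# get_data_service_auth_data() is called in the parent process and the tuple is
# handed to a background process, which rebuilds a DataServiceApi from it.
# `config` is assumed to be the same ddsc.config.Config used by the parent.
def _example_worker(config, auth_data):
    data_service = UploadSettings.rebuild_data_service(config, auth_data)
    try:
        pass  # talk to DukeDS with data_service here
    finally:
        data_service.close()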
class UploadContext(object):
"""
Values passed to a background worker.
Contains UploadSettings and parameters specific to the function to be run.
"""
def __init__(self, settings, params, message_queue, task_id):
"""
Setup context so it can be passed.
:param settings: UploadSettings: project level info
:param params: tuple: values specific to the function being run
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
self.data_service_auth_data = settings.get_data_service_auth_data()
self.config = settings.config
self.project_name_or_id = settings.project_name_or_id
self.project_id = settings.project_id
self.params = params
self.message_queue = message_queue
self.task_id = task_id
def make_data_service(self):
"""
Recreate data service from within background worker.
:return: DataServiceApi
"""
return UploadSettings.rebuild_data_service(self.config, self.data_service_auth_data)
def send_message(self, data):
"""
Sends a message to the command's on_message(data) method.
:param data: object: data sent to on_message
"""
self.message_queue.put((self.task_id, data))
def start_waiting(self):
"""
Called when we start waiting for project to be ready for file uploads.
"""
self.send_message(True)
def done_waiting(self):
"""
Called when project is ready for file uploads (after waiting).
"""
self.send_message(False)
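# --- Editor's sketch (hedged): routing queued messages back to commands.
# send_message() above puts (task_id, data) tuples on the queue; the real loop
# lives in the TaskRunner, but conceptually the parent process does this
# (commands_by_task_id is a hypothetical dict of commands keyed by task id):
def _example_drain(message_queue, commands_by_task_id):
    while not message_queue.empty():
        task_id, data = message_queue.get()
        commands_by_task_id[task_id].on_message(data)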
class ProjectUploader(object):
"""
Uploads a project based on UploadSettings.
"""
def __init__(self, settings):
"""
Setup to talk to the data service based on settings.
:param settings: UploadSettings: settings to use for uploading.
"""
self.runner = TaskRunner(settings.config.upload_workers)
self.settings = settings
self.small_item_task_builder = SmallItemUploadTaskBuilder(self.settings, self.runner)
self.small_files = []
self.large_files = []
def run(self, local_project):
"""
Upload a project by uploading project, folders, and small files then uploading the large files.
:param local_project: LocalProject: project to upload
"""
# Walks project adding project/folder to small_item_task_builder and adding files to small_files/large_files
ProjectWalker.walk_project(local_project, self)
self.sort_files_list(self.small_files)
self.add_small_files_to_task_builder()
# Run small items in parallel
self.runner.run()
# Run parts of each large item in parallel
self.sort_files_list(self.large_files)
self.upload_large_files()
@staticmethod
def sort_files_list(files_list):
"""
Sort files so that new files come first and will be processed before files that already exist in DukeDS.
This is to allow us to immediately begin making progress in retrying an upload.
:param files_list: [(LocalFile, LocalFolder|LocalProject)]: list of files to upload
"""
files_list.sort(key=lambda file_and_parent: file_and_parent[0].remote_id)
# Methods called by ProjectWalker.walk_project
def visit_project(self, item):
"""
Add create project to small task list.
"""
self.small_item_task_builder.visit_project(item)
def visit_folder(self, item, parent):
"""
Add create folder to small task list.
"""
self.small_item_task_builder.visit_folder(item, parent)
def visit_file(self, item, parent):
"""
If file is large add it to the large items to be processed after small task list.
else file is small add it to the small task list.
"""
if self.is_large_file(item):
self.large_files.append((item, parent))
else:
self.small_files.append((item, parent))
def is_large_file(self, item):
return item.size > self.settings.config.upload_bytes_per_chunk
def add_small_files_to_task_builder(self):
for local_file, parent in self.small_files:
self.small_item_task_builder.visit_file(local_file, parent)
def upload_large_files(self):
"""
Upload files that were too large.
"""
for local_file, parent in self.large_files:
self.settings.watcher.transferring_item(local_file, increment_amt=0, override_msg_verb='checking')
hash_data = local_file.calculate_local_hash()
if local_file.hash_matches_remote(hash_data):
self.file_already_uploaded(local_file)
else:
self.settings.watcher.transferring_item(local_file, increment_amt=0)
self.process_large_file(local_file, parent, hash_data)
def process_large_file(self, local_file, parent, hash_data):
"""
Upload a single file using multiple processes to upload multiple chunks at the same time.
Updates local_file with its remote_id when done.
:param local_file: LocalFile: file we are uploading
:param parent: LocalFolder/LocalProject: parent of the file
:param hash_data: HashData: hash (algorithm and value) of the local file contents
"""
file_content_sender = FileUploader(self.settings.config, self.settings.data_service, local_file, hash_data,
self.settings.watcher, self.settings.file_upload_post_processor)
remote_id = file_content_sender.upload(self.settings.project_id, parent.kind, parent.remote_id)
local_file.set_remote_values_after_send(remote_id, hash_data.alg, hash_data.value)
def file_already_uploaded(self, local_file):
"""
Updates progress bar for a file that was already uploaded
:param local_file: LocalFile
"""
num_chunks = ParallelChunkProcessor.determine_num_chunks(self.settings.config.upload_bytes_per_chunk,
local_file.size)
self.settings.watcher.increment_progress(num_chunks)
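# --- Editor's usage sketch (hedged) ---
# Assuming `settings` is a populated UploadSettings and `local_project` a
# LocalProject tree, a full upload is just:
def _example_upload(settings, local_project):
    ProjectUploader(settings).run(local_project)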
class SmallItemUploadTaskBuilder(object):
"""
Uploads project, folders and small files to DukeDS.
Runs them in parallel, ordered based on their dependencies.
"""
def __init__(self, settings, task_runner):
self.settings = settings
self.task_runner = task_runner
self.tasks = []
self.item_to_id = {}
def walk_project(self, project):
"""
Calls visit_* methods of self.
:param project: project we will visit children of.
"""
ProjectWalker.walk_project(project, self)
def visit_project(self, item):
"""
Adds create project command to task runner if project doesn't already exist.
"""
if not item.remote_id:
command = CreateProjectCommand(self.settings, item)
self.task_runner_add(None, item, command)
else:
self.settings.project_id = item.remote_id
def visit_folder(self, item, parent):
"""
Adds create folder command to task runner if folder doesn't already exist.
"""
if not item.remote_id:
command = CreateFolderCommand(self.settings, item, parent)
self.task_runner_add(parent, item, command)
def visit_file(self, item, parent):
"""
If the file is small, add a create-small-file command; otherwise raise an error.
Large files shouldn't be passed to SmallItemUploadTaskBuilder.
"""
if item.size > self.settings.config.upload_bytes_per_chunk:
msg = "Programmer Error: Trying to upload large file as small item size:{} name:{}"
raise ValueError(msg.format(item.size, item.name))
else:
# Create a command to hash the file
hash_command = HashFileCommand(self.settings, item)
parent_task_id = self.item_to_id.get(parent)
hash_task_id = self.task_runner.add(parent_task_id, hash_command)
# Create a command to upload the file that waits for the results from the HashFileCommand
send_command = CreateSmallFileCommand(self.settings, item, parent,
self.settings.file_upload_post_processor)
self.task_runner.add(hash_task_id, send_command)
def task_runner_add(self, parent, item, command):
"""
Add command to the task runner using the parent's task id, creating a task id for the item/command.
Save this item's id to a lookup.
:param parent: object: parent of item
:param item: object: item we are running command on
:param command: parallel TaskCommand we want to have run
"""
parent_task_id = self.item_to_id.get(parent)
task_id = self.task_runner.add(parent_task_id, command)
self.item_to_id[item] = task_id
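# (Editor's note: the chain built by visit_file/task_runner_add above is
#  parent folder/project task -> HashFileCommand -> CreateSmallFileCommand,
#  so a file is hashed only after its parent exists remotely and uploaded only
#  after its hash is available via before_run(parent_task_result).)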
class CreateProjectCommand(object):
"""
Create project in DukeDS.
"""
def __init__(self, settings, local_project):
"""
Setup passing in all necessary data to create project and update external state.
:param settings: UploadSettings: settings to be used/updated when we upload the project.
:param local_project: LocalProject: information about the project(holds remote_id when done)
"""
self.settings = settings
self.local_project = local_project
if not settings.project_name_or_id.is_name:
raise ValueError('Programming Error: CreateProjectCommand called without project name.')
self.func = upload_project_run
def before_run(self, parent_task_result):
"""
Notify progress bar that we are creating the project.
"""
self.settings.watcher.transferring_item(self.local_project)
def create_context(self, message_queue, task_id):
"""
Create data needed by upload_project_run (DukeDS connection info).
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
return UploadContext(self.settings, (), message_queue, task_id)
def after_run(self, result_id):
"""
Save uuid associated with project we just created.
:param result_id: str: uuid of the project
"""
self.local_project.set_remote_id_after_send(result_id)
self.settings.project_id = result_id
def upload_project_run(upload_context):
"""
Function run by CreateProjectCommand to create the project.
Runs in a background process.
:param upload_context: UploadContext: contains data service setup and project name to create.
"""
data_service = upload_context.make_data_service()
project_name = upload_context.project_name_or_id.get_name_or_raise()
result = data_service.create_project(project_name, project_name)
data_service.close()
return result.json()['id']
class CreateFolderCommand(object):
"""
Create folder in DukeDS.
"""
def __init__(self, settings, remote_folder, parent):
"""
Setup passing in all necessary data to create folder and update external state.
:param settings: UploadSettings: contains data_service connection info
:param remote_folder: object: contains data about the folder we should create
:param parent: object: contains info about the parent of the folder we will create.
"""
self.settings = settings
self.remote_folder = remote_folder
self.parent = parent
self.func = upload_folder_run
def before_run(self, parent_task_result):
"""
Notify progress bar that we are creating this folder.
"""
self.settings.watcher.transferring_item(self.remote_folder)
def create_context(self, message_queue, task_id):
"""
Create values to be used by upload_folder_run function.
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
params = (self.remote_folder.name, self.parent.kind, self.parent.remote_id)
return UploadContext(self.settings, params, message_queue, task_id)
def after_run(self, result_id):
"""
Save the uuid of our new folder back to our LocalFolder object.
:param result_id: str: uuid of the folder we just created.
"""
self.remote_folder.set_remote_id_after_send(result_id)
def upload_folder_run(upload_context):
"""
Function run by CreateFolderCommand to create the folder.
Runs in a background process.
:param upload_context: UploadContext: contains data service setup and folder details.
"""
data_service = upload_context.make_data_service()
folder_name, parent_kind, parent_remote_id = upload_context.params
result = data_service.create_folder(folder_name, parent_kind, parent_remote_id)
data_service.close()
return result.json()['id']
class HashFileCommand(object):
"""
Hashes file and returns result
"""
def __init__(self, settings, local_file):
self.settings = settings
self.local_file = local_file
self.func = hash_file
def before_run(self, parent_task_result):
# Update progress bar that we are checking this file
self.settings.watcher.transferring_item(self.local_file, increment_amt=0, override_msg_verb='checking')
def create_context(self, message_queue, task_id):
params = self.local_file.get_path_data()
return UploadContext(self.settings, params, message_queue, task_id)
def after_run(self, result):
pass
def hash_file(upload_context):
"""
Function run by HashFileCommand to calculate a file hash.
:param upload_context: UploadContext: its params holds a PathData with the path to a local file to hash
:return: HashData: result of hash (alg + value)
"""
path_data = upload_context.params
hash_data = path_data.get_hash()
return hash_data
class CreateSmallFileCommand(object):
"""
Creates a small file in the data service.
This includes:
1) creating an upload
2) creating an upload url
3) posting the contents of the file
4) completing the upload
5) creating or updating file version
"""
def __init__(self, settings, local_file, parent, file_upload_post_processor=None):
"""
Setup passing in all necessary data to create file and update external state.
:param settings: UploadSettings: contains data_service connection info
:param local_file: object: information about the file we will upload
:param parent: object: parent of the file (folder or project)
:param file_upload_post_processor: object: has run(data_service, file_response) method to run after upload
"""
self.settings = settings
self.local_file = local_file
self.parent = parent
self.func = create_small_file
self.file_upload_post_processor = file_upload_post_processor
self.hash_data = None
def before_run(self, parent_task_result):
self.hash_data = parent_task_result
def create_context(self, message_queue, task_id):
"""
Create values to be used by create_small_file function.
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
parent_data = ParentData(self.parent.kind, self.parent.remote_id)
path_data = self.local_file.get_path_data()
params = parent_data, path_data, self.hash_data, self.local_file.remote_id, \
self.local_file.remote_file_hash_alg, self.local_file.remote_file_hash
return UploadContext(self.settings, params, message_queue, task_id)
def after_run(self, remote_file_data):
"""
Save uuid and hash values of file to our LocalFile if it was updated. If remote_file_data is None that means
the file was already up to date.
:param remote_file_data: dict: DukeDS file data
"""
if remote_file_data:
if self.file_upload_post_processor:
self.file_upload_post_processor.run(self.settings.data_service, remote_file_data)
remote_file_id = remote_file_data['id']
remote_hash_dict = remote_file_data['current_version']['upload']['hashes'][0]
self.local_file.set_remote_values_after_send(remote_file_id,
remote_hash_dict['algorithm'],
remote_hash_dict['value'])
self.settings.watcher.transferring_item(self.local_file, transferred_bytes=self.local_file.size)
else:
self.settings.watcher.increment_progress()
def on_message(self, started_waiting):
"""
Receives started_waiting boolean from the create_small_file method and notifies the watcher in settings.
:param started_waiting: boolean: True when we start waiting, False when done
"""
watcher = self.settings.watcher
if started_waiting:
watcher.start_waiting()
else:
watcher.done_waiting()
def create_small_file(upload_context):
"""
Function run by CreateSmallFileCommand to create the file.
Runs in a background process.
:param upload_context: UploadContext: contains data service setup and file details.
:return dict: DukeDS file data
"""
parent_data, path_data, hash_data, remote_file_id, remote_file_hash_alg, remote_file_hash = upload_context.params
if hash_data.matches(remote_file_hash_alg, remote_file_hash):
return None
data_service = upload_context.make_data_service()
# The small file will fit into one chunk so read the whole file into memory
# (its hash was already computed upstream by HashFileCommand).
chunk = path_data.read_whole_file()
# Talk to data service uploading chunk and creating the file.
upload_operations = FileUploadOperations(data_service, upload_context)
upload_id, url_info = upload_operations.create_upload_and_chunk_url(
upload_context.project_id, path_data, hash_data, storage_provider_id=upload_context.config.storage_provider_id)
upload_operations.send_file_external(url_info, chunk)
file_response_json = upload_operations.finish_upload(upload_id, hash_data, parent_data, remote_file_id)
data_service.close()
return file_response_json
class ProjectUploadDryRun(object):
"""
Recursively visits children of the project passed to run.
Builds a list of the names of folders/files that need to be uploaded.
"""
def __init__(self, local_project):
self.upload_items = []
self._run(local_project)
def add_upload_item(self, name):
self.upload_items.append(name)
def _run(self, local_project):
"""
Appends file/folder paths to upload_items based on the contents of this project that need to be uploaded.
:param local_project: LocalProject: project we will build the list for
"""
self._visit_recur(local_project)
def _visit_recur(self, item):
"""
Recursively visits children of item.
:param item: object: project, folder or file we will add to upload_items if necessary.
"""
if item.kind == KindType.file_str:
hash_data = item.calculate_local_hash()
if not item.hash_matches_remote(hash_data):
self.add_upload_item(item.path)
else:
if item.kind == KindType.project_str:
pass
else:
if not item.remote_id:
self.add_upload_item(item.path)
for child in item.children:
self._visit_recur(child)
def get_report(self):
"""
Returns text displaying the items that need to be uploaded or a message saying there are no files/folders
to upload.
:return: str: report text
"""
if not self.upload_items:
return "\n\nNo changes found. Nothing needs to be uploaded.\n\n"
else:
result = "\n\nFiles/Folders that need to be uploaded:\n"
for item in self.upload_items:
result += "{}\n".format(item)
result += "\n"
return result
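# --- Editor's usage sketch (hedged) ---
# A dry run only hashes local files and inspects remote_id fields, so showing
# what would be uploaded is:
def _example_dry_run_report(local_project):
    print(ProjectUploadDryRun(local_project).get_report())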
| Duke-GCB/DukeDSClient | ddsc/core/projectuploader.py | Python | mit | 22,522 | [
"VisIt"
] | 0b579311ba7ae419d2e23c9c1808789c1d1e5d3f962115156c88686075e0fc1e |
# any netcdf module level stuff goes here (after all it's the init file)
from __future__ import absolute_import
from .region_model_repository import RegionModelRepository, RegionConfigError
from .arome_data_repository import AromeDataRepository
from .arome_data_repository import AromeDataRepositoryError
from .geo_ts_repository import GeoTsRepository, get_geo_ts_collection
__all__ = ["RegionModelRepository",
"AromeDataRepository",
"AromeDataRepositoryError",
"GeoTsRepository"]
| felixmatt/shyft | shyft/repository/netcdf/__init__.py | Python | lgpl-3.0 | 516 | [
"NetCDF"
] | cb61bdc27e0a376420d2be37ebdc9f4ba22c90d76ca2f6635b944b9ef8f38013 |
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/')
from data_variable_hshv import Fmat_original_hshv
from data_variable_hslv import Fmat_original_hslv
from data_variable_lshv import Fmat_original_lshv
from data_variable_lslv import Fmat_original_lslv
# Returns mu, sigma for 10 hidden states from feature vectors (123, 35) for RF, SF, RM, SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
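# Editor's sketch (hedged): the block averaging feature_to_mu_sigma performs,
# written in plain numpy. For a 20-sample, 1-trial feature vector, DIVS is 2,
# so each of the 10 hidden states summarizes two consecutive samples.
_fv_demo = np.arange(20.0).reshape(10, 2)   # 10 blocks of DIVS=2 samples
_mu_demo = _fv_demo.mean(axis=1)            # [0.5, 2.5, ..., 18.5]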
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
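# Editor's sketch (hedged): create_seq applies the same block averaging per
# column, so a (20, 3) raw-data matrix becomes a (10, 3) observation sequence
# with one mean value per hidden state and trial:
_seq_demo = np.arange(60.0).reshape(20, 3)
_seq_means = _seq_demo.reshape(10, 2, 3).mean(axis=1)   # shape (10, 3)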
if __name__ == '__main__':
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
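# (Editor's note: the rows below are upper-triangular, i.e. a left-to-right
#  "Bakis" topology -- each of the 10 states can only persist or advance, and
#  the final state is absorbing with self-transition probability 1.0.)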
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# pi - initial probabilities per state
pi = [0.1] * 10
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_hshv,sigma_rf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:51,0:15], Fmat_original_lshv[0:51,0:15], Fmat_original_lslv[0:51,0:15]))))
mu_rm_hshv,sigma_rm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:51,15:30], Fmat_original_lshv[0:51,15:16], Fmat_original_lslv[0:51,15:28]))))
mu_sf_hshv,sigma_sf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:51,30:45], Fmat_original_lshv[0:51,16:23], Fmat_original_lslv[0:51,28:38]))))
mu_sm_hshv,sigma_sm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:51,45:57], Fmat_original_lshv[0:51,23:34], Fmat_original_lslv[0:51,38:49]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = np.zeros((10,2))
B_rm_hshv = np.zeros((10,2))
B_sf_hshv = np.zeros((10,2))
B_sm_hshv = np.zeros((10,2))
for num_states in range(10):
B_rf_hshv[num_states,0] = mu_rf_hshv[num_states]
B_rf_hshv[num_states,1] = sigma_rf_hshv[num_states]
B_rm_hshv[num_states,0] = mu_rm_hshv[num_states]
B_rm_hshv[num_states,1] = sigma_rm_hshv[num_states]
B_sf_hshv[num_states,0] = mu_sf_hshv[num_states]
B_sf_hshv[num_states,1] = sigma_sf_hshv[num_states]
B_sm_hshv[num_states,0] = mu_sm_hshv[num_states]
B_sm_hshv[num_states,1] = sigma_sm_hshv[num_states]
B_rf_hshv = B_rf_hshv.tolist()
B_rm_hshv = B_rm_hshv.tolist()
B_sf_hshv = B_sf_hshv.tolist()
B_sm_hshv = B_sm_hshv.tolist()
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:51,0:15], Fmat_original_lshv[0:51,0:15], Fmat_original_lslv[0:51,0:15])))
total_seq_rm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:51,15:30], Fmat_original_lshv[0:51,15:16], Fmat_original_lslv[0:51,15:28])))
total_seq_sf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:51,30:45], Fmat_original_lshv[0:51,16:23], Fmat_original_lslv[0:51,28:38])))
total_seq_sm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:51,45:57], Fmat_original_lshv[0:51,23:34], Fmat_original_lslv[0:51,38:49])))
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = Fmat_original_hshv[0:51,:]
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[0:51,k]).T).tolist()
new_test_seq_obj_hshv = np.array(sum(test_seq_obj_hshv,[]))
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
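# (Editor's note: each test trial is assigned to the class -- RF, RM, SF or
#  SM -- whose trained HMM yields the highest Viterbi log-likelihood; the same
#  rule is repeated for the three remaining train/test splits below.)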
#print rf_hshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:17])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,17:30])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,30:40])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:17])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,17:30])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,30:40])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:17])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,17:30])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,30:40])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:17])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,17:30])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,30:40])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_hslv,sigma_rf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_lshv[0:51,0:15], Fmat_original_lslv[0:51,0:15]))))
mu_rm_hslv,sigma_rm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_lshv[0:51,15:16], Fmat_original_lslv[0:51,15:28]))))
mu_sf_hslv,sigma_sf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_lshv[0:51,17:23], Fmat_original_lslv[0:51,28:38]))))
mu_sm_hslv,sigma_sm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_lshv[0:51,23:34], Fmat_original_lslv[0:51,38:49]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = np.zeros((10,2))
B_rm_hslv = np.zeros((10,2))
B_sf_hslv = np.zeros((10,2))
B_sm_hslv = np.zeros((10,2))
for num_states in range(10):
B_rf_hslv[num_states,0] = mu_rf_hslv[num_states]
B_rf_hslv[num_states,1] = sigma_rf_hslv[num_states]
B_rm_hslv[num_states,0] = mu_rm_hslv[num_states]
B_rm_hslv[num_states,1] = sigma_rm_hslv[num_states]
B_sf_hslv[num_states,0] = mu_sf_hslv[num_states]
B_sf_hslv[num_states,1] = sigma_sf_hslv[num_states]
B_sm_hslv[num_states,0] = mu_sm_hslv[num_states]
B_sm_hslv[num_states,1] = sigma_sm_hslv[num_states]
B_rf_hslv = B_rf_hslv.tolist()
B_rm_hslv = B_rm_hslv.tolist()
B_sf_hslv = B_sf_hslv.tolist()
B_sm_hslv = B_sm_hslv.tolist()
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_lshv[0:51,0:15], Fmat_original_lslv[0:51,0:15])))
total_seq_rm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_lshv[0:51,15:16], Fmat_original_lslv[0:51,15:28])))
total_seq_sf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_lshv[0:51,17:23], Fmat_original_lslv[0:51,28:38])))
total_seq_sm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_lshv[0:51,23:34], Fmat_original_lslv[0:51,38:49])))
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = Fmat_original_hslv[0:51,:]
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[0:51,k]).T).tolist()
new_test_seq_obj_hslv = np.array(sum(test_seq_obj_hslv,[]))
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:57])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:57])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:57])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:57])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_lshv,sigma_rf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_hslv[0:51,0:15], Fmat_original_lslv[0:51,0:15]))))
mu_rm_lshv,sigma_rm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_hslv[0:51,15:30], Fmat_original_lslv[0:51,15:28]))))
mu_sf_lshv,sigma_sf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_hslv[0:51,30:45], Fmat_original_lslv[0:51,28:38]))))
mu_sm_lshv,sigma_sm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_hslv[0:51,45:57], Fmat_original_lslv[0:51,38:49]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = np.zeros((10,2))
B_rm_lshv = np.zeros((10,2))
B_sf_lshv = np.zeros((10,2))
B_sm_lshv = np.zeros((10,2))
for num_states in range(10):
B_rf_lshv[num_states,0] = mu_rf_lshv[num_states]
B_rf_lshv[num_states,1] = sigma_rf_lshv[num_states]
B_rm_lshv[num_states,0] = mu_rm_lshv[num_states]
B_rm_lshv[num_states,1] = sigma_rm_lshv[num_states]
B_sf_lshv[num_states,0] = mu_sf_lshv[num_states]
B_sf_lshv[num_states,1] = sigma_sf_lshv[num_states]
B_sm_lshv[num_states,0] = mu_sm_lshv[num_states]
B_sm_lshv[num_states,1] = sigma_sm_lshv[num_states]
B_rf_lshv = B_rf_lshv.tolist()
B_rm_lshv = B_rm_lshv.tolist()
B_sf_lshv = B_sf_lshv.tolist()
B_sm_lshv = B_sm_lshv.tolist()
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_hslv[0:51,0:15], Fmat_original_lslv[0:51,0:15])))
total_seq_rm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_hslv[0:51,15:30], Fmat_original_lslv[0:51,15:28])))
total_seq_sf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_hslv[0:51,30:45], Fmat_original_lslv[0:51,28:38])))
total_seq_sm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_hslv[0:51,45:57], Fmat_original_lslv[0:51,38:49])))
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = Fmat_original_lshv[0:51,:]
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[0:51,k]).T).tolist()
new_test_seq_obj_lshv = np.array(sum(test_seq_obj_lshv,[]))
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:16])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,16:23])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,23:34])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:16])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,16:23])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,23:34])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:16])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,16:23])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,23:34])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:16])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,16:23])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,23:34])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_lslv,sigma_rf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_hslv[0:51,0:15], Fmat_original_lshv[0:51,0:15]))))
mu_rm_lslv,sigma_rm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_hslv[0:51,15:30], Fmat_original_lshv[0:51,15:16]))))
mu_sf_lslv,sigma_sf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_hslv[0:51,30:45], Fmat_original_lshv[0:51,16:23]))))
mu_sm_lslv,sigma_sm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_hslv[0:51,45:57], Fmat_original_lshv[0:51,23:34]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = np.zeros((10,2))
B_rm_lslv = np.zeros((10,2))
B_sf_lslv = np.zeros((10,2))
B_sm_lslv = np.zeros((10,2))
for num_states in range(10):
B_rf_lslv[num_states,0] = mu_rf_lslv[num_states]
B_rf_lslv[num_states,1] = sigma_rf_lslv[num_states]
B_rm_lslv[num_states,0] = mu_rm_lslv[num_states]
B_rm_lslv[num_states,1] = sigma_rm_lslv[num_states]
B_sf_lslv[num_states,0] = mu_sf_lslv[num_states]
B_sf_lslv[num_states,1] = sigma_sf_lslv[num_states]
B_sm_lslv[num_states,0] = mu_sm_lslv[num_states]
B_sm_lslv[num_states,1] = sigma_sm_lslv[num_states]
B_rf_lslv = B_rf_lslv.tolist()
B_rm_lslv = B_rm_lslv.tolist()
B_sf_lslv = B_sf_lslv.tolist()
B_sm_lslv = B_sm_lslv.tolist()
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,0:15], Fmat_original_hslv[0:51,0:15], Fmat_original_lshv[0:51,0:15])))
total_seq_rm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,15:17], Fmat_original_hslv[0:51,15:30], Fmat_original_lshv[0:51,15:16])))
total_seq_sf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,17:30], Fmat_original_hslv[0:51,30:45], Fmat_original_lshv[0:51,16:23])))
total_seq_sm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:51,30:40], Fmat_original_hslv[0:51,45:57], Fmat_original_lshv[0:51,23:34])))
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = Fmat_original_lslv[0:51,:]
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
test_seq_obj_lslv = (np.array(total_seq_obj_lslv[0:51,k]).T).tolist()
new_test_seq_obj_lslv = np.array(sum(test_seq_obj_lslv,[]))
#print new_test_seq_obj_lslv
ts_obj_lslv = new_test_seq_obj_lslv
#print np.shape(ts_obj_lslv)
final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
# Find Viterbi Path
path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
if obj_lslv == path_rf_obj_lslv[1]:
rf_lslv[0,k] = 1
elif obj_lslv == path_rm_obj_lslv[1]:
rm_lslv[0,k] = 1
elif obj_lslv == path_sf_obj_lslv[1]:
sf_lslv[0,k] = 1
else:
sm_lslv[0,k] = 1
k = k+1
#print rf_lslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:28])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,28:38])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,38:49])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:28])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,28:38])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,38:49])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:28])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,28:38])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,38:49])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:28])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,28:38])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,38:49])
print(cmat)
############################################################################################################################################
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_10_states.png')
pp.show()
| tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with 0.5s/hmm_crossvalidation_force_10_states.py | Python | mit | 27,327 | [
"Mayavi"
] | 4a1030edfcd1c7d769c99149c5747a25842a849a76060a5f12e2cbd860812ac3 |
"""
set of tools to deal with crispex data
"""
import numpy as np
import scipy.interpolate as interp
def write_buf(intensity, outfile, wave=None, stokes=False):
''' Writes crispex image and spectral cubes, for when the data is already
resident in memory. To be used when there is ample memory for all
the cubes.
IN:
intensity: array with intensities (possibly IQUV). Its shape depends
on the value of stokes. If stokes=False, then its shape is
[nt, nx, ny, nwave]. If stokes=True, then its shape is
[4, nt, nx, ny, nwave], where the first index corresponds
to I, Q, U, V.
outfile: name of files to be written. Will be prefixed by im_ and
sp_.
stokes: If True, will write full stokes.
'''
from . import lp
if not stokes:
nt, nx, ny, nw = intensity.shape
ax = [(1, 2, 0, 3), (3, 0, 2, 1)]
rs = [(nx, ny, nt * nw), (nw, nt, ny * nx)]
extrahd = ''
else:
ns, nt, nx, ny, nw = intensity.shape
ax = [(2, 3, 1, 0, 4), (4, 1, 3, 2, 0)]
rs = [(nx, ny, nt * ns * nw), (nw, nt, ny * nx * ns)]
extrahd = ', stokes=[I,Q,U,V], ns=4'
# this is the image cube:
im = np.transpose(intensity, axes=ax[0])
im = im.reshape(rs[0])
# this is the spectral cube
sp = np.transpose(intensity, axes=ax[1])
sp = sp.reshape(rs[1])
# write lp.put, etc.
# , extraheader_sep=False)
lp.writeto('im_' + outfile, im, extraheader=extrahd)
# , extraheader_sep=False)
lp.writeto('sp_' + outfile, sp, extraheader=extrahd)
return
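# A minimal usage sketch for write_buf (hypothetical shapes and file name,
# not part of the original module):
#
#   data = np.zeros((10, 100, 100, 20), dtype='float32')  # [nt, nx, ny, nwave]
#   write_buf(data, 'halpha.fcube')  # writes im_halpha.fcube and sp_halpha.fcube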
def write_from_rh(files, outfile, stokes=False, waveidx=None, waveinterp=None,
verbose=False):
''' Writes crispex image cube from RH 1.5D netcdf output.'''
from . import ncdf, lp
from ..utils.shell import progressbar
# open first file to get some data
ii = ncdf.getvar(files[0], 'intensity', memmap=True)
nx, ny, nw = ii.shape
nt = len(files)
dtype = ii.dtype
del ii
wave = ncdf.getvar(files[0], 'wavelength', memmap=False)
if waveidx is not None:
wave = wave[waveidx]
if waveinterp is None:
nw = len(wave)
else:
nw = len(waveinterp)
if stokes:
try:
ii = ncdf.getvar(files[0], 'stokes_V', memmap=True)
del ii
except KeyError:
print('(WWW) write_from_rh: stokes selected but no data in file.')
stokes = False
if stokes:
vars = ['intensity', 'stokes_Q', 'stokes_U', 'stokes_V']
extrahd = ', stokes=[I,Q,U,V], ns=4'
else:
vars = ['intensity']
extrahd = ''
ns = len(vars)
# write image cube
print('writing image cube, %i files' % nt)
for i, f in enumerate(files):
for v in vars:
ii = ncdf.getvar(f, v, memmap=True)
ii = np.array(ii) # Tiago new
if waveidx is not None:
ii = ii[:, :, waveidx]
if waveinterp is not None:
fint = interp.interp1d(wave, ii, kind='linear')
ii = fint(waveinterp).astype(dtype)
lp.writeto('im_' + outfile, ii, append=True,
extraheader=extrahd, extraheader_sep=False)
del ii
if verbose:
progressbar(i + 1, nt)
print()
return
# old stuff, NOT IN USE
# write spectral cube
print('\nwriting spectral cube, %i rows' % ny)
isave = np.empty((nw, nt, nx * ns), dtype=dtype)
for y in range(ny):
for i, f in enumerate(files):
for j, v in enumerate(vars):
ii = ncdf.getvar(f, v, memmap=True)[:, y]
if waveidx is not None:
ii = ii[:, waveidx]
if waveinterp is not None:
fint = interp.interp1d(wave, ii, kind='linear')
ii = fint(waveinterp).astype(dtype)
isave[:, i, j::ns] = np.transpose(ii)
lp.writeto('sp_' + outfile, isave, append=True,
extraheader=extrahd, extraheader_sep=False)
if verbose:
progressbar(y + 1, ny)
print()
return
def write_from_rh_sp(files, outfile, stokes=False, waveidx=None,
waveinterp=None, verbose=False):
''' Writes crispex spectral cubes only, from RH 1.5D netcdf output.'''
from . import ncdf, lp
from ..utils.shell import progressbar
# open first file to get some data
ii = ncdf.getvar(files[0], 'intensity', memmap=True)
nx, ny, nw = ii.shape
nt = len(files)
dtype = ii.dtype
del ii
wave = ncdf.getvar(files[0], 'wavelength', memmap=False)
if waveidx is not None:
wave = wave[waveidx]
if waveinterp is None:
nw = len(wave)
else:
nw = len(waveinterp)
if stokes:
try:
ii = ncdf.getvar(files[0], 'stokes_V', memmap=True)
del ii
except KeyError:
print('(WWW) write_from_rh: stokes selected but no data in file.')
stokes = False
if stokes:
vars = ['intensity', 'stokes_Q', 'stokes_U', 'stokes_V']
extrahd = ', stokes=[I,Q,U,V], ns=4'
else:
vars = ['intensity']
extrahd = ''
ns = len(vars)
# write spectral cube
print('\nwriting spectral cube, %i rows' % ny)
isave = np.empty((nw, nt, nx * ns), dtype=dtype)
for y in range(ny):
for i, f in enumerate(files):
for j, v in enumerate(vars):
ii = ncdf.getvar(f, v, memmap=True)[:, y]
if waveidx is not None:
ii = ii[:, waveidx]
if waveinterp is not None:
fint = interp.interp1d(wave, ii, kind='linear')
ii = fint(waveinterp).astype(dtype)
isave[:, i, j::ns] = np.transpose(ii)
lp.writeto('sp_' + outfile, isave, append=True,
extraheader=extrahd, extraheader_sep=False)
if verbose:
progressbar(y + 1, ny)
print()
return
def sp_from_im(infile, outfile, nwave, maxmem=4, verbose=True):
''' Creates a CRISPEX spectral cube from a quasi-transposition of an
image cube.
IN:
infile - lp image cube file to read.
outfile - lp spectral cube file to write. Overwritten if exists.
nwave - number of spectral points.
maxmem - maximum memory (in GB) to use when creating temporary arrays
'''
from . import lp
from ..utils.shell import progressbar
GB = 2**30
nx, ny, ntl = lp.getheader(infile)[0]
ns = 1 # for now
    nt = ntl // nwave
if (ntl % nwave != 0):
raise ValueError('sp_from_im: image cube nlt axis not multiple of' +
' given nwave (%i).' % (nwave) + ' Check values!')
    ninc = maxmem * GB // (ntl * nx * ns * 4)
if ninc < 1:
raise MemoryError('sp_from_im: memory supplied for temporary arrays' +
' (%i GB) not enough.' % (maxmem) +
                          ' Need at least %.2f GB.' % (ntl * nx * ns * 4. / GB))
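    # process the cube ninc rows (in y) at a time so the temporary arrays
    # stay within the requested memory budget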
    for i in range(ny // ninc + 1):
imc = lp.getdata(infile)
isave = imc[:, i * ninc:(i + 1) * ninc]
sy = isave.shape[1]
isave = np.transpose(
np.transpose(isave).reshape(nt, nwave, nx * sy), axes=(1, 0, 2))
lp.writeto(outfile, isave, append=i != 0, extraheader='',
extraheader_sep=False)
imc.close()
if verbose:
            progressbar(i + 1, ny // ninc + 1)
print()
return
| M1kol4j/helita | helita/io/crispex.py | Python | bsd-3-clause | 7,675 | [
"NetCDF"
] | c402ab6cc862a77d3bee848c54a2b75a9fbab0d35ae8d87bec7b56ad55f12335 |
from nutils import testing, export
import os, tempfile, pathlib, treelog
import numpy
class mplfigure(testing.TestCase):
def setUp(self):
super().setUp()
self.outdir = pathlib.Path(self.enter_context(tempfile.TemporaryDirectory()))
self.enter_context(treelog.set(treelog.DataLog(str(self.outdir))))
@testing.requires('matplotlib', 'PIL')
def test_autodetect_imagetype(self):
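    # the expected prefixes are the standard magic numbers: JPEG files start
    # with FF D8 FF, PNG with its fixed 8-byte signature, PDF with '%PDF',
    # and SVG output must contain an <svg> tag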
for (imagetype, test) in (('jpg', lambda data: self.assertEqual(data[:3], b'\xFF\xD8\xFF')),
('png', lambda data: self.assertEqual(data[:8], b'\x89\x50\x4E\x47\x0D\x0A\x1A\x0A')),
('pdf', lambda data: self.assertEqual(data[:4], b'\x25\x50\x44\x46')),
('svg', lambda data: self.assertRegex(data, b'<svg[^<>]*>'))):
with self.subTest(imagetype=imagetype):
with export.mplfigure('test.{}'.format(imagetype)) as fig:
ax = fig.add_subplot(111)
ax.plot([1,2,3],[1,2,3])
with (self.outdir/'test.{}'.format(imagetype)).open('rb') as f:
test(f.read())
@testing.parametrize
class vtk(testing.TestCase):
def setUp(self):
super().setUp()
if self.ndims == 1:
self.x = numpy.array([[0,],[1,],[2,],[3,]], dtype=self.xtype)
self.tri = numpy.array([[0,1],[1,2],[2,3]])
elif self.ndims == 2:
self.x = numpy.array([[0,0],[0,1],[1,0],[1,1]], dtype=self.xtype)
self.tri = numpy.array([[0,1,2],[1,2,3]])
elif self.ndims == 3:
self.x = numpy.array([[0,0,0],[0,1,0],[1,0,0],[0,0,1]], dtype=self.xtype)
self.tri = numpy.array([[0,1,2,3]])
else:
raise Exception('invalid ndims {}'.format(self.ndims))
if hasattr(self, 'ptype'):
self.p = numpy.arange(len(self.x) * numpy.prod(self.pshape)).astype(self.ptype).reshape((len(self.x),)+self.pshape)
else:
self.p = None
if hasattr(self, 'ctype'):
self.c = numpy.arange(len(self.tri) * numpy.prod(self.cshape)).astype(self.ctype).reshape((len(self.tri),)+self.cshape)
else:
self.c = None
@property
def data(self):
yield b'# vtk DataFile Version 3.0\nvtk output\nBINARY\nDATASET UNSTRUCTURED_GRID\n'
if self.xtype == 'i4':
yield b'POINTS 4 int\n'
elif self.xtype == 'f4':
yield b'POINTS 4 float\n'
elif self.xtype == 'f8':
yield b'POINTS 4 double\n'
else:
raise Exception('not supported: xtype={!r}'.format(self.xtype))
if self.ndims == 1 and self.xtype == 'i4':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0])
elif self.ndims == 1 and self.xtype == 'f4':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,63,128,0,0,0,0,0,0,0,0,0,0,64,0,0,0,0,0,0,0,0,0,0,0,64,64,0,0,0,0,0,0,0,0,0,0])
elif self.ndims == 2 and self.xtype == 'i4':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0])
elif self.ndims == 2 and self.xtype == 'f4':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,128,0,0,0,0,0,0,63,128,0,0,0,0,0,0,0,0,0,0,63,128,0,0,63,128,0,0,0,0,0,0])
elif self.ndims == 2 and self.xtype == 'f8':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,240,0,0,0,0,0,0,63,240,0,0,0,0,0,0,0,0,0,0,0,0,0,0])
elif self.ndims == 3 and self.xtype == 'f4':
yield bytes([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,128,0,0,0,0,0,0,63,128,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,63,128,0,0])
else:
raise Exception('not supported: xtype={!r}, ndims={}'.format(self.xtype, self.ndims))
if self.ndims == 1:
yield b'CELLS 3 9\n'
yield bytes([0,0,0,2,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,1,0,0,0,2,0,0,0,2,0,0,0,2,0,0,0,3])
yield b'CELL_TYPES 3\n'
yield bytes([0,0,0,3,0,0,0,3,0,0,0,3])
elif self.ndims == 2:
yield b'CELLS 2 8\n'
yield bytes([0,0,0,3,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3,0,0,0,1,0,0,0,2,0,0,0,3])
yield b'CELL_TYPES 2\n'
yield bytes([0,0,0,5,0,0,0,5])
elif self.ndims == 3:
yield b'CELLS 1 5\n'
yield bytes([0,0,0,4,0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3])
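      # 'CELL_TYPES 1' is yielded without a trailing newline on purpose: the
      # leading byte 10 of the next chunk doubles as the '\n'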
yield b'CELL_TYPES 1'
yield bytes([10,0,0,0,10])
else:
raise Exception('invalid ndims {}'.format(self.ndims))
if self.p is not None:
yield b'POINT_DATA 4\n'
if self.ptype == 'f4' and self.pshape == ():
yield b'SCALARS p float 1\nLOOKUP_TABLE default\n'
yield bytes([0,0,0,0,63,128,0,0,64,0,0,0,64,64,0,0])
elif self.ptype == 'f8' and self.pshape == ():
yield b'SCALARS p double 1\nLOOKUP_TABLE default\n'
yield bytes([0,0,0,0,0,0,0,0,63,240,0,0,0,0,0,0,64,0,0,0,0,0,0,0,64,8,0,0,0,0,0,0])
elif self.ptype == 'i1' and self.pshape == ():
yield b'SCALARS p char 1\nLOOKUP_TABLE default\n'
yield bytes([0,1,2,3])
elif self.ptype == 'i2' and self.pshape == ():
yield b'SCALARS p short 1\nLOOKUP_TABLE default\n'
yield bytes([0,0,0,1,0,2,0,3])
elif self.ptype == 'i4' and self.pshape == ():
yield b'SCALARS p int 1\nLOOKUP_TABLE default\n'
yield bytes([0,0,0,0,0,0,0,1,0,0,0,2,0,0,0,3])
elif self.ptype == 'i1' and self.pshape == (2,):
yield b'VECTORS p char\n'
yield bytes([0,1,0,2,3,0,4,5,0,6,7,0])
elif self.ptype == 'f4' and self.pshape == (2,):
yield b'VECTORS p float\n'
yield bytes([0,0,0,0,63,128,0,0,0,0,0,0,64,0,0,0,64,64,0,0,0,0,0,0,64,128,0,0,64,160,0,0,0,0,0,0,64,192,0,0,64,224,0,0,0,0,0,0])
elif self.ptype == 'i1' and self.pshape == (3,):
yield b'VECTORS p char\n'
yield bytes([0,1,2,3,4,5,6,7,8,9,10,11])
elif self.ptype == 'i1' and self.pshape == (2,2):
yield b'TENSORS p char\n'
yield bytes([0,1,0,2,3,0,0,0,0,4,5,0,6,7,0,0,0,0,8,9,0,10,11,0,0,0,0,12,13,0,14,15,0,0,0,0])
elif self.ptype == 'i1' and self.pshape == (3,3):
yield b'TENSORS p char\n'
yield bytes([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35])
else:
        raise Exception('not supported: ptype={}, pshape={}'.format(self.ptype, self.pshape))
if self.c is not None:
yield b'CELL_DATA 1\n'
if self.ndims == 3 and self.ctype == 'i1' and self.cshape == ():
yield b'SCALARS c char 1\nLOOKUP_TABLE default\n'
yield bytes([0])
else:
        raise Exception('not supported: ndims={}, ctype={}, cshape={}'.format(self.ndims, self.ctype, self.cshape))
def test_data(self):
with tempfile.TemporaryDirectory() as outdir, treelog.set(treelog.DataLog(outdir)):
kwargs = {}
if self.p is not None:
kwargs['p'] = self.p
if self.c is not None:
kwargs['c'] = self.c
export.vtk('test', self.tri, self.x, **kwargs)
with open(os.path.join(outdir, 'test.vtk'), 'rb') as f:
data = f.read()
self.assertEqual(data, b''.join(self.data))
vtk(ndims=1, xtype='i4')
vtk(ndims=1, xtype='f4')
vtk(ndims=2, xtype='i4')
vtk(ndims=2, xtype='f4')
vtk(ndims=2, xtype='f8')
vtk(ndims=3, xtype='f4')
vtk(ndims=1, xtype='f4', ptype='f4', pshape=())
vtk(ndims=2, xtype='f4', ptype='f4', pshape=())
vtk(ndims=2, xtype='f4', ptype='f8', pshape=())
vtk(ndims=2, xtype='f4', ptype='i1', pshape=())
vtk(ndims=2, xtype='f4', ptype='i2', pshape=())
vtk(ndims=2, xtype='f4', ptype='i4', pshape=())
vtk(ndims=3, xtype='f4', ptype='i1', pshape=())
vtk(ndims=2, xtype='f4', ptype='i1', pshape=(2,))
vtk(ndims=2, xtype='f4', ptype='f4', pshape=(2,))
vtk(ndims=3, xtype='f4', ptype='i1', pshape=(3,))
vtk(ndims=2, xtype='f4', ptype='i1', pshape=(2,2))
vtk(ndims=3, xtype='f4', ptype='i1', pshape=(3,3))
vtk(ndims=3, xtype='f4', ctype='i1', cshape=())
| joostvanzwieten/nutils | tests/test_export.py | Python | mit | 7,788 | [
"VTK"
] | ce9bfd75b005f50ca3246005e13383831035f4c6a12c2aea69f2b0620c9783fe |
# -*- coding: utf-8 -*-
import os
import socket
from dirac.lib.base import *
from DIRAC import gConfig, gLogger
from dirac.lib.diset import getRPCClient
from DIRAC.Core.Utilities.List import uniqueElements, fromChar
from dirac.lib.credentials import getUserDN, getUsername, getAvailableGroups
from dirac.lib.credentials import getProperties, checkUserCredentials
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.FrameworkSystem.Client.UserProfileClient import UserProfileClient
REG_PROFILE_NAME = "User Registration"
class GeneralController( BaseController ):
def index( self ):
return redirect_to( controller = "info/general", action = "diracOverview" )
def diracOverview( self ):
return render( "/info/diracOverview.mako" )
@jsonify
def proxyUpload(self):
"""
Get p12 file and passwords as input. Split p12 to user key and certificate
and creating proxy for groups user belongs to. Upload proxy to proxy store
"""
# Otherwise the browser would offer to download a file
response.headers['Content-type'] = "text/html"
username = getUsername()
gLogger.info("Start upload proxy out of p12 for user: %s" % (username))
disclaimer = "\nNo proxy was created\nYour private info was safely deleted"
disclaimer = disclaimer + " from DIRAC service"
if username == "anonymous":
error = "Please, send a registration request first"
gLogger.error("Anonymous is not allowed")
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
groupList = getAvailableGroups()
groups = ", ".join(groupList)
gLogger.info("Available groups for the user %s: %s" % (username,groups))
if not len(groupList) > 0:
gLogger.error("User is not registered in any group")
error = "Seems that user %s is not register in any group" % username
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
store = list()
gLogger.debug("Request's body:")
for key in request.params.keys():
try:
gLogger.debug("%s - %s" % (key,request.params[key]))
except Exception,x:
gLogger.error("Exception: %s" % str(x))
error = "An exception has happen '%s'" % str(x)
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
try:
if request.params[key].filename:
name = request.params[key].filename
name = name.strip()
if name[-4:] == ".p12":
gLogger.info(".p12 in filename detected")
if request.params["pass_p12"]:
fileObject = request.params[key]
fileObject.p12 = str(request.params["pass_p12"])
gLogger.info(".p12 password detected")
store.append(fileObject)
gLogger.info("Certificate object is loaded")
except Exception,x:
gLogger.debug("Non fatal for logic, exception happens: %s" % str(x))
pass
    if not len(store) > 0: # no file(s) to store
gLogger.error("No file with *.p12 found")
error = "Failed to find any suitable *.p12 filename in your request"
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
gLogger.info("Number of p12 file(s) to process: %s" % len(store))
import tempfile
import shutil
import os
import random
import string
storePath = tempfile.mkdtemp(prefix='DIRAC_')
gLogger.info("Saving file from request to a tmp directory")
descriptionList = list()
try:
for file in store:
desc = dict()
for i in "name","p12","pem":
tmp = "".join(random.choice(string.letters) for x in range(10))
desc[i] = os.path.join(storePath,tmp)
tmpFile = open(desc["name"],"w")
shutil.copyfileobj(file.file, tmpFile)
file.file.close()
tmpFile.close()
tmpFile = open(desc["p12"],"w")
tmpFile.write(file.p12)
tmpFile.close()
pemPassword = "".join(random.choice(string.letters) for x in range(10))
tmpFile = open(desc["pem"],"w")
tmpFile.write(pemPassword)
tmpFile.close()
descriptionList.append(desc)
except Exception,x:
shutil.rmtree(storePath)
gLogger.error("Exception: %s" % str(x))
error = "An exception has happen '%s'" % str(x)
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
    if not len(descriptionList) > 0: # no certificate description(s) to store
shutil.rmtree(storePath)
gLogger.error("No certificate(s) found")
error = "List of certificate(s) is empty"
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
gLogger.info("Split certificate(s) to public and private keys")
keyList = list()
from DIRAC.Core.Utilities import Subprocess
for i in descriptionList:
key = dict()
name = i["name"]
p12 = i["p12"]
key["pem"] = i["pem"]
for j in "pub","private":
tmp = "".join(random.choice(string.letters) for x in range(10))
key[j] = os.path.join(storePath,tmp)
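      # split the p12 bundle into a public certificate and an encrypted
      # private key; the p12 password and the new pem passphrase are read
      # from temporary files rather than passed on the command line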
cmdCert = "openssl pkcs12 -clcerts -nokeys -in %s -out %s -password file:%s" % (name,key["pub"],p12)
cmdKey = "openssl pkcs12 -nocerts -in %s -out %s -passout file:%s -password file:%s" % (name,key["private"],key["pem"],p12)
for cmd in cmdCert,cmdKey:
result = Subprocess.shellCall(900,cmd)
gLogger.debug("Command is: %s" % cmd)
gLogger.debug("Result is: %s" % result)
if not result["OK"]:
shutil.rmtree(storePath)
gLogger.error(result["Message"])
error = "Error while executing SSL command: %s" % result["Message"]
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
keyList.append(key)
if not len(keyList) > 0:
shutil.rmtree(storePath)
error = "List of public and private keys is empty"
gLogger.error(error)
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
resultList = list()
for key in keyList:
for group in groupList:
gLogger.info("Uploading proxy for group: %s" % group)
cmd = "cat %s | dirac-proxy-init -U -g %s -C %s -K %s -p" % (key["pem"],group,key["pub"],key["private"])
result = Subprocess.shellCall(900,cmd)
gLogger.debug("Command is: %s" % cmd)
gLogger.debug("Result is: %s" % result)
if not result[ 'OK' ]:
shutil.rmtree(storePath)
error = "".join(result["Message"])
gLogger.error(error)
if len(resultList) > 0:
success = "\nHowever some operations has finished successfully:\n"
success = success + "\n".join(resultList)
error = error + success
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
code = result["Value"][0]
stdout = result["Value"][1]
error = result["Value"][2]
if len(error) > 0:
error = error.replace(">","")
error = error.replace("<","")
if not code == 0:
if len(resultList) > 0:
success = "\nHowever some operations has finished successfully:\n"
success = success + "\n".join(resultList)
error = error + success
error = error + disclaimer
gLogger.debug("Service response: %s" % error)
return {"success":"false","error":error}
resultList.append(stdout)
shutil.rmtree(storePath)
debug = "\n".join(resultList)
gLogger.debug(debug)
groups = ", ".join(groupList)
result = "Proxy uploaded for user: %s" % username
    if len(groupList) > 1:
      result = result + " in groups: %s" % groups
    else:
      result = result + " in group: %s" % groups
gLogger.info(result)
result = "Operation finished successfully\n" + result
result = result + "\nYour private info was safely deleted from DIRAC server"
gLogger.debug("Service response: %s" % result)
return {"success":"true","result":result}
################################################################################
@jsonify
def action(self):
if "getVOList" in request.params:
return { "success" : "true" , "result" : self.getVOList() }
elif "getCountries" in request.params:
return { "success" : "true" , "result" : self.getCountries() }
elif "send_message" in request.params:
return self.__sendMessage()
elif "registration_request" in request.params:
return self.registerUser()
else:
return { "success" : "false" , "error" : "Request parameters are not defined"}
################################################################################
def registerRequest( self , dn , email ):
"""
    Save the e-mail address under the given key in the profile REG_PROFILE_NAME
Return S_OK, S_ERROR
"""
upc = UserProfileClient( REG_PROFILE_NAME , getRPCClient )
return upc.storeVar( dn , email )
def isRequested( self , dn ):
"""
    Checks if a registration request has already been saved under this key
    Return the S_OK / S_ERROR structure from the profile client
"""
upc = UserProfileClient( REG_PROFILE_NAME , getRPCClient )
return upc.retrieveVar( dn )
def getVOAdmins( self , vo = None ):
"""
Get admin usernames for VOs in vo list
Argument is a list. Return value is a list
"""
names = list()
if not vo:
return names
for i in vo:
i = i.strip()
gLogger.debug( "VOAdmin for VO: %s" % i )
voadmins = gConfig.getValue( "/Registry/VO/%s/VOAdmin" % i , [] )
gLogger.debug( "/Registry/VO/%s/VOAdmin - %s" % ( i , voadmins ) )
names.extend( voadmins )
return names
def getUserByProperty( self , prop = "NormalUser" ):
"""
Get usernames based on group property
Argument is a string. Return value is a list
"""
groupList = list()
result = gConfig.getSections( "/Registry/Groups" )
gLogger.debug( "Group response: %s" % result )
if not result[ "OK" ]:
return groupList
groups = result[ "Value" ]
for j in groups:
props = getProperties( j )
gLogger.debug( "%s properties: %s" % ( j , props ) )
if prop in props:
groupList.append( j )
if not len( groupList ) > 0:
return groupList
groupList = uniqueElements( groupList )
gLogger.debug( "Chosen group(s): %s" % groupList )
userList = list()
for i in groupList:
users = gConfig.getValue( "/Registry/Groups/%s/Users" % i , [] )
gLogger.debug( "%s users: %s" % ( i , users ) )
if len( users ) > 0:
userList.extend( users )
return userList
def getMailDict( self , names = None ):
"""
Convert list of usernames to dict like { e-mail : full name }
Argument is a list. Return value is a dict
"""
resultDict = dict()
if not names:
return resultDict
for user in names:
email = gConfig.getValue( "/Registry/Users/%s/Email" % user , "" )
gLogger.debug( "/Registry/Users/%s/Email - '%s'" % ( user , email ) )
      email = email.strip()
if not email:
gLogger.error( "Can't find value for option /Registry/Users/%s/Email" % user )
continue
fname = gConfig.getValue( "/Registry/Users/%s/FullName" % user , "" )
gLogger.debug( "/Registry/Users/%s/FullName - '%s'" % ( user , fname ) )
fname = fname.strip()
if not fname:
fname = user
gLogger.debug( "FullName is absent, name to be used: %s" % fname )
resultDict[ email ] = fname
return resultDict
def __getAdminList( self , vo ):
"""
Return a list of admins who can register a new user.
Look first for vo admins then to user with property UserAdministrator and
looking at /Website/UserRegistrationAdmin as fallback
"""
adminList = list()
adminList = self.getVOAdmins( vo )
if not len( adminList ) > 0:
adminList = self.getUserByProperty( "UserManager" )
if not len( adminList ) > 0:
adminList = gConfig.getValue( "/Website/UserRegistrationAdmin" , [] )
return adminList
def sendMail( self , sendDict = None , title = None , body = None , fromAddress = None ):
"""
Sending an email using sendDict: { e-mail : name } as addressbook
title and body is the e-mail's Subject and Body
fromAddress is an email address in behalf of whom the message is sent
Return success/failure JSON structure
"""
if not sendDict:
result = ""
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not title:
result = "title argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not body:
result = "body argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
if not fromAddress:
result = "fromAddress argument is missing"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
sentSuccess = list()
sentFailed = list()
gLogger.debug( "Initializing Notification client" )
ntc = NotificationClient( lambda x , timeout: getRPCClient( x , timeout = timeout , static = True ) )
for email , name in sendDict.iteritems():
result = ntc.sendMail( email , title , body , fromAddress , False )
if not result[ "OK" ]:
error = name + ": " + result[ "Message" ]
sentFailed.append( error )
gLogger.error( "Sent failure: " , error )
else:
gLogger.info( "Successfully sent to %s" % name )
sentSuccess.append( name )
success = ", ".join( sentSuccess )
failure = "\n".join( sentFailed )
if len( success ) > 0 and len( failure ) > 0:
result = "Successfully sent e-mail to: "
result = result + success + "\n\nFailed to send e-mail to:\n" + failure
gLogger.debug( result )
return { "success" : "true" , "result" : result }
elif len( success ) > 0 and len( failure ) < 1:
result = "Successfully sent e-mail to: %s" % success
gLogger.debug( result )
return { "success" : "true" , "result" : result }
elif len( success ) < 1 and len( failure ) > 0:
result = "Failed to sent email to:\n%s" % failure
gLogger.debug( result )
return { "success" : "false" , "error" : result }
else:
result = "No messages were sent due technical failure"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
def checkUnicode( self , text = None ):
"""
Check if value is unicode or not and return properly converted string
Arguments are string and unicode/string, return value is a string
"""
try:
text = text.decode( 'utf-8' , "replace" )
except :
pass
text = text.encode( "utf-8" )
gLogger.debug( text )
return text
  def __messageLog( self , user , group , title , body ):
    """
    Save a sent message to a profile. Max 500 messages are allowed
"""
return True
def __sendMessage( self ):
"""
This function is used to send a mail to specific group of DIRAC user
Expected parameters from request are group, title, body
"""
gLogger.info("Start message broadcasting")
checkUserCredentials()
dn = getUserDN()
if not dn:
error = "Certificate is not loaded in the browser or DN is absent"
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
username = getUsername()
if username == "anonymous":
error = "Sending an anonymous message is forbidden"
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
gLogger.info( "DN: %s" % dn )
email = gConfig.getValue( "/Registry/Users/%s/Email" % username , "" )
gLogger.debug( "/Registry/Users/%s/Email - '%s'" % ( username , email ) )
    email = email.strip()
if not email:
error = "Can't find value for option /Registry/Users/%s/Email" % user
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
test = [ "group" , "title" , "body" ]
for i in test:
if not i in request.params:
error = "The required parameter %s is absent in request" % i
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
group = request.params[ "group" ]
users = gConfig.getValue( "/Registry/Groups/%s/Users" % group , [] )
if not len( users ) > 0:
error = "No users for %s group found" % group
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
sendDict = self.getMailDict( users )
if not len( sendDict ) > 0:
error = "Can't get a mail address for users in %s group" % group
gLogger.debug( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
gLogger.debug( "Final dictionary with mails to be used %s" % sendDict )
title = self.checkUnicode( request.params[ "title" ] )
gLogger.debug( "email title: %s" % title )
body = self.checkUnicode( request.params[ "body" ] )
gLogger.debug( "email body: %s" % body )
    self.__messageLog( username , group , title , body )
return self.sendMail( sendDict , title , body , email )
@jsonify
def registerUser( self ):
"""
This function is used to notify DIRAC admins about user registration request
The logic is simple:
    0) Check if a request from this e-mail has already been registered or not
1) Send mail to VO admin of requested VO
2) Send mail to users in group with UserAdministrator property
3) Send mail to users indicated in /Website/UserRegistrationAdmin option
"""
gLogger.info("Start processing a registration request")
checkUserCredentials()
# Check for having a DN but no username
dn = getUserDN()
if not dn:
error = "Certificate is not loaded in the browser or DN is absent"
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
username = getUsername()
if not username == "anonymous":
error = "You are already registered in DIRAC with username: %s" % username
gLogger.error( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
gLogger.info( "DN: %s" % dn )
if not "email" in request.params:
error = "Can not get your email address from the request"
gLogger.debug( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
userMail = request.params[ "email" ]
result = self.isRequested( userMail )
gLogger.debug( result )
if result[ "OK" ]:
return render( "/reg_done.mako" )
result = self.registerRequest( dn , userMail )
gLogger.debug( result )
if not result[ "OK" ]:
return { "success" : "false" , "error" : result[ "Message" ] }
vo = fromChar( request.params[ "vo" ] )
if not vo:
error = "You should indicate a VirtualOrganization for membership"
gLogger.debug( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
gLogger.info( "User want to be register in VO(s): %s" % vo )
body = str()
for i in request.params:
if not i in [ "registration_request" , "email" , "vo" ]:
text = self.checkUnicode( request.params[ i ] )
info = "%s - %s" % ( i , text )
body = body + info + "\n"
body = body + "DN - " + dn
gLogger.debug( "email body: %s" % body )
adminList = self.__getAdminList( vo )
if not len( adminList ) > 0:
error = "Can't get in contact with administrators about your request\n"
error = error + "Most likely this DIRAC instance is not configured yet"
gLogger.debug( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
adminList = uniqueElements( adminList )
gLogger.info( "Chosen admin(s): %s" % adminList )
sendDict = self.getMailDict( adminList )
if not len( sendDict ) > 0:
error = "Can't get in contact with administrators about your request\n"
error = error + "Most likely this DIRAC instance is not configured yet"
gLogger.debug( "Service response: %s" % error )
return { "success" : "false" , "error" : error }
gLogger.debug( "Final dictionary with mails to be used %s" % sendDict )
if socket.gethostname().find( '.' ) >= 0:
hostname = socket.gethostname()
else:
hostname = socket.gethostbyaddr( socket.gethostname() )[ 0 ]
title = "New user has sent registration request to %s" % hostname
return self.sendMail( sendDict , title , body , userMail )
def getRequesterEmail( self ):
"""
"""
user = getUsername()
if not user:
gLogger.debug( "user value is empty" )
return None
if user == "anonymous":
gLogger.debug( "user is anonymous" )
return None
email = gConfig.getValue( "/Registry/Users/%s/Email" % user , "" )
gLogger.debug( "/Registry/Users/%s/Email - '%s'" % ( user , email ) )
    email = email.strip()
if not email:
return None
return email
def grouplistFromRequest( self ):
"""
"""
if not "group" in request.params:
return None
if not len( request.params[ "group" ] ) > 0:
return None
separator = gConfig.getValue( "/Website/ListSeparator" , ":::" )
group = request.params[ "group" ].split( separator )
return group
def userlistFromRequest( self ):
"""
"""
if not "user" in request.params:
return None
if not len( request.params[ "user" ] ) > 0:
return None
separator = gConfig.getValue( "/Website/ListSeparator" , ":::" )
user = request.params[ "user" ].split( separator )
return user
def userlistFromGroup( self , groupname = None ):
"""
"""
if not groupname:
gLogger.debug( "Argument groupname is missing" )
return None
users = gConfig.getValue( "/Registry/Groups/%s/Users" % groupname , [] )
gLogger.debug( "%s users: %s" % ( groupname , users ) )
if not len( users ) > 0:
gLogger.debug( "No users for group %s found" % groupname )
return None
return users
def getVOList( self ):
vo = list()
result = gConfig.getSections( "/Registry/VO" )
if result[ "OK" ]:
vo = result[ "Value" ]
return vo
def aftermath( self ):
"""
"""
action = self.action
success = ", ".join( self.actionSuccess )
failure = "\n".join( self.actionFailed )
if len( self.actionSuccess ) > 1:
sText = self.prefix + "s"
else:
sText = self.prefix
if len( self.actionFailed ) > 1:
fText = self.prefix + "s"
else:
fText = self.prefix
if len( success ) > 0 and len( failure ) > 0:
sMessage = "%s %sed successfully: " % ( sText , action , success)
fMessage = "Failed to %s %s:\n%s" % ( action , fText , failure )
result = sMessage + "\n\n" + fMessage
return { "success" : "true" , "result" : result }
elif len( success ) > 0 and len( failure ) < 1:
result = "%s %sed successfully: %s" % ( sText , action , success )
return { "success" : "true" , "result" : result }
elif len( success ) < 1 and len( failure ) > 0:
result = "Failed to %s %s:\n%s" % ( action , fText , failure )
gLogger.always( result )
return { "success" : "false" , "error" : result }
else:
result = "No action has performed due technical failure. Check the logs please"
gLogger.debug( result )
return { "success" : "false" , "error" : result }
def getCountriesReversed(self):
"""
Return the dictionary of country names and
corresponding country code top-level domain (ccTLD)
"""
result = self.getCountries()
name = dict( zip( result.values() , result ) )
return name
def getCountries( self ):
"""
Return the dictionary of country code top-level domain (ccTLD) and
corresponding country name
"""
countries = {
"af": "Afghanistan",
"al": "Albania",
"dz": "Algeria",
"as": "American Samoa",
"ad": "Andorra",
"ao": "Angola",
"ai": "Anguilla",
"aq": "Antarctica",
"ag": "Antigua and Barbuda",
"ar": "Argentina",
"am": "Armenia",
"aw": "Aruba",
"au": "Australia",
"at": "Austria",
"az": "Azerbaijan",
"bs": "Bahamas",
"bh": "Bahrain",
"bd": "Bangladesh",
"bb": "Barbados",
"by": "Belarus",
"be": "Belgium",
"bz": "Belize",
"bj": "Benin",
"bm": "Bermuda",
"bt": "Bhutan",
"bo": "Bolivia",
"ba": "Bosnia and Herzegowina",
"bw": "Botswana",
"bv": "Bouvet Island",
"br": "Brazil",
"io": "British Indian Ocean Territory",
"bn": "Brunei Darussalam",
"bg": "Bulgaria",
"bf": "Burkina Faso",
"bi": "Burundi",
"kh": "Cambodia",
"cm": "Cameroon",
"ca": "Canada",
"cv": "Cape Verde",
"ky": "Cayman Islands",
"cf": "Central African Republic",
"td": "Chad",
"cl": "Chile",
"cn": "China",
"cx": "Christmas Island",
"cc": "Cocos Islands",
"co": "Colombia",
"km": "Comoros",
"cg": "Congo",
"cd": "Congo",
"ck": "Cook Islands",
"cr": "Costa Rica",
"ci": "Cote D'Ivoire",
"hr": "Croatia",
"cu": "Cuba",
"cy": "Cyprus",
"cz": "Czech Republic",
"dk": "Denmark",
"dj": "Djibouti",
"dm": "Dominica",
"do": "Dominican Republic",
"tp": "East Timor",
"ec": "Ecuador",
"eg": "Egypt",
"sv": "El Salvador",
"gq": "Equatorial Guinea",
"er": "Eritrea",
"ee": "Estonia",
"et": "Ethiopia",
"fk": "Falkland Islands",
"fo": "Faroe Islands",
"fj": "Fiji",
"fi": "Finland",
"fr": "France",
"fx": "France, metropolitan",
"gf": "French Guiana",
"pf": "French Polynesia",
"tf": "French Southern Territories",
"ga": "Gabon",
"gm": "Gambia",
"ge": "Georgia",
"de": "Germany",
"gh": "Ghana",
"gi": "Gibraltar",
"gr": "Greece",
"gl": "Greenland",
"gd": "Grenada",
"gp": "Guadeloupe",
"gu": "Guam",
"gt": "Guatemala",
"gn": "Guinea",
"gw": "Guinea-Bissau",
"gy": "Guyana",
"ht": "Haiti",
"hm": "Heard and Mc Donald Islands",
"va": "Vatican City",
"hn": "Honduras",
"hk": "Hong Kong",
"hu": "Hungary",
"is": "Iceland",
"in": "India",
"id": "Indonesia",
"ir": "Iran",
"iq": "Iraq",
"ie": "Ireland",
"il": "Israel",
"it": "Italy",
"jm": "Jamaica",
"jp": "Japan",
"jo": "Jordan",
"kz": "Kazakhstan",
"ke": "Kenya",
"ki": "Kiribati",
"kp": "Korea",
"kr": "Korea",
"kw": "Kuwait",
"kg": "Kyrgyzstan",
"la": "Lao",
"lv": "Latvia",
"lb": "Lebanon",
"ls": "Lesotho",
"lr": "Liberia",
"ly": "Libyan",
"li": "Liechtenstein",
"lt": "Lithuania",
"lu": "Luxembourg",
"mo": "Macau",
"mk": "Macedonia",
"mg": "Madagascar",
"mw": "Malawi",
"my": "Malaysia",
"mv": "Maldives",
"ml": "Mali",
"mt": "Malta",
"mh": "Marshall Islands",
"mq": "Martinique",
"mr": "Mauritania",
"mu": "Mauritius",
"yt": "Mayotte",
"mx": "Mexico",
"fm": "Micronesia",
"md": "Moldova",
"mc": "Monaco",
"mn": "Mongolia",
"ms": "Montserrat",
"ma": "Morocco",
"mz": "Mozambique",
"mm": "Myanmar",
"na": "Namibia",
"nr": "Nauru",
"np": "Nepal",
"nl": "Netherlands",
"an": "Netherlands Antilles",
"nc": "New Caledonia",
"nz": "New Zealand",
"ni": "Nicaragua",
"ne": "Niger",
"ng": "Nigeria",
"nu": "Niue",
"nf": "Norfolk Island",
"mp": "Northern Mariana Islands",
"no": "Norway",
"om": "Oman",
"pk": "Pakistan",
"pw": "Palau",
"pa": "Panama",
"pg": "Papua New Guinea",
"py": "Paraguay",
"pe": "Peru",
"ph": "Philippines",
"pn": "Pitcairn",
"pl": "Poland",
"pt": "Portugal",
"pr": "Puerto Rico",
"qa": "Qatar",
"re": "Reunion",
"ro": "Romania",
"ru": "Russia",
"rw": "Rwanda",
"kn": "Saint Kitts and Nevis",
"lc": "Saint Lucia",
"vc": "Saint Vincent and the Grenadines",
"ws": "Samoa",
"sm": "San Marino",
"st": "Sao Tome and Principe",
"sa": "Saudi Arabia",
"sn": "Senegal",
"sc": "Seychelles",
"sl": "Sierra Leone",
"sg": "Singapore",
"sk": "Slovakia",
"si": "Slovenia",
"sb": "Solomon Islands",
"so": "Somalia",
"za": "South Africa",
"gs": "South Georgia and the South Sandwich Islands",
"es": "Spain",
"lk": "Sri Lanka",
"sh": "St. Helena",
"pm": "St. Pierre and Miquelon",
"sd": "Sudan",
"sr": "Suriname",
"sj": "Svalbard and Jan Mayen Islands",
"sz": "Swaziland",
"se": "Sweden",
"ch": "Switzerland",
"sy": "Syrian Arab Republic",
"tw": "Taiwan",
"tj": "Tajikistan",
"tz": "Tanzania",
"th": "Thailand",
"tg": "Togo",
"tk": "Tokelau",
"to": "Tonga",
"tt": "Trinidad and Tobago",
"tn": "Tunisia",
"tr": "Turkey",
"tm": "Turkmenistan",
"tc": "Turks and Caicos Islands",
"tv": "Tuvalu",
"ug": "Uganda",
"ua": "Ukraine",
"ae": "United Arab Emirates",
"gb": "United Kingdom",
"uk": "United Kingdom",
"us": "United States",
"um": "United States Minor Outlying Islands",
"uy": "Uruguay",
"uz": "Uzbekistan",
"vu": "Vanuatu",
"ve": "Venezuela",
"vn": "Viet Nam",
"vg": "Virgin Islands (British)",
"vi": "Virgin Islands (U.S.)",
"wf": "Wallis and Futuna Islands",
"eh": "Western Sahara",
"ye": "Yemen",
"yu": "Yugoslavia",
"zm": "Zambia",
"zw": "Zimbabwe",
"su": "Soviet Union"
}
return countries
| DIRACGrid/DIRACWeb | dirac/controllers/info/general.py | Python | gpl-3.0 | 30,979 | [
"DIRAC"
] | 7823a65d0b35c18d76ae1e51dacbfebd7c77d6483e8c8b1ff5cf84ef82ab6b33 |
#! /usr/bin/env python2.7 -3
__doc__ = '''
extract.py
This script implements the "transposon phylogeny extraction scheme".
As input, it takes the result of a blast search for one or several
transposons against a single fly genome, and it writes for each one
a fasta file suitable for multiple alignment. The currently supported
format for input is tab-delimited or comma-separated values (e.g. by
using -outfmt 6 or -outfmt 10 with blastn or blast_formatter) contained
in any number of files. The files must each have a header that labels
the data, which must include the following fields (in any order,
uppercase or lowercase):
[FIELDS_GO_HERE]
A companion utility is available that links this utility directly to
BLAST, without any need for your interaction: see blastextract.py in
this file.
'''
defaults = { 'max_overlap' : (int,1), 'min_distance' : (int,5000),
'min_length' : (int,-1), 'evalue_threshold': (float,0.0) }
def maybeint(x): return x if x is None else int(x)
import classify, argparse, sys, fasta, itertools as it, os
__doc__ = __doc__.replace('[FIELDS_GO_HERE]', ', '.join(classify.allflds))
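# Example invocation (file names are placeholders):
#   python extract.py -e 1e-10 -d 5000 -o out.fna hits.csv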
def makeparser(parser=None):
if parser is None:
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o','--out',default='-',help='''\
Output file name. Use '-' to output to stdout; this is the default.
NOTE: If the input contains more than one transposon, their output
will be written to separate files (e.g., 'penelope.fna') and this
argument will be ignored.''')
parser.add_argument('file',help='''\
Input file name; should be a csv file whose lines correspond to BLAST
hits, and including the fields QSTART, QEND, SSTART, SEND, and EVALUE.
Use '-' to read the file from stdin. This argument is required.''')
group = parser.add_mutually_exclusive_group()
group.add_argument('-a','--append',dest='mode',action='store_const',
help='''Triggers append mode: if the output file already exists, data is
appended to it.''',const='a')
group.add_argument('-w','--overwrite',action='store_const',help='''
Triggers overwrite mode: if the output file already exists, the program
replaces it. This is the default behavior.''',const='w',dest='mode')
parser.set_defaults(mode='w')
parser.add_argument('-d','--min-distance',help='''
Minimum distance between islands - in other words, if two fragments
are any closer than this in their subject ordinates, they will be
part of the same island. Defaults to {}. The
larger this value, the more the effects of this program: if set to
0, for instance, the only islands will be nests and every fragment
will go on a separate line.'''.format(defaults['min_distance']))
parser.add_argument('-e','--evalue-threshold', help='''\
Maximum evalue; hits with a higher evalue will be ignored. If
omitted, no threshold will be enforced. In practice, this option
and --min-length may have redundant effects, and this one is
probably more biologically meaningful.''')
parser.add_argument('-l','--min-length',help='''
Minimum length; hits in a nest that are shorter will be excluded.
If omitted, no minimum will be enforced. In either case, no minimum
will apply for hits outside of nests. Note: given the right argument
for --evalue-threshold, this argument may be redundant.''')
parser.add_argument('-p','--max-overlap',help='''\
Overlap threshold between blast hits (in the subject sequence) -
i.e. if two hits have an overlap by at least this many base pairs,
they are part of a nest. If this option is omitted, any overlap
whatever will trigger a nest relationship, while specifying a
higher number allows insignificant overlaps to be ignored.''')
return parser
if __name__=='__main__' and not sys.flags.interactive:
parser = makeparser()
args = parser.parse_args()
    for k,(T,v) in defaults.iteritems():
        given = getattr(args,k)
        try: setattr(args,k,v if given is None else T(given))
        except ValueError:
            parser.print_usage()
            sys.exit('{}: error: bad type for --{} (got {})'.format(
                parser.prog,k.replace('_','-'),given))
    # check for zero only after the raw string arguments have been converted
    if 0 in (args.max_overlap,args.min_distance,args.min_length):
        parser.print_usage()
        sys.exit(parser.prog+': error: 0 not a valid arg')
with fasta.fasta(args.out,args.mode) as out:
classify.full_transposon_treatment(
seq = classify.hitsfromcsv(args.file),
overlap = args.max_overlap,
gap = args.min_distance,
minlength = args.min_length,
evalue = args.evalue_threshold,
fastaout = out
)
| jpassaro/seq-align-prep | extract.py | Python | gpl-2.0 | 5,024 | [
"BLAST"
] | ddc046beac1be573aa428b7e1ffdb392e43ccb3170994b5d3a7b8e06e1ddd075 |
#-------------------------------------------------------------------------------
# rbtlib: __init__.py
#
# rbtlib module initialization.
#-------------------------------------------------------------------------------
# The MIT License (MIT)
# Copyright (c) 2016 Brian Minard
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#-------------------------------------------------------------------------------
__all__ = [
"Resource",
"ResourceFactory",
"Root",
"rbt",
"user",
]
from rbt import rbt
from root import Root
from resource import Resource
from resource import ResourceFactory
import user
__version__ = u'0.3.0'
| bminard/rbtlib | rbtlib/__init__.py | Python | mit | 1,653 | [
"Brian"
] | 95993c77901a99a8eeae81465461a6d7154ef2c7fcc665e0b518113ed860c597 |
import os
import numpy as np
import numpy.ma as ma
from pylru import lrudecorator
import rasterio
from ..rasterset import Raster
from .. import utils
REFERENCE_YEAR = 2000
class Hyde(object):
def __init__(self, year):
self._year = year
return
@property
def year(self):
return self._year
@property
def syms(self):
return ['grumps', 'hpd_ref', 'hpd_proj']
def eval(self, df):
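    # scale the GRUMP reference-year map by the ratio of projected to
    # reference HYDE population density; where the reference density is
    # zero, fall back to the raw projection (div guards against a zero
    # denominator)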
div = ma.where(df['hpd_ref'] == 0, 1, df['hpd_ref'])
return ma.where(df['hpd_ref'] == 0,
df['hpd_proj'],
df['grumps'] * df['hpd_proj'] / div)
@lrudecorator(10)
def years():
with rasterio.open('netcdf:%s/luh2/hyde.nc:popd' % utils.outdir()) as ds:
return tuple(map(lambda idx: int(ds.tags(idx)['NETCDF_DIM_time']),
ds.indexes))
def raster(version, year):
  if year not in years():
    raise RuntimeError('year (%d) not present in HYDE dataset' % year)
return {'hpd':
Raster('hpd', 'netcdf:%s/luh2/hyde.nc:popd' % utils.outdir(),
                 band=years().index(year) + 1)}  # rasterio bands are 1-based
def scale_grumps(year):
rasters = {}
if year not in years():
raise RuntimeError('year %d not available in HYDE projection' % year)
ref_band = years().index(REFERENCE_YEAR)
year_band = years().index(year)
rasters['grumps'] = Raster('grumps', '%s/luh2/gluds00ag.tif' % utils.outdir())
rasters['hpd_ref'] = Raster('hpd_ref',
'netcdf:%s/luh2/hyde.nc:popd' % utils.outdir(),
band=ref_band + 1)
rasters['hpd_proj'] = Raster('hpd_proj',
'netcdf:%s/luh2/hyde.nc:popd' % utils.outdir(),
band=year_band + 1)
rasters['hpd'] = Hyde(year)
return rasters
| ricardog/raster-project | projections/hpd/hyde.py | Python | apache-2.0 | 1,764 | [
"NetCDF"
] | 7ea853f9a92ad9eaaee58d252c7dbd3ebc78dd776b8024206bd7f5d9fe75f9c3 |
"""
This is the boilerplate default configuration file.
Changes and additions to settings should be done in the config module
located in the application root rather than this config.
"""
config = {
# webapp2 sessions
'webapp2_extras.sessions': {'secret_key': '_PUT_KEY_HERE_YOUR_SECRET_KEY_'},
# webapp2 authentication
'webapp2_extras.auth': {'user_model': 'boilerplate.models.User',
'cookie_name': 'session_name'},
# jinja2 templates
'webapp2_extras.jinja2': {'template_path': ['templates', 'boilerplate/templates', 'bp_admin/templates'],
'environment_args': {'extensions': ['jinja2.ext.i18n']}},
# application name
'app_name': "Google App Engine Boilerplate",
# the default language code for the application.
# should match whatever language the site uses when i18n is disabled
'app_lang': 'en',
# Locale code = <language>_<territory> (ie 'en_US')
# to pick locale codes see http://cldr.unicode.org/index/cldr-spec/picking-the-right-language-code
# also see http://www.sil.org/iso639-3/codes.asp
# Language codes defined under iso 639-1 http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# Territory codes defined under iso 3166-1 alpha-2 http://en.wikipedia.org/wiki/ISO_3166-1
# disable i18n if locales array is empty or None
'locales': ['en_US', 'es_ES', 'it_IT', 'zh_CN', 'id_ID', 'fr_FR', 'de_DE', 'ru_RU', 'pt_BR', 'cs_CZ','vi_VN','nl_NL'],
# contact page email settings
'contact_sender': "PUT_SENDER_EMAIL_HERE",
'contact_recipient': "PUT_RECIPIENT_EMAIL_HERE",
# Password AES Encryption Parameters
# aes_key must be only 16 (*AES-128*), 24 (*AES-192*), or 32 (*AES-256*) bytes (characters) long.
'aes_key': "12_24_32_BYTES_KEY_FOR_PASSWORDS",
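    # a suitable 32-character key can be generated once with, e.g.:
    #   python -c "import os, binascii; print binascii.hexlify(os.urandom(16))"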
'salt': "_PUT_SALT_HERE_TO_SHA512_PASSWORDS_",
# get your own consumer key and consumer secret by registering at https://dev.twitter.com/apps
# callback url must be: http://[YOUR DOMAIN]/login/twitter/complete
'twitter_consumer_key': 'PUT_YOUR_TWITTER_CONSUMER_KEY_HERE',
'twitter_consumer_secret': 'PUT_YOUR_TWITTER_CONSUMER_SECRET_HERE',
#Facebook Login
# get your own consumer key and consumer secret by registering at https://developers.facebook.com/apps
#Very Important: set the site_url= your domain in the application settings in the facebook app settings page
# callback url must be: http://[YOUR DOMAIN]/login/facebook/complete
'fb_api_key': 'PUT_YOUR_FACEBOOK_PUBLIC_KEY_HERE',
    'fb_secret': 'PUT_YOUR_FACEBOOK_SECRET_KEY_HERE',
#Linkedin Login
#Get you own api key and secret from https://www.linkedin.com/secure/developer
'linkedin_api': 'PUT_YOUR_LINKEDIN_PUBLIC_KEY_HERE',
    'linkedin_secret': 'PUT_YOUR_LINKEDIN_SECRET_KEY_HERE',
# Github login
# Register apps here: https://github.com/settings/applications/new
'github_server': 'github.com',
'github_redirect_uri': 'http://www.example.com/social_login/github/complete',
'github_client_id': 'PUT_YOUR_GITHUB_CLIENT_ID_HERE',
'github_client_secret': 'PUT_YOUR_GITHUB_CLIENT_SECRET_HERE',
# get your own recaptcha keys by registering at http://www.google.com/recaptcha/
'captcha_public_key': "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE",
'captcha_private_key': "PUT_YOUR_RECAPCHA_PRIVATE_KEY_HERE",
# Use a complete Google Analytics code, no just the Tracking ID
# In config/boilerplate.py there is an example to fill out this value
'google_analytics_code': "",
# add status codes and templates used to catch and display errors
# if a status code is not listed here it will use the default app engine
# stacktrace error page or browser error page
'error_templates': {
403: 'errors/default_error.html',
404: 'errors/default_error.html',
500: 'errors/default_error.html',
},
# Enable Federated login (OpenID and OAuth)
# Google App Engine Settings must be set to Authentication Options: Federated Login
'enable_federated_login': True,
# jinja2 base layout template
'base_layout': 'base.html',
# send error emails to developers
'send_mail_developer': False,
# fellas' list
'developers': (
('Santa Klauss', 'snowypal@northpole.com'),
),
# If true, it will write in datastore a log of every email sent
'log_email': True,
# If true, it will write in datastore a log of every visit
'log_visit': True,
# ----> ADD MORE CONFIGURATION OPTIONS HERE <----
} # end config
| mats116/ElasticBigQuery | boilerplate/config.py | Python | lgpl-3.0 | 4,562 | [
"VisIt"
] | 4e1e273057c8680b53bf419e0e8366d2a13890a93deb055c231c8b9db34d5b1d |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Descriptive metrics for Clovis.
When executed as a script, prints the amount of data attributed to Ads, and
shows a graph of the amount of data to download for a new visit to the same
page, with a given time interval.
"""
import collections
import urlparse
import content_classification_lens
from request_track import CachingPolicy
HTTP_OK_LENGTH = len("HTTP/1.1 200 OK\r\n")
def _RequestTransferSize(request):
def HeadersSize(headers):
# 4: ':', ' ', '\r', '\n'
return sum(len(k) + len(v) + 4 for (k, v) in headers.items())
if request.protocol == 'data':
return {'get': 0, 'request_headers': 0, 'response_headers': 0, 'body': 0}
return {'get': len('GET ') + len(request.url) + 2,
'request_headers': HeadersSize(request.request_headers or {}),
'response_headers': HeadersSize(request.response_headers or {}),
'body': request.encoded_data_length}
def TransferSize(requests):
"""Returns the total transfer size (uploaded, downloaded) of requests.
This is an estimate as we assume:
- 200s (for the size computation)
- GET only.
Args:
requests: ([Request]) List of requests.
Returns:
(uploaded_bytes (int), downloaded_bytes (int))
"""
uploaded_bytes = 0
downloaded_bytes = 0
for request in requests:
request_bytes = _RequestTransferSize(request)
uploaded_bytes += request_bytes['get'] + request_bytes['request_headers']
downloaded_bytes += (HTTP_OK_LENGTH
+ request_bytes['response_headers']
+ request_bytes['body'])
return (uploaded_bytes, downloaded_bytes)
def TotalTransferSize(trace):
"""Returns the total transfer size (uploaded, downloaded) from a trace."""
return TransferSize(trace.request_track.GetEvents())
def TransferredDataRevisit(trace, after_time_s, assume_validation_ok=False):
"""Returns the amount of data transferred for a revisit.
Args:
trace: (LoadingTrace) loading trace.
after_time_s: (float) Time in s after which the site is revisited.
assume_validation_ok: (bool) Assumes that the resources to validate return
304s.
Returns:
(uploaded_bytes, downloaded_bytes)
"""
uploaded_bytes = 0
downloaded_bytes = 0
for request in trace.request_track.GetEvents():
caching_policy = CachingPolicy(request)
policy = caching_policy.PolicyAtDate(request.wall_time + after_time_s)
request_bytes = _RequestTransferSize(request)
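    # a resource that needs no validation is assumed to still be fresh in
    # the cache, so the revisit transfers nothing for it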
if policy == CachingPolicy.VALIDATION_NONE:
continue
uploaded_bytes += request_bytes['get'] + request_bytes['request_headers']
if (policy in (CachingPolicy.VALIDATION_SYNC,
CachingPolicy.VALIDATION_ASYNC)
and caching_policy.HasValidators() and assume_validation_ok):
downloaded_bytes += len('HTTP/1.1 304 NOT MODIFIED\r\n')
continue
downloaded_bytes += (HTTP_OK_LENGTH
+ request_bytes['response_headers']
+ request_bytes['body'])
return (uploaded_bytes, downloaded_bytes)
def AdsAndTrackingTransferSize(trace, ad_rules_filename,
tracking_rules_filename):
"""Returns the transfer size attributed to ads and tracking.
Args:
trace: (LoadingTrace) a loading trace.
ad_rules_filename: (str) Path to an ad rules file.
tracking_rules_filename: (str) Path to a tracking rules file.
Returns:
(uploaded_bytes (int), downloaded_bytes (int))
"""
content_lens = (
content_classification_lens.ContentClassificationLens.WithRulesFiles(
trace, ad_rules_filename, tracking_rules_filename))
requests = content_lens.AdAndTrackingRequests()
return TransferSize(requests)
def DnsRequestsAndCost(trace):
"""Returns the number and cost of DNS requests for a trace."""
requests = trace.request_track.GetEvents()
requests_with_dns = [r for r in requests if r.timing.dns_start != -1]
dns_requests_count = len(requests_with_dns)
dns_cost = sum(r.timing.dns_end - r.timing.dns_start
for r in requests_with_dns)
return (dns_requests_count, dns_cost)
def ConnectionMetrics(trace):
"""Returns the connection metrics for a given trace.
Returns:
{
'connections': int,
'connection_cost_ms': float,
'ssl_connections': int,
'ssl_cost_ms': float,
'http11_requests': int,
'h2_requests': int,
'data_requests': int,
'domains': int
}
"""
requests = trace.request_track.GetEvents()
requests_with_connect = [r for r in requests if r.timing.connect_start != -1]
requests_with_connect_count = len(requests_with_connect)
connection_cost = sum(r.timing.connect_end - r.timing.connect_start
for r in requests_with_connect)
ssl_requests = [r for r in requests if r.timing.ssl_start != -1]
ssl_requests_count = len(ssl_requests)
ssl_cost = sum(r.timing.ssl_end - r.timing.ssl_start for r in ssl_requests)
requests_per_protocol = collections.defaultdict(int)
for r in requests:
requests_per_protocol[r.protocol] += 1
domains = set()
for r in requests:
if r.protocol == 'data':
continue
domain = urlparse.urlparse(r.url).hostname
domains.add(domain)
return {
'connections': requests_with_connect_count,
'connection_cost_ms': connection_cost,
'ssl_connections': ssl_requests_count,
'ssl_cost_ms': ssl_cost,
'http11_requests': requests_per_protocol['http/1.1'],
'h2_requests': requests_per_protocol['h2'],
'data_requests': requests_per_protocol['data'],
'domains': len(domains)
}
def PlotTransferSizeVsTimeBetweenVisits(trace):
times = [10, 60, 300, 600, 3600, 4 * 3600, 12 * 3600, 24 * 3600]
  labels = ['10s', '1m', '5m', '10m', '1h', '4h', '12h', '1d']
(_, total_downloaded) = TotalTransferSize(trace)
downloaded = [TransferredDataRevisit(trace, delta_t)[1] for delta_t in times]
plt.figure()
plt.title('Amount of data to download for a revisit - %s' % trace.url)
plt.xlabel('Time between visits (log)')
plt.ylabel('Amount of data (bytes)')
plt.plot(times, downloaded, 'k+--')
plt.axhline(total_downloaded, color='k', linewidth=2)
plt.xscale('log')
plt.xticks(times, labels)
plt.show()
def main(trace_filename, ad_rules_filename, tracking_rules_filename):
trace = loading_trace.LoadingTrace.FromJsonFile(trace_filename)
(_, ads_downloaded_bytes) = AdsAndTrackingTransferSize(
trace, ad_rules_filename, tracking_rules_filename)
(_, total_downloaded_bytes) = TotalTransferSize(trace)
print '%e bytes linked to Ads/Tracking (%.02f%%)' % (
ads_downloaded_bytes,
(100. * ads_downloaded_bytes) / total_downloaded_bytes)
PlotTransferSizeVsTimeBetweenVisits(trace)
if __name__ == '__main__':
import sys
from matplotlib import pylab as plt
import loading_trace
if len(sys.argv) != 4:
print (
'Usage: %s trace_filename ad_rules_filename tracking_rules_filename'
% sys.argv[0])
sys.exit(0)
main(*sys.argv[1:])
| danakj/chromium | tools/android/loading/metrics.py | Python | bsd-3-clause | 7,135 | [
"VisIt"
] | 8043694fe5c5f7a1ccc8dc22d91c1334e0b8ec5fbddf937ebe0a6d319af51ee9 |
# Orca
#
# Copyright 2008-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining tutorial utterances for objects. In general,
there probably should be a singleton instance of the TutorialGenerator
class. For those wishing to override the generators, however,
one can create a new instance and replace/extend the tutorial generators
as they see fit."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2008-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
from . import debug
from . import settings
from .orca_i18n import _ # for gettext support
class TutorialGenerator:
"""Takes accessible objects and produces a tutorial string to speak
for those objects. See the getTutorialString method, which is the
primary entry point. Subclasses can feel free to override/extend
the getTutorialGenerators instance field as they see fit."""
def __init__(self, script):
# The script that created us. This allows us to ask the
# script for information if we need it.
#
self._script = script
# storing the last spoken message.
self.lastTutorial = ""
# Set up a dictionary that maps role names to functions
# that generate tutorial strings for objects that implement that role.
#
self.tutorialGenerators = {}
self.tutorialGenerators[pyatspi.ROLE_CHECK_BOX] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_COMBO_BOX] = \
self._getTutorialForComboBox
self.tutorialGenerators[pyatspi.ROLE_FRAME] = \
self._getTutorialForFrame
self.tutorialGenerators[pyatspi.ROLE_ICON] = \
self._getTutorialForIcon
self.tutorialGenerators[pyatspi.ROLE_LAYERED_PANE] = \
self._getTutorialForLayeredPane
self.tutorialGenerators[pyatspi.ROLE_LIST] = \
self._getTutorialForList
self.tutorialGenerators[pyatspi.ROLE_LIST_ITEM] = \
self._getTutorialForListItem
self.tutorialGenerators[pyatspi.ROLE_PAGE_TAB] = \
self._getTutorialForPageTab
self.tutorialGenerators[pyatspi.ROLE_PARAGRAPH] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_PASSWORD_TEXT] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_ENTRY] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_PUSH_BUTTON] = \
self._getTutorialForPushButton
self.tutorialGenerators[pyatspi.ROLE_SPIN_BUTTON] = \
self._getTutorialForSpinButton
self.tutorialGenerators[pyatspi.ROLE_TABLE_CELL] = \
self._getTutorialForTableCellRow
self.tutorialGenerators[pyatspi.ROLE_TEXT] = \
self._getTutorialForText
self.tutorialGenerators[pyatspi.ROLE_TOGGLE_BUTTON] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_RADIO_BUTTON] = \
self._getTutorialForRadioButton
self.tutorialGenerators[pyatspi.ROLE_MENU] = \
self._getTutorialForMenu
self.tutorialGenerators[pyatspi.ROLE_CHECK_MENU_ITEM] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_MENU_ITEM] = \
self._getTutorialForMenuItem
self.tutorialGenerators[pyatspi.ROLE_RADIO_MENU_ITEM] = \
self._getTutorialForCheckBox
self.tutorialGenerators[pyatspi.ROLE_SLIDER] = \
self._getTutorialForSlider
def _debugGenerator(self, generatorName, obj, alreadyFocused, utterances):
"""Prints debug.LEVEL_FINER information regarding
the tutorial generator.
Arguments:
- generatorName: the name of the generator
- obj: the object being presented
- alreadyFocused: False if object just received focus
- utterances: the generated text
"""
debug.println(debug.LEVEL_FINER,
"GENERATOR: %s" % generatorName)
debug.println(debug.LEVEL_FINER,
" obj = %s" % obj.name)
debug.println(debug.LEVEL_FINER,
" role = %s" % obj.getRoleName())
debug.println(debug.LEVEL_FINER,
" alreadyFocused = %s" % alreadyFocused)
debug.println(debug.LEVEL_FINER,
" utterances:")
for text in utterances:
debug.println(debug.LEVEL_FINER,
" (%s)" % text)
def _getDefaultTutorial(
self, obj, alreadyFocused, forceTutorial, role=None):
"""The default tutorial generator returns the empty tutorial string
because We have no associated tutorial function for the object.
Arguments:
- obj: an Accessible
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
- role: A role that should be used instead of the Accessible's
possible role.
Returns the empty list []
"""
return []
def _getTutorialForCheckBox(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a check box.
Arguments:
- obj: the check box
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user on how to toggle a checkbox.
msg = _("Press space to toggle.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForCheckBox",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForComboBox(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a combobox.
Arguments:
- obj: the combo box
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is a tip for the user on how to interact
# with a combobox.
msg = _("Press space to expand, and use up and down to select an item.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForComboBox",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForFrame(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a frame.
Arguments:
- obj: the frame
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
name = self._script.utilities.displayedText(obj)
if not name and obj.description:
name = obj.description
# Translators: If this application has more than one unfocused alert or
# dialog window, inform user of how to refocus these.
childWindowsMsg = _("Press alt+f6 to give focus to child windows.")
# If this application has more than one unfocused alert or
# dialog window, tell user how to give them focus.
try:
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
except:
alertAndDialogCount = 0
if alertAndDialogCount > 0:
utterances.append(childWindowsMsg)
self._debugGenerator("_getTutorialForFrame",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForIcon(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for an icon.
Arguments:
- obj: the icon
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
if obj.parent.getRole() == pyatspi.ROLE_LAYERED_PANE:
utterances = self._getTutorialForLayeredPane(obj.parent,
alreadyFocused,
forceTutorial)
else:
utterances = self._getDefaultTutorial(obj,
alreadyFocused,
forceTutorial)
self._debugGenerator("_getTutorialForIcon",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForLayeredPane(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a layered pane.
Arguments:
- obj: the layered pane
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
name = self._script.utilities.displayedText(obj)
if not name and obj.description:
name = obj.description
# Translators: this gives tips on how to navigate items in a
# layered pane.
msg = _("To move to items, use either " \
"the arrow keys or type ahead searching.")
utterances.append(msg)
# Translators: this is the tutorial string for when first landing
# on the desktop, describing how to access the system menus.
desktopMsg = _("To get to the system menus press the alt+f1 key.")
scriptName = self._script.name
try:
sibling = obj.parent.getChildAtIndex(0)
except AttributeError:
sibling = None
if 'nautilus' in scriptName and obj == sibling:
utterances.append(desktopMsg)
if (not alreadyFocused and self.lastTutorial != utterances) \
or forceTutorial:
pass
else:
utterances = []
self._debugGenerator("_getTutorialForLayeredPane",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForList(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a list.
Arguments:
- obj: the list
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string when navigating lists.
msg = _("Use up and down to select an item.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForList",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForListItem(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a listItem.
Arguments:
- obj: the listitem
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to collapse the node.
expandedMsg = _("To collapse, press shift plus left.")
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to expand the node.
collapsedMsg = _("To expand, press shift plus right.")
# If already in focus then the tree probably collapsed or expanded
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
if (self.lastTutorial != [expandedMsg]) or forceTutorial:
utterances.append(expandedMsg)
else:
if (self.lastTutorial != [collapsedMsg]) or forceTutorial:
utterances.append(collapsedMsg)
self._debugGenerator("_getTutorialForListItem",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForMenuItem(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a menu item
Arguments:
- obj: the menu item
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for activating a menu item
msg = _("To activate press return.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForMenuItem",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForText(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a text object.
Arguments:
- obj: the text component
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
if not obj.getState().contains(pyatspi.STATE_EDITABLE):
return []
utterances = []
# Translators: This is the tutorial string for when landing
# on text fields.
msg = _("Type in text.")
if (not alreadyFocused or forceTutorial) and \
not self._script.utilities.isReadOnlyTextArea(obj):
utterances.append(msg)
self._debugGenerator("_getTutorialForText",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForPageTab(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a page tab.
Arguments:
- obj: the page tab
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of tutorial utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for landing
# on a page tab, we are informing the
# user how to navigate these.
msg = _("Use left and right to view other tabs.")
if (self.lastTutorial != [msg]) or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForPageTabList",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForPushButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a push button
Arguments:
- obj: the push button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for activating a push button.
msg = _("To activate press space.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForPushButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForSpinButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a spin button. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the spin button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a spin button.
msg = _("Use up or down arrow to select value." \
" Or type in the desired numerical value.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSpinButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForTableCell(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial utterances for a single table cell
Arguments:
- obj: the table
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to collapse the node.
expandedMsg = _("To collapse, press shift plus left.")
# Translators: this represents the state of a node in a tree.
# 'expanded' means the children are showing.
# 'collapsed' means the children are not showing.
# this string informs the user how to expand the node.
collapsedMsg = _("To expand, press shift plus right.")
# If this table cell has 2 children and one of them has a
# 'toggle' action and the other does not, then present this
# as a checkbox where:
# 1) we get the checked state from the cell with the 'toggle' action
# 2) we get the label from the other cell.
# See Orca bug #376015 for more details.
#
if obj.childCount == 2:
cellOrder = []
hasToggle = [ False, False ]
for i, child in enumerate(obj):
try:
action = child.queryAction()
except NotImplementedError:
continue
else:
for j in range(0, action.nActions):
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(j) in ["toggle", _("toggle")]:
hasToggle[i] = True
break
if hasToggle[0] and not hasToggle[1]:
cellOrder = [ 1, 0 ]
elif not hasToggle[0] and hasToggle[1]:
cellOrder = [ 0, 1 ]
if cellOrder:
for i in cellOrder:
# Don't speak the label if just the checkbox state has
# changed.
#
if alreadyFocused and not hasToggle[i]:
pass
else:
utterances.extend( \
self._getTutorialForTableCell(obj[i],
alreadyFocused, forceTutorial))
return utterances
# [[[TODO: WDW - Attempt to infer the cell type. There's a
# bunch of stuff we can do here, such as check the EXPANDABLE
# state, check the NODE_CHILD_OF relation, etc. Logged as
# bugzilla bug 319750.]]]
#
try:
action = obj.queryAction()
except NotImplementedError:
action = None
if action:
for i in range(0, action.nActions):
debug.println(debug.LEVEL_FINEST,
"tutorialgenerator._getTutorialForTableCell" \
+ "looking at action %d" % i)
# Translators: this is the action name for
# the 'toggle' action. It must be the same
# string used in the *.po file for gail.
#
if action.getName(i) in ["toggle", _("toggle")]:
utterances = self._getTutorialForCheckBox(obj,
alreadyFocused, forceTutorial)
break
state = obj.getState()
if state.contains(pyatspi.STATE_EXPANDABLE):
if state.contains(pyatspi.STATE_EXPANDED):
if self.lastTutorial != [expandedMsg] or forceTutorial:
utterances.append(expandedMsg)
else:
if self.lastTutorial != [collapsedMsg] or forceTutorial:
utterances.append(collapsedMsg)
self._debugGenerator("_getTutorialForTableCell",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForTableCellRow(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for the active table cell in the table row.
Arguments:
- obj: the table
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
if (not alreadyFocused):
try:
parent_table = obj.parent.queryTable()
except NotImplementedError:
parent_table = None
if settings.readTableCellRow and parent_table \
and not self._script.utilities.isLayoutOnly(obj.parent):
parent = obj.parent
index = self._script.utilities.cellIndex(obj)
row = parent_table.getRowAtIndex(index)
column = parent_table.getColumnAtIndex(index)
# This is an indication of whether we should speak all the
# table cells (the user has moved focus up or down a row),
# or just the current one (focus has moved left or right in
# the same row).
#
speakAll = True
if "lastRow" in self._script.pointOfReference and \
"lastColumn" in self._script.pointOfReference:
pointOfReference = self._script.pointOfReference
speakAll = (pointOfReference["lastRow"] != row) or \
((row == 0 or row == parent_table.nRows-1) and \
pointOfReference["lastColumn"] == column)
utterances.extend(self._getTutorialForTableCell(obj,
alreadyFocused, forceTutorial))
else:
utterances = self._getTutorialForTableCell(obj,
alreadyFocused, forceTutorial)
else:
utterances = self._getTutorialForTableCell(obj, alreadyFocused, \
forceTutorial)
self._debugGenerator("_getTutorialForTableCellRow",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForRadioButton(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a radio button.
Arguments:
- obj: the radio button
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
        # Translators: this is a tip for the user on how to navigate
        # radio buttons.
msg = _("Use arrow keys to change.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForRadioButton",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForMenu(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a menu.
Arguments:
- obj: the menu
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
        # Translators: this is a tip for the user on how to navigate menus.
mainMenuMsg = _("To navigate, press left or right arrow. " \
"To move through items press up or down arrow.")
        # Translators: this is a tip for the user on how to
        # navigate into sub menus.
subMenuMsg = _("To enter sub menu, press right arrow.")
# Checking if we are a submenu,
# we can't rely on our parent being just a menu.
if obj.parent.name != "" and obj.parent.__class__ == obj.__class__:
if (self.lastTutorial != [subMenuMsg]) or forceTutorial:
utterances.append(subMenuMsg)
else:
if (self.lastTutorial != [mainMenuMsg]) or forceTutorial:
utterances.append(mainMenuMsg)
self._debugGenerator("_getTutorialForMenu",
obj,
alreadyFocused,
utterances)
return utterances
def _getTutorialForSlider(self, obj, alreadyFocused, forceTutorial):
"""Get the tutorial string for a slider. If the object already has
focus, then no tutorial is given.
Arguments:
- obj: the slider
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken for the object.
"""
utterances = []
# Translators: this is the tutorial string for when landing
# on a slider.
msg = _("To decrease press left arrow, to increase press right arrow." \
" To go to minimum press home, and for maximum press end.")
if (not alreadyFocused and self.lastTutorial != [msg]) \
or forceTutorial:
utterances.append(msg)
self._debugGenerator("_getTutorialForSlider",
obj,
alreadyFocused,
utterances)
return utterances
def getTutorial(self, obj, alreadyFocused, forceTutorial=False):
"""Get the tutorial for an Accessible object. This will look
first to the specific tutorial generators and if this
does not exist then return the empty tutorial.
This method is the primary method
that external callers of this class should use.
Arguments:
- obj: the object
- alreadyFocused: False if object just received focus
- forceTutorial: used for when whereAmI really needs the tutorial string
Returns a list of utterances to be spoken.
"""
if not settings.enableTutorialMessages:
return []
utterances = []
role = obj.getRole()
if role in self.tutorialGenerators:
generator = self.tutorialGenerators[role]
else:
generator = self._getDefaultTutorial
msg = generator(obj, alreadyFocused, forceTutorial)
if msg:
utterances = [" ".join(msg)]
self.lastTutorial = msg
if forceTutorial:
self.lastTutorial = ""
self._debugGenerator("getTutorial",
obj,
alreadyFocused,
utterances)
return utterances
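# A minimal extension sketch (MyTutorialGenerator is hypothetical, not part
# of Orca): as the class docstring suggests, scripts customize behavior by
# extending the role-to-generator mapping rather than editing this class.
class MyTutorialGenerator(TutorialGenerator):
    def __init__(self, script):
        TutorialGenerator.__init__(self, script)
        # Reuse the existing table-cell-row generator for tree tables too.
        self.tutorialGenerators[pyatspi.ROLE_TREE_TABLE] = \
            self._getTutorialForTableCellRow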
| ruibarreira/linuxtrail | usr/lib/python3/dist-packages/orca/tutorialgenerator.py | Python | gpl-3.0 | 30,742 | [
"ORCA"
] | 972024aef03bbcf07b3a5e76d30f6f1a4b7c7b37dc584ceac2d23f023e2d90df |
class Visitor(object):
    def visit(self, target):
        pass
class Iterator(object):
def __init__(self, root):
self.it = iter(list(root))
self.last_it = None
self.visitor = Visitor()
def __iter__(self):
return self
def next(self):
try:
item = next(self.it)
except StopIteration:
if self.last_it:
self.it = self.last_it
self.last_it = None
return self.next()
raise
        try:
            depth_it = Iterator(item)
            self.last_it = self.it
            self.it = depth_it
            return self.next()
        except TypeError:
            # item is not iterable, so it is a leaf: yield it as-is.
            return item
def test_Iterator():
struct = [1, 2, 3, [30, 31, 32, 33], 4, 5, [61,62], 13]
it = Iterator(struct)
while True:
try:
elem = next(it)
print "elem =", elem
except StopIteration:
break
test_Iterator()
class BreadthFirst(Iterator):
def __init__(self, root):
self.queue = []
it = iter(list(root))
self.enqueue(it)
def enqueue(self, item):
self.queue.append(item)
def dequeue(self):
return self.queue.pop(0)
def __iter__(self):
return self
    def next(self):
        if not self.queue:
            raise StopIteration
        it = self.dequeue()
        try:
            item = next(it)
        except StopIteration:
            # This iterator is exhausted; keep draining the rest of the queue.
            return self.next()
        try:
            it2 = iter(item)
        except TypeError:
            # Leaf item: re-queue its iterator and yield the item.
            self.enqueue(it)
            return item
        self.enqueue(it2)
        self.enqueue(it)
        return self.next()
def test_BreadthFirst():
#struct = [1, 2, [3, 4], [20, [300]], [50, [600], [80, [900]]]]
struct = [1, 2, [30, 40, 50], 5, 6, 7]
it = BreadthFirst(struct)
print ""
while True:
try:
elem = next(it)
print "elem =", elem
except StopIteration:
break
test_BreadthFirst()
class DepthFirst(object):
    def __init__(self, root):
        self.root = root
        self.stack = [iter(list(root))]
    def next(self):
        while self.stack:
            try:
                item = next(self.stack[-1])
            except StopIteration:
                self.stack.pop()
                continue
            try:
                # Descend into nested containers before finishing siblings.
                self.stack.append(iter(list(item)))
            except TypeError:
                return item
        raise StopIteration
    def __iter__(self):
        return self
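# Usage sketch tying the otherwise-unused Visitor hook to traversal
# (PrintVisitor and visit_all are illustrative additions, not original code):
class PrintVisitor(Visitor):
    def visit(self, target):
        print "visited", target

def visit_all(root, visitor):
    # Walk the nested structure depth first, handing each leaf to the visitor.
    for elem in Iterator(root):
        visitor.visit(elem)

visit_all([1, [2, 3], 4], PrintVisitor())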
| bhdz/Brace.Shell | implementation.1/modules/bracesh/base/text/iterate.py | Python | bsd-2-clause | 2,198 | [
"VisIt"
] | 95fa8c5f8be850772dbe5bf541a885bf3e721d6cf97cb87a6dd30a293d9252ba |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1beta1.services.prediction_service import (
PredictionServiceAsyncClient,
)
from google.cloud.aiplatform_v1beta1.services.prediction_service import (
PredictionServiceClient,
)
from google.cloud.aiplatform_v1beta1.services.prediction_service import transports
from google.cloud.aiplatform_v1beta1.services.prediction_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1beta1.types import explanation
from google.cloud.aiplatform_v1beta1.types import prediction_service
from google.oauth2 import service_account
from google.protobuf import struct_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert PredictionServiceClient._get_default_mtls_endpoint(None) is None
assert (
PredictionServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
PredictionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
PredictionServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
PredictionServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
PredictionServiceClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.PredictionServiceGrpcTransport, "grpc"),
(transports.PredictionServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_prediction_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
)
def test_prediction_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_prediction_service_client_get_transport_class():
transport = PredictionServiceClient.get_transport_class()
available_transports = [
transports.PredictionServiceGrpcTransport,
]
assert transport in available_transports
transport = PredictionServiceClient.get_transport_class("grpc")
assert transport == transports.PredictionServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
(
PredictionServiceAsyncClient,
transports.PredictionServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
PredictionServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PredictionServiceClient),
)
@mock.patch.object(
PredictionServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PredictionServiceAsyncClient),
)
def test_prediction_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(PredictionServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
PredictionServiceClient,
transports.PredictionServiceGrpcTransport,
"grpc",
"true",
),
(
PredictionServiceAsyncClient,
transports.PredictionServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
PredictionServiceClient,
transports.PredictionServiceGrpcTransport,
"grpc",
"false",
),
(
PredictionServiceAsyncClient,
transports.PredictionServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
PredictionServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PredictionServiceClient),
)
@mock.patch.object(
PredictionServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(PredictionServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_prediction_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
(
PredictionServiceAsyncClient,
transports.PredictionServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_prediction_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(PredictionServiceClient, transports.PredictionServiceGrpcTransport, "grpc"),
(
PredictionServiceAsyncClient,
transports.PredictionServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_prediction_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_prediction_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = PredictionServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_predict(
transport: str = "grpc", request_type=prediction_service.PredictRequest
):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse(
deployed_model_id="deployed_model_id_value",
)
response = client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.PredictResponse)
assert response.deployed_model_id == "deployed_model_id_value"
def test_predict_from_dict():
test_predict(request_type=dict)
def test_predict_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
client.predict()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.PredictRequest()
@pytest.mark.asyncio
async def test_predict_async(
transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
):
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse(
deployed_model_id="deployed_model_id_value",
)
)
response = await client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.PredictResponse)
assert response.deployed_model_id == "deployed_model_id_value"
@pytest.mark.asyncio
async def test_predict_async_from_dict():
await test_predict_async(request_type=dict)
def test_predict_field_headers():
client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = prediction_service.PredictResponse()
client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_predict_field_headers_async():
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.PredictRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
await client.predict(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_predict_flattened_error():
client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.predict(
prediction_service.PredictRequest(),
endpoint="endpoint_value",
instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
)
@pytest.mark.asyncio
async def test_predict_flattened_error_async():
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.predict(
prediction_service.PredictRequest(),
endpoint="endpoint_value",
instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
)
def test_explain(
transport: str = "grpc", request_type=prediction_service.ExplainRequest
):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.explain), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.ExplainResponse(
deployed_model_id="deployed_model_id_value",
)
response = client.explain(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.ExplainRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.ExplainResponse)
assert response.deployed_model_id == "deployed_model_id_value"
def test_explain_from_dict():
test_explain(request_type=dict)
def test_explain_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.explain), "__call__") as call:
client.explain()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.ExplainRequest()
@pytest.mark.asyncio
async def test_explain_async(
transport: str = "grpc_asyncio", request_type=prediction_service.ExplainRequest
):
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.explain), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.ExplainResponse(
deployed_model_id="deployed_model_id_value",
)
)
response = await client.explain(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == prediction_service.ExplainRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.ExplainResponse)
assert response.deployed_model_id == "deployed_model_id_value"
@pytest.mark.asyncio
async def test_explain_async_from_dict():
await test_explain_async(request_type=dict)
def test_explain_field_headers():
client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.ExplainRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.explain), "__call__") as call:
call.return_value = prediction_service.ExplainResponse()
client.explain(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_explain_field_headers_async():
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = prediction_service.ExplainRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.explain), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.ExplainResponse()
)
await client.explain(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_explain_flattened_error():
client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.explain(
prediction_service.ExplainRequest(),
endpoint="endpoint_value",
instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
deployed_model_id="deployed_model_id_value",
)
@pytest.mark.asyncio
async def test_explain_flattened_error_async():
client = PredictionServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.explain(
prediction_service.ExplainRequest(),
endpoint="endpoint_value",
instances=[struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE)],
parameters=struct_pb2.Value(null_value=struct_pb2.NullValue.NULL_VALUE),
deployed_model_id="deployed_model_id_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.PredictionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = PredictionServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = PredictionServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.PredictionServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.PredictionServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = PredictionServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.PredictionServiceGrpcTransport,)
def test_prediction_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.PredictionServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_prediction_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.PredictionServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"predict",
"explain",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
@requires_google_auth_gte_1_25_0
def test_prediction_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_prediction_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_prediction_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1beta1.services.prediction_service.transports.PredictionServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.PredictionServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_prediction_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
PredictionServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_prediction_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
PredictionServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_prediction_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_prediction_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.PredictionServiceGrpcTransport, grpc_helpers),
(transports.PredictionServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_prediction_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
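# A note on the options above (an aside, not part of the generated tests):
# for these gRPC channel options a value of -1 means "unlimited", so the
# expected channel places no cap on send/receive message sizes.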
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_prediction_service_host_no_port():
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_prediction_service_host_with_port():
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_prediction_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.PredictionServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_prediction_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.PredictionServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
def test_prediction_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.PredictionServiceGrpcTransport,
transports.PredictionServiceGrpcAsyncIOTransport,
],
)
def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_endpoint_path():
project = "squid"
location = "clam"
endpoint = "whelk"
expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
project=project, location=location, endpoint=endpoint,
)
actual = PredictionServiceClient.endpoint_path(project, location, endpoint)
assert expected == actual
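# With the sample values above, the formatted path is
# "projects/squid/locations/clam/endpoints/whelk".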
def test_parse_endpoint_path():
expected = {
"project": "octopus",
"location": "oyster",
"endpoint": "nudibranch",
}
path = PredictionServiceClient.endpoint_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_endpoint_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = PredictionServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = PredictionServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder,)
actual = PredictionServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = PredictionServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization,)
actual = PredictionServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = PredictionServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project,)
actual = PredictionServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = PredictionServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = PredictionServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = PredictionServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = PredictionServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.PredictionServiceTransport, "_prep_wrapped_messages"
) as prep:
client = PredictionServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.PredictionServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = PredictionServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
| sasha-gitg/python-aiplatform | tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py | Python | apache-2.0 | 54,222 | [
"Octopus"
] | aab6b3480579285e358ca7af8bdf758459fd2bae38b3bea6c8226804e241c476 |
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for BLAST+ plain text output formats.
At the moment this is a wrapper around Biopython's NCBIStandalone text
parser (which is now deprecated).
"""
from Bio.Alphabet import generic_dna, generic_protein
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
import warnings
from Bio import BiopythonDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter('ignore', BiopythonDeprecationWarning)
from Bio.Blast import NCBIStandalone
__all__ = ['BlastTextParser']
__docformat__ = "restructuredtext en"
class BlastTextParser(object):
"""Parser for the BLAST text format."""
def __init__(self, handle):
self.handle = handle
blast_parser = NCBIStandalone.BlastParser()
self.blast_iter = NCBIStandalone.Iterator(handle, blast_parser)
def __iter__(self):
for rec in self.blast_iter:
# set attributes to SearchIO's
# get id and desc
if rec.query.startswith('>'):
rec.query = rec.query[1:]
try:
qid, qdesc = rec.query.split(' ', 1)
except ValueError:
qid, qdesc = rec.query, ''
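            # e.g. a header "gi|1234| sample query" splits into
            # ("gi|1234|", "sample query"); a bare "gi|1234|" gets an
            # empty description (illustrative values).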
qdesc = qdesc.replace('\n', '').replace('\r', '')
qresult = QueryResult(id=qid)
qresult.program = rec.application.lower()
qresult.target = rec.database
qresult.seq_len = rec.query_letters
qresult.version = rec.version
# determine alphabet based on program
if qresult.program == 'blastn':
alphabet = generic_dna
elif qresult.program in ['blastp', 'blastx', 'tblastn', 'tblastx']:
alphabet = generic_protein
# iterate over the 'alignments' (hits) and the hit table
for idx, aln in enumerate(rec.alignments):
# get id and desc
if aln.title.startswith('> '):
aln.title = aln.title[2:]
elif aln.title.startswith('>'):
aln.title = aln.title[1:]
try:
hid, hdesc = aln.title.split(' ', 1)
except ValueError:
hid, hdesc = aln.title, ''
hdesc = hdesc.replace('\n', '').replace('\r', '')
# iterate over the hsps and group them in a list
hsp_list = []
for bhsp in aln.hsps:
frag = HSPFragment(hid, qid)
frag.alphabet = alphabet
# set alignment length
frag.aln_span = bhsp.identities[1]
# set frames
try:
frag.query_frame = int(bhsp.frame[0])
except IndexError:
if qresult.program in ('blastp', 'tblastn'):
frag.query_frame = 0
else:
frag.query_frame = 1
try:
frag.hit_frame = int(bhsp.frame[1])
except IndexError:
if qresult.program in ('blastp', 'tblastn'):
frag.hit_frame = 0
else:
frag.hit_frame = 1
# set query coordinates
frag.query_start = min(bhsp.query_start,
bhsp.query_end) - 1
frag.query_end = max(bhsp.query_start, bhsp.query_end)
# set hit coordinates
frag.hit_start = min(bhsp.sbjct_start,
bhsp.sbjct_end) - 1
frag.hit_end = max(bhsp.sbjct_start, bhsp.sbjct_end)
# set query, hit sequences and its annotation
qseq = ''
hseq = ''
midline = ''
for seqtrio in zip(bhsp.query, bhsp.sbjct, bhsp.match):
qchar, hchar, mchar = seqtrio
if qchar == ' ' or hchar == ' ':
assert all(' ' == x for x in seqtrio)
else:
qseq += qchar
hseq += hchar
midline += mchar
frag.query, frag.hit = qseq, hseq
frag.aln_annotation['similarity'] = midline
# create HSP object with the fragment
hsp = HSP([frag])
hsp.evalue = bhsp.expect
hsp.bitscore = bhsp.bits
hsp.bitscore_raw = bhsp.score
# set gap
try:
hsp.gap_num = bhsp.gaps[0]
except IndexError:
hsp.gap_num = 0
# set identity
hsp.ident_num = bhsp.identities[0]
hsp.pos_num = bhsp.positives[0]
if hsp.pos_num is None:
hsp.pos_num = hsp[0].aln_span
hsp_list.append(hsp)
hit = Hit(hsp_list)
hit.seq_len = aln.length
hit.description = hdesc
qresult.append(hit)
qresult.description = qdesc
yield qresult
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
| updownlife/multipleK | dependencies/biopython-1.65/build/lib.linux-x86_64-2.7/Bio/SearchIO/BlastIO/blast_text.py | Python | gpl-2.0 | 5,732 | [
"BLAST",
"Biopython"
] | ae82d2630cd62da81ea16941da5e4cf47e7dee1369c1e1c61af6016e04478f1c |
#!/usr/bin/env python
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) 2008-2016 NIWA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Cylc scheduler server."""
from copy import copy, deepcopy
import logging
import os
from pipes import quote
from Queue import Empty
from shutil import copy as copyfile, copytree, rmtree
import signal
from subprocess import call, Popen, PIPE
import sys
from tempfile import mkstemp
import threading
import time
import traceback
import isodatetime.data
import isodatetime.parsers
from parsec.util import printcfg
from cylc.broadcast_report import (
CHANGE_FMT as BROADCAST_LOAD_FMT,
CHANGE_PREFIX_SET as BROADCAST_LOAD_PREFIX)
from cylc.cfgspec.globalcfg import GLOBAL_CFG
from cylc.config import SuiteConfig, TaskNotDefinedError
from cylc.cycling import PointParsingError
from cylc.cycling.loader import get_point, standardise_point_string
from cylc.daemonize import daemonize, SUITE_SCAN_INFO_TMPL
from cylc.exceptions import CylcError
import cylc.flags
from cylc.get_task_proxy import get_task_proxy
from cylc.job_file import JobFile
from cylc.job_host import RemoteJobHostManager, RemoteJobHostInitError
from cylc.log_diagnosis import LogSpec
from cylc.mp_pool import SuiteProcContext, SuiteProcPool
from cylc.network import (
PYRO_SUITEID_OBJ_NAME, PYRO_STATE_OBJ_NAME,
PYRO_CMD_OBJ_NAME, PYRO_BCAST_OBJ_NAME, PYRO_EXT_TRIG_OBJ_NAME,
PYRO_INFO_OBJ_NAME, PYRO_LOG_OBJ_NAME)
from cylc.network.ext_trigger import ExtTriggerServer
from cylc.network.pyro_daemon import PyroDaemon
from cylc.network.suite_broadcast import BroadcastServer
from cylc.network.suite_command import SuiteCommandServer
from cylc.network.suite_identifier import SuiteIdServer
from cylc.network.suite_info import SuiteInfoServer
from cylc.network.suite_log import SuiteLogServer
from cylc.network.suite_state import StateSummaryServer
from cylc.owner import USER
from cylc.registration import RegistrationDB
from cylc.regpath import RegPath
from cylc.rundb import CylcSuiteDAO
from cylc.suite_env import CylcSuiteEnv
from cylc.suite_host import get_suite_host
from cylc.suite_logging import suite_log
from cylc.task_id import TaskID
from cylc.task_pool import TaskPool
from cylc.task_proxy import TaskProxy, TaskProxySequenceBoundsError
from cylc.task_state import (
TASK_STATUS_HELD, TASK_STATUS_WAITING,
TASK_STATUS_QUEUED, TASK_STATUS_READY, TASK_STATUS_SUBMITTED,
TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_SUBMIT_RETRYING,
TASK_STATUS_RUNNING, TASK_STATUS_SUCCEEDED, TASK_STATUS_FAILED,
TASK_STATUS_RETRYING)
from cylc.version import CYLC_VERSION
from cylc.wallclock import (
get_current_time_string, get_seconds_as_interval_string)
class SchedulerError(CylcError):
"""Scheduler error."""
pass
class SchedulerStop(CylcError):
"""Scheduler has stopped."""
pass
class PyroRequestHandler(threading.Thread):
"""Pyro request handler."""
def __init__(self, pyro):
threading.Thread.__init__(self)
self.pyro = pyro
self.quit = False
self.log = logging.getLogger('main')
self.log.debug("request handling thread starting")
def run(self):
while True:
self.pyro.handle_requests(timeout=1)
if self.quit:
break
self.log.debug("request handling thread exiting")
class Scheduler(object):
"""Cylc scheduler server."""
EVENT_STARTUP = 'startup'
EVENT_SHUTDOWN = 'shutdown'
EVENT_TIMEOUT = 'timeout'
EVENT_INACTIVITY_TIMEOUT = 'inactivity'
EVENT_STALLED = 'stalled'
SUITE_EVENT_HANDLER = 'suite-event-handler'
SUITE_EVENT_MAIL = 'suite-event-mail'
FS_CHECK_PERIOD = 600.0 # 600 seconds
    # Dependency negotiation etc. will run after these commands
PROC_CMDS = (
'release_suite',
'release_task',
'kill_tasks',
'set_runahead',
'reset_task_state',
'spawn_tasks',
'trigger_task',
'nudge',
'insert_task',
'reload_suite')
def __init__(self, is_restart, options, args):
self.options = options
self.suite = args[0]
self.suiterc = RegistrationDB(self.options.db).get_suiterc(self.suite)
self.suite_dir = os.path.dirname(self.suiterc)
# For user-defined job submission methods:
sys.path.append(os.path.join(self.suite_dir, 'python'))
self.config = None
self.is_restart = is_restart
self._cli_initial_point_string = None
self._cli_start_point_string = None
start_point_str = None
if len(args) > 1:
start_point_str = args[1]
if getattr(self.options, 'warm', None):
self._cli_start_point_string = start_point_str
else:
self._cli_initial_point_string = start_point_str
self.run_mode = self.options.run_mode
# For persistence of reference test settings across reloads:
self.reference_test_mode = self.options.reftest
self.gen_reference_log = self.options.genref
self.owner = USER
self.host = get_suite_host()
self.port = None
self.port_file = None
self.is_stalled = False
self.stalled_last = False
self.graph_warned = {}
self.suite_env = {}
self.suite_task_env = {}
self.suite_env_dumper = None
self.do_process_tasks = False
self.do_update_state_summary = True
# initialize some items in case of early shutdown
# (required in the shutdown() method)
self.suite_state = None
self.command_queue = None
self.pool = None
self.request_handler = None
self.pyro = None
self._profile_amounts = {}
self._profile_update_times = {}
self.shut_down_cleanly = False
self.shut_down_now = False
# TODO - stop task should be held by the task pool.
self.stop_task = None
self.stop_point = None
self.stop_clock_time = None # When not None, in Unix time
self.stop_clock_time_string = None # Human-readable format.
self.initial_point = None
self.start_point = None
self.final_point = None
self.pool_hold_point = None
self.hold_suite_now = False
self.suite_timer_timeout = 0.0
self.suite_timer_active = False
self.next_kill_issue = None
self.already_timed_out = False
self.kill_on_shutdown = False
self.pri_dao = None
self.pub_dao = None
self.suite_log = None
self.log = None
# FIXME: can this be a local variable?
self.old_user_at_host_set = set()
self.ref_test_allowed_failures = []
def start(self):
"""Start the server."""
self._check_port_file_does_not_exist(self.suite)
self._print_blurb()
GLOBAL_CFG.create_cylc_run_tree(self.suite)
if self.is_restart:
run_dir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory')
pri_db_path = os.path.join(
run_dir, CylcSuiteDAO.PRI_DB_FILE_BASE_NAME)
# Backward compat, upgrade database with state file if necessary
old_pri_db_path = os.path.join(
run_dir, 'state', CylcSuiteDAO.DB_FILE_BASE_NAME)
old_state_file_path = os.path.join(run_dir, "state", "state")
if (os.path.exists(old_pri_db_path) and
os.path.exists(old_state_file_path) and
not os.path.exists(pri_db_path)):
copyfile(old_pri_db_path, pri_db_path)
pri_dao = CylcSuiteDAO(pri_db_path)
pri_dao.upgrade_with_state_file(old_state_file_path)
target = os.path.join(run_dir, "state.tar.gz")
cmd = ["tar", "-C", run_dir, "-czf", target, "state"]
if call(cmd) == 0:
rmtree(os.path.join(run_dir, "state"), ignore_errors=True)
else:
try:
os.unlink(os.path.join(run_dir, "state.tar.gz"))
except OSError:
pass
print >> sys.stderr, (
"ERROR: cannot tar-gzip + remove old state/ directory")
else:
pri_dao = CylcSuiteDAO(pri_db_path)
# Vacuum the primary/private database file
sys.stdout.write("Vacuuming the suite db ...")
pri_dao.vacuum()
sys.stdout.write(" done\n")
pri_dao.close()
try:
self._configure_pyro()
if not self.options.no_detach and not cylc.flags.debug:
daemonize(self)
self.configure()
if self.options.profile_mode:
import cProfile
import pstats
prof = cProfile.Profile()
prof.enable()
self.run()
except SchedulerStop as exc:
# deliberate stop
print str(exc)
self.shutdown()
except SchedulerError as exc:
print >> sys.stderr, str(exc)
self.shutdown()
sys.exit(1)
except KeyboardInterrupt as exc:
try:
self.shutdown(str(exc))
except Exception as exc1:
# In case of exceptions in the shutdown method itself.
traceback.print_exc(exc1)
sys.exit(1)
except Exception as exc:
traceback.print_exc()
print >> sys.stderr, "ERROR CAUGHT: cleaning up before exit"
try:
self.shutdown('ERROR: ' + str(exc))
            except Exception as exc1:
# In case of exceptions in the shutdown method itself
traceback.print_exc(exc1)
if cylc.flags.debug:
raise
else:
sys.exit(1)
else:
# main loop ends (not used?)
self.shutdown()
if self.options.profile_mode:
prof.disable()
import StringIO
string_stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=string_stream)
stats.sort_stats('cumulative')
stats.print_stats()
print string_stream.getvalue()
print
@staticmethod
def _check_port_file_does_not_exist(suite):
"""Fail if port file exists. Return port file path otherwise."""
port_file_path = os.path.join(
GLOBAL_CFG.get(['pyro', 'ports directory']), suite)
try:
port, host = open(port_file_path).read().splitlines()
except (IOError, ValueError):
# Suite is not likely to be running if port file does not exist
# or if port file does not contain good values of port and host.
return port_file_path
else:
sys.stderr.write(
(
r"""ERROR: port file exists: %(port_file_path)s
If %(suite)s is not running, delete the port file and try again. If it is
running but not responsive, kill any left over suite processes too.""" +
SUITE_SCAN_INFO_TMPL
) % {
"host": host,
"port": port,
"port_file_path": port_file_path,
"suite": suite,
}
)
raise SchedulerError(
"ERROR, port file exists: %s" % port_file_path)
@staticmethod
def _print_blurb():
"""Print copyright and license information."""
logo = (
" ,_, \n"
" | | \n"
",_____,_, ,_| |_____, \n"
"| ,___| | | | | ,___| \n"
"| |___| |_| | | |___, \n"
"\_____\___, |_\_____| \n"
" ,___| | \n"
" \_____| \n"
)
cylc_license = """
The Cylc Suite Engine [%s]
Copyright (C) 2008-2016 NIWA
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
This program comes with ABSOLUTELY NO WARRANTY;
see `cylc warranty`. It is free software, you
are welcome to redistribute it under certain
conditions; see `cylc conditions`.
""" % CYLC_VERSION
logo_lines = logo.splitlines()
license_lines = cylc_license.splitlines()
lmax = max(len(line) for line in license_lines)
for i in range(len(logo_lines)):
print logo_lines[i], ('{0: ^%s}' % lmax).format(license_lines[i])
print
def _configure_pyro(self):
"""Create and configure Pyro daemon."""
self.pyro = PyroDaemon(self.suite, self.suite_dir)
self.port = self.pyro.get_port()
port_file_path = self._check_port_file_does_not_exist(self.suite)
try:
with open(port_file_path, 'w') as handle:
handle.write("%d\n%s\n" % (self.port, self.host))
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
raise SchedulerError(
'ERROR, cannot write port file: %s' % port_file_path)
else:
self.port_file = port_file_path
def configure(self):
"""Configure suite daemon."""
self.log_memory("scheduler.py: start configure")
SuiteProcPool.get_inst()
self.log_memory("scheduler.py: before configure_suite")
self.configure_suite()
self.log_memory("scheduler.py: after configure_suite")
reqmode = self.config.cfg['cylc']['required run mode']
if reqmode:
if reqmode != self.run_mode:
raise SchedulerError(
'ERROR: this suite requires the %s run mode' % reqmode)
if self.gen_reference_log or self.reference_test_mode:
self.configure_reftest()
self.log.info('Suite starting on %s:%s' % (self.host, self.port))
# Note that the following lines must be present at the top of
# the suite log file for use in reference test runs:
self.log.info('Run mode: ' + self.run_mode)
self.log.info('Initial point: ' + str(self.initial_point))
if self.start_point != self.initial_point:
self.log.info('Start point: ' + str(self.start_point))
self.log.info('Final point: ' + str(self.final_point))
self.pool = TaskPool(
self.suite, self.pri_dao, self.pub_dao, self.final_point,
self.pyro, self.log, self.run_mode)
self.request_handler = PyroRequestHandler(self.pyro)
self.request_handler.start()
self.log_memory("scheduler.py: before load_tasks")
if self.is_restart:
self.load_tasks_for_restart()
else:
self.load_tasks_for_run()
self.log_memory("scheduler.py: after load_tasks")
self.pool.put_rundb_suite_params(self.initial_point, self.final_point)
self.configure_suite_environment()
# Write suite contact environment file
suite_run_dir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory')
self.suite_env_dumper.dump(suite_run_dir)
# Copy local python modules from source to run directory
for sub_dir in ["python", os.path.join("lib", "python")]:
# TODO - eventually drop the deprecated "python" sub-dir.
suite_py = os.path.join(self.suite_dir, sub_dir)
if (os.path.realpath(self.suite_dir) !=
os.path.realpath(suite_run_dir) and
os.path.isdir(suite_py)):
suite_run_py = os.path.join(suite_run_dir, sub_dir)
try:
rmtree(suite_run_py)
except OSError:
pass
copytree(suite_py, suite_run_py)
        # Restart only: copy to other accounts with still-running tasks
for user_at_host in self.old_user_at_host_set:
try:
RemoteJobHostManager.get_inst().init_suite_run_dir(
self.suite, user_at_host)
except RemoteJobHostInitError as exc:
self.log.error(str(exc))
self.old_user_at_host_set.clear()
self.already_timed_out = False
if self._get_events_conf(self.EVENT_TIMEOUT):
self.set_suite_timer()
# self.nudge_timer_start = None
# self.nudge_timer_on = False
# self.auto_nudge_interval = 5 # seconds
self.already_inactive = False
if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
self.set_suite_inactivity_timer()
self.log_memory("scheduler.py: end configure")
def load_tasks_for_run(self):
"""Load tasks for a new run."""
if self.start_point is not None:
if self.options.warm:
self.log.info('Warm Start %s' % self.start_point)
else:
self.log.info('Cold Start %s' % self.start_point)
task_list = self.filter_initial_task_list(
self.config.get_task_name_list())
coldstart_tasks = self.config.get_coldstart_task_list()
for name in task_list:
if self.start_point is None:
# No start cycle point at which to load cycling tasks.
continue
try:
itask = get_task_proxy(
name, self.start_point, is_startup=True,
message_queue=self.pool.message_queue)
except TaskProxySequenceBoundsError as exc:
self.log.debug(str(exc))
continue
if name in coldstart_tasks and self.options.warm:
itask.state.set_state(TASK_STATUS_SUCCEEDED)
itask.state.set_prerequisites_all_satisfied()
itask.state.outputs.set_all_completed()
# Load task.
self.pool.add_to_runahead_pool(itask)
def load_tasks_for_restart(self):
"""Load tasks for restart."""
self.pri_dao.select_suite_params(self._load_suite_params)
self.pri_dao.select_broadcast_states(self._load_broadcast_states)
self.pri_dao.select_task_pool_for_restart(self._load_task_pool)
self.pool.poll_task_jobs()
def _load_broadcast_states(self, row_idx, row):
"""Load a setting in the previous broadcast states."""
if row_idx == 0:
print "LOADING broadcast states"
point, namespace, key, value = row
BroadcastServer.get_inst().load_state(point, namespace, key, value)
print BROADCAST_LOAD_FMT.strip() % {
"change": BROADCAST_LOAD_PREFIX,
"point": point,
"namespace": namespace,
"key": key,
"value": value}
def _load_suite_params(self, row_idx, row):
"""Load previous initial/final cycle point."""
if row_idx == 0:
print "LOADING suite parameters"
key, value = row
for key_str, self_attr, option_ignore_attr in [
("initial", "start_point", "ignore_start_point"),
("final", "stop_point", "ignore_stop_point")]:
if key != key_str + "_point" or value is None:
continue
# the suite_params table prescribes a start/stop cycle
# (else we take whatever the suite.rc file gives us)
point = get_point(value)
my_point = getattr(self, self_attr)
if getattr(self.options, option_ignore_attr):
# ignore it and take whatever the suite.rc file gives us
if my_point is not None:
print >> sys.stderr, (
"WARNING: I'm ignoring the old " + key_str +
" cycle point as requested,\n"
"but I can't ignore the one set"
" on the command line or in the suite definition.")
elif my_point is not None:
# Given in the suite.rc file
if my_point != point:
print >> sys.stderr, (
"WARNING: old %s cycle point " +
"%s, overriding suite.rc %s"
) % (key_str, point, my_point)
setattr(self, self_attr, point)
else:
# reinstate from old
setattr(self, self_attr, point)
print "+ %s cycle point = %s" % (key_str, value)
def _load_task_pool(self, row_idx, row):
"""Load a task from previous task pool.
The state of task prerequisites (satisfied or not) and outputs
(completed or not) is determined by the recorded TASK_STATUS:
        TASK_STATUS_WAITING - prerequisites and outputs unsatisfied
TASK_STATUS_HELD - ditto (only waiting tasks can be held)
TASK_STATUS_QUEUED - prereqs satisfied, outputs not completed
(only tasks ready to run can get queued)
TASK_STATUS_READY - ditto
TASK_STATUS_SUBMITTED - ditto (but see *)
TASK_STATUS_SUBMIT_RETRYING - ditto
TASK_STATUS_RUNNING - ditto (but see *)
TASK_STATUS_FAILED - ditto (tasks must run in order to fail)
TASK_STATUS_RETRYING - ditto (tasks must fail in order to retry)
TASK_STATUS_SUCCEEDED - prerequisites satisfied, outputs completed
(*) tasks reloaded with TASK_STATUS_SUBMITTED or TASK_STATUS_RUNNING
are polled to determine what their true status is.
"""
if row_idx == 0:
print "LOADING task proxies"
cycle, name, spawned, status, submit_num, try_num, user_at_host = row
try:
itask = get_task_proxy(
name,
get_point(cycle),
status,
bool(spawned),
submit_num=submit_num,
is_reload_or_restart=True,
message_queue=self.pool.message_queue)
except TaskNotDefinedError as exc:
if cylc.flags.debug:
traceback.print_exc()
else:
print >> sys.stderr, str(exc)
print >> sys.stderr, (
"WARNING: ignoring task %s " % name +
"from the suite run database file")
print >> sys.stderr, (
"(the task definition has probably been "
"deleted from the suite).")
except Exception:
traceback.print_exc()
print >> sys.stderr, (
"ERROR: could not load task %s " % name)
else:
if status == TASK_STATUS_HELD:
# Only waiting tasks get held. These need to be released
# on restart to avoid the automatic shutdown criterion:
# if all tasks are succeeded or held (e.g. because they
# passed the final cycle point) shut down automatically.
itask.state.set_state(TASK_STATUS_WAITING)
elif status in (TASK_STATUS_SUBMITTED, TASK_STATUS_RUNNING):
itask.state.set_prerequisites_all_satisfied()
# update the task proxy with submit ID etc.
itask.try_number = try_num
itask.user_at_host = user_at_host
self.old_user_at_host_set.add(itask.user_at_host)
if itask.user_at_host is None:
itask.user_at_host = "localhost"
# update timers in case regular polling is configured for itask
if '@' in itask.user_at_host:
host = itask.user_at_host.split('@', 1)[1]
else:
host = itask.user_at_host
itask.submission_poll_timer.set_host(host, set_timer=True)
itask.execution_poll_timer.set_host(host, set_timer=True)
elif status in (TASK_STATUS_SUBMIT_FAILED, TASK_STATUS_FAILED):
itask.state.set_prerequisites_all_satisfied()
elif status in (
TASK_STATUS_QUEUED,
TASK_STATUS_READY,
TASK_STATUS_SUBMIT_RETRYING,
TASK_STATUS_RETRYING):
itask.state.set_prerequisites_all_satisfied()
# reset to waiting as these had not been submitted yet.
                itask.state.set_state(TASK_STATUS_WAITING)
elif itask.state.status == TASK_STATUS_SUCCEEDED:
itask.state.set_prerequisites_all_satisfied()
# TODO - just poll for outputs in the job status file.
itask.state.outputs.set_all_completed()
itask.summary['job_hosts'][int(submit_num)] = user_at_host
print "+ %s.%s %s" % (name, cycle, status)
self.pool.add_to_runahead_pool(itask)
def process_command_queue(self):
"""Process queued commands."""
queue = self.command_queue.get_queue()
qsize = queue.qsize()
if qsize > 0:
print 'Processing ' + str(qsize) + ' queued command(s)'
else:
return
while True:
try:
name, args = queue.get(False)
except Empty:
break
print ' +', name
cmdstr = name + '(' + ','.join([str(a) for a in args]) + ')'
try:
n_warnings = getattr(self, "command_%s" % name)(*args)
except SchedulerStop:
self.log.info('Command succeeded: ' + cmdstr)
raise
except Exception as exc:
# Don't let a bad command bring the suite down.
self.log.warning(traceback.format_exc())
self.log.warning(str(exc))
self.log.warning('Command failed: ' + cmdstr)
else:
if n_warnings:
self.log.info(
'Command succeeded with %s warning(s): %s' %
(n_warnings, cmdstr))
else:
self.log.info('Command succeeded: ' + cmdstr)
self.do_update_state_summary = True
if name in self.PROC_CMDS:
self.do_process_tasks = True
queue.task_done()
def _task_type_exists(self, name_or_id):
"""Does a task name or id match a known task type in this suite?"""
name = name_or_id
if TaskID.is_valid_id(name_or_id):
name = TaskID.split(name_or_id)[0]
return name in self.config.get_task_name_list()
def info_ping_suite(self):
"""Return True to indicate that the suite is alive!"""
return True
def info_get_cylc_version(self):
"""Return the cylc version running this suite daemon."""
return CYLC_VERSION
def get_standardised_point_string(self, point_string):
"""Return a standardised point string.
Used to process incoming command arguments.
"""
try:
point_string = standardise_point_string(point_string)
except PointParsingError as exc:
# (This is only needed to raise a clearer error message).
raise ValueError(
"Invalid cycle point: %s (%s)" % (point_string, exc))
return point_string
def get_standardised_point(self, point_string):
"""Return a standardised point."""
return get_point(self.get_standardised_point_string(point_string))
def get_standardised_taskid(self, task_id):
"""Return task ID with standardised cycle point."""
name, point_string = TaskID.split(task_id)
return TaskID.get(
name, self.get_standardised_point_string(point_string))
def info_ping_task(self, task_id, exists_only=False):
"""Return True if task exists and running."""
task_id = self.get_standardised_taskid(task_id)
return self.pool.ping_task(task_id, exists_only)
def info_get_task_jobfile_path(self, task_id):
"""Return task job file path."""
task_id = self.get_standardised_taskid(task_id)
return self.pool.get_task_jobfile_path(task_id)
def info_get_suite_info(self):
"""Return a dict containing the suite title and description."""
return {'title': self.config.cfg['title'],
'description': self.config.cfg['description']}
def info_get_task_info(self, name):
"""Return info of a task."""
try:
return self.config.describe(name)
except KeyError:
return {}
def info_get_all_families(self, exclude_root=False):
"""Return info of all families."""
fams = self.config.get_first_parent_descendants().keys()
if exclude_root:
return fams[:-1]
else:
return fams
def info_get_triggering_families(self):
"""Return info of triggering families."""
return self.config.triggering_families
def info_get_first_parent_descendants(self):
"""Families for single-inheritance hierarchy based on first parents"""
return deepcopy(self.config.get_first_parent_descendants())
def info_get_first_parent_ancestors(self, pruned=False):
"""Single-inheritance hierarchy based on first parents"""
return deepcopy(self.config.get_first_parent_ancestors(pruned))
def info_get_graph_raw(self, cto, ctn, group_nodes, ungroup_nodes,
ungroup_recursive, group_all, ungroup_all):
"""Return raw graph."""
rgraph = self.config.get_graph_raw(
cto, ctn, group_nodes, ungroup_nodes, ungroup_recursive, group_all,
ungroup_all)
return (
rgraph, self.config.suite_polling_tasks, self.config.leaves,
self.config.feet)
def info_get_task_requisites(self, name, point_string):
"""Return prerequisites of a task."""
return self.pool.get_task_requisites(
TaskID.get(name, self.get_standardised_point_string(point_string)))
def command_set_stop_cleanly(self, kill_active_tasks=False):
"""Stop job submission and set the flag for clean shutdown."""
SuiteProcPool.get_inst().stop_job_submission()
TaskProxy.stop_sim_mode_job_submission = True
self.shut_down_cleanly = True
self.kill_on_shutdown = kill_active_tasks
self.next_kill_issue = time.time()
def command_stop_now(self):
"""Shutdown immediately."""
proc_pool = SuiteProcPool.get_inst()
proc_pool.stop_job_submission()
TaskProxy.stop_sim_mode_job_submission = True
proc_pool.terminate()
raise SchedulerStop("Stopping NOW")
def command_set_stop_after_point(self, point_string):
"""Set stop after ... point."""
self.set_stop_point(self.get_standardised_point_string(point_string))
def command_set_stop_after_clock_time(self, arg):
"""Set stop after clock time.
format: ISO 8601 compatible or YYYY/MM/DD-HH:mm (backwards comp.)
"""
parser = isodatetime.parsers.TimePointParser()
try:
stop_point = parser.parse(arg)
except ValueError as exc:
try:
stop_point = parser.strptime(arg, "%Y/%m/%d-%H:%M")
except ValueError:
raise exc # Raise the first (prob. more relevant) ValueError.
stop_time_in_epoch_seconds = int(stop_point.get(
"seconds_since_unix_epoch"))
self.set_stop_clock(stop_time_in_epoch_seconds, str(stop_point))
def command_set_stop_after_task(self, task_id):
"""Set stop after a task."""
task_id = self.get_standardised_taskid(task_id)
if TaskID.is_valid_id(task_id):
self.set_stop_task(task_id)
def command_release_task(self, items, compat=None, _=None):
"""Release tasks."""
return self.pool.release_tasks(items, compat)
def command_poll_tasks(self, items, compat=None, _=None):
"""Poll all tasks or a task/family if options are provided."""
return self.pool.poll_task_jobs(items, compat)
def command_kill_tasks(self, items, compat=None, _=False):
"""Kill all tasks or a task/family if options are provided."""
return self.pool.kill_task_jobs(items, compat)
def command_release_suite(self):
"""Release all task proxies in the suite."""
self.release_suite()
def command_hold_task(self, items, compat=None, _=False):
"""Hold selected task proxies in the suite."""
return self.pool.hold_tasks(items, compat)
def command_hold_suite(self):
"""Hold all task proxies in the suite."""
self.hold_suite()
def command_hold_after_point_string(self, point_string):
"""Hold tasks AFTER this point (itask.point > point)."""
point = self.get_standardised_point(point_string)
self.hold_suite(point)
self.log.info(
"The suite will pause when all tasks have passed %s" % point)
def command_set_verbosity(self, lvl):
"""Remove suite verbosity."""
self.log.setLevel(lvl)
cylc.flags.debug = (lvl == logging.DEBUG)
return True, 'OK'
def command_remove_cycle(self, point_string, spawn=False):
"""Remove tasks in a cycle."""
return self.pool.remove_tasks(point_string + "/*", spawn)
def command_remove_task(self, items, compat=None, _=None, spawn=False):
"""Remove tasks."""
return self.pool.remove_tasks(items, spawn, compat)
def command_insert_task(
self, items, compat=None, _=None, stop_point_string=None):
"""Insert tasks."""
return self.pool.insert_tasks(items, stop_point_string, compat)
def command_nudge(self):
"""Cause the task processing loop to be invoked"""
pass
def command_reload_suite(self):
"""Reload suite configuration."""
self.log.info("Reloading the suite definition.")
old_tasks = set(self.config.get_task_name_list())
self.configure_suite(reconfigure=True)
self.pool.reconfigure(self.final_point)
# Log tasks that have been added by the reload, removed tasks are
# logged by the TaskPool.
add = set(self.config.get_task_name_list()) - old_tasks
for task in add:
logging.getLogger("main").log(
logging.WARNING, "Added task: '%s'" % (task,))
self.configure_suite_environment()
if self.gen_reference_log or self.reference_test_mode:
self.configure_reftest(recon=True)
self.pool.put_rundb_suite_params(self.initial_point, self.final_point)
self.do_update_state_summary = True
def command_set_runahead(self, *args):
"""Set runahead limit."""
self.pool.set_runahead(*args)
def set_suite_timer(self):
"""Set suite's timeout timer."""
self.suite_timer_timeout = time.time() + (
self._get_events_conf(self.EVENT_TIMEOUT)
)
if cylc.flags.verbose:
print "%s suite timer starts NOW: %s" % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_TIMEOUT)),
get_current_time_string())
self.suite_timer_active = True
def set_suite_inactivity_timer(self, reset=False):
"""Set suite's inactivity timer."""
self.suite_inactivity_timeout = time.time() + (
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)
)
if cylc.flags.verbose:
print "%s suite inactivity timer starts NOW: %s" % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT)),
get_current_time_string())
def load_suiterc(self, reconfigure):
"""Load and log the suite definition."""
SuiteConfig._FORCE = True # Reset the singleton!
self.config = SuiteConfig.get_inst(
self.suite, self.suiterc,
self.options.templatevars,
self.options.templatevars_file, run_mode=self.run_mode,
cli_initial_point_string=self._cli_initial_point_string,
cli_start_point_string=self._cli_start_point_string,
cli_final_point_string=self.options.final_point_string,
is_restart=self.is_restart, is_reload=reconfigure,
mem_log_func=self.log_memory
)
# Dump the loaded suiterc for future reference.
cfg_logdir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite config log directory')
time_str = get_current_time_string(
override_use_utc=True, use_basic_format=True,
display_sub_seconds=False
)
if reconfigure:
load_type = "reload"
elif self.is_restart:
load_type = "restart"
else:
load_type = "run"
base_name = "%s-%s.rc" % (time_str, load_type)
file_name = os.path.join(cfg_logdir, base_name)
try:
with open(file_name, "wb") as handle:
handle.write("# cylc-version: %s\n" % CYLC_VERSION)
printcfg(self.config.cfg, none_str=None, handle=handle)
except IOError as exc:
sys.stderr.write(str(exc) + "\n")
raise SchedulerError("Unable to log the loaded suite definition")
def _load_initial_cycle_point(self, _, row):
"""Load previous initial cycle point.
For restart, it may be missing from "suite.rc", but was specified as a
command line argument on cold/warm start.
"""
key, value = row
if key == "initial_point":
self._cli_initial_point_string = value
self.do_process_tasks = True
def configure_suite(self, reconfigure=False):
"""Load and process the suite definition."""
if reconfigure:
self.pri_dao.take_checkpoints(
"reload-init", other_daos=[self.pub_dao])
elif self.is_restart:
# This logic handles the lack of initial cycle point in "suite.rc".
# Things that can't change on suite reload.
run_dir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory')
pri_db_path = os.path.join(
run_dir, CylcSuiteDAO.PRI_DB_FILE_BASE_NAME)
self.pri_dao = CylcSuiteDAO(pri_db_path)
self.pri_dao.select_suite_params(self._load_initial_cycle_point)
# Take checkpoint and commit immediately so that checkpoint can be
# copied to the public database.
self.pri_dao.take_checkpoints("restart")
self.pri_dao.execute_queued_items()
self.load_suiterc(reconfigure)
# Initial and final cycle times - command line takes precedence.
# self.config already alters the 'initial cycle point' for CLI.
self.initial_point = self.config.initial_point
self.start_point = self.config.start_point
self.final_point = get_point(
self.options.final_point_string or
self.config.cfg['scheduling']['final cycle point']
)
if self.final_point is not None:
self.final_point.standardise()
if (not self.initial_point and not self.is_restart):
sys.stderr.write(
'WARNING: No initial cycle point provided ' +
' - no cycling tasks will be loaded.\n')
if self.run_mode != self.config.run_mode:
self.run_mode = self.config.run_mode
if reconfigure:
BroadcastServer.get_inst().linearized_ancestors = (
self.config.get_linearized_ancestors())
else:
# Things that can't change on suite reload.
run_dir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory')
pri_db_path = os.path.join(
run_dir, CylcSuiteDAO.PRI_DB_FILE_BASE_NAME)
pub_db_path = os.path.join(
run_dir, CylcSuiteDAO.PUB_DB_FILE_BASE_NAME)
if not self.is_restart:
# Remove database created by previous runs
try:
os.unlink(pri_db_path)
except OSError:
# Just in case the path is a directory!
rmtree(pri_db_path, ignore_errors=True)
# Ensure that:
# * public database is in sync with private database
# * private database file is private
self.pri_dao = CylcSuiteDAO(pri_db_path)
os.chmod(pri_db_path, 0600)
self.pub_dao = CylcSuiteDAO(pub_db_path, is_public=True)
self._copy_pri_db_to_pub_db()
pub_db_path_symlink = os.path.join(
run_dir, CylcSuiteDAO.DB_FILE_BASE_NAME)
try:
source = os.readlink(pub_db_path_symlink)
except OSError:
source = None
if source != CylcSuiteDAO.PUB_DB_FILE_BASE_NAME:
try:
os.unlink(pub_db_path_symlink)
except OSError:
pass
os.symlink(
CylcSuiteDAO.PUB_DB_FILE_BASE_NAME, pub_db_path_symlink)
if self.config.cfg['scheduling']['hold after point']:
self.pool_hold_point = get_point(
self.config.cfg['scheduling']['hold after point'])
if self.options.hold_point_string:
self.pool_hold_point = get_point(
self.options.hold_point_string)
if self.pool_hold_point:
print "Suite will hold after " + str(self.pool_hold_point)
suite_id = SuiteIdServer.get_inst(self.suite, self.owner)
self.pyro.connect(suite_id, PYRO_SUITEID_OBJ_NAME)
bcast = BroadcastServer.get_inst(
self.config.get_linearized_ancestors())
self.pyro.connect(bcast, PYRO_BCAST_OBJ_NAME)
self.command_queue = SuiteCommandServer()
self.pyro.connect(self.command_queue, PYRO_CMD_OBJ_NAME)
ets = ExtTriggerServer.get_inst()
self.pyro.connect(ets, PYRO_EXT_TRIG_OBJ_NAME)
info_commands = {}
for attr_name in dir(self):
attr = getattr(self, attr_name)
if callable(attr) and attr_name.startswith('info_'):
info_commands[attr_name.replace('info_', '')] = attr
self.pyro.connect(
SuiteInfoServer(info_commands), PYRO_INFO_OBJ_NAME)
self.suite_log = suite_log(self.suite)
self.log = self.suite_log.get_log()
if cylc.flags.debug:
self.suite_log.pimp(logging.DEBUG)
else:
self.suite_log.pimp(logging.INFO)
self.pyro.connect(
SuiteLogServer(self.suite_log), PYRO_LOG_OBJ_NAME)
self.suite_state = StateSummaryServer.get_inst(self.run_mode)
self.pyro.connect(self.suite_state, PYRO_STATE_OBJ_NAME)
def configure_suite_environment(self):
"""Configure suite environment."""
# static cylc and suite-specific variables:
self.suite_env = {
'CYLC_UTC': str(cylc.flags.utc),
'CYLC_CYCLING_MODE': str(cylc.flags.cycling_mode),
'CYLC_MODE': 'scheduler',
'CYLC_DEBUG': str(cylc.flags.debug),
'CYLC_VERBOSE': str(cylc.flags.verbose),
'CYLC_DIR_ON_SUITE_HOST': os.environ['CYLC_DIR'],
'CYLC_SUITE_NAME': self.suite,
'CYLC_SUITE_REG_NAME': self.suite, # DEPRECATED
'CYLC_SUITE_HOST': str(self.host),
'CYLC_SUITE_OWNER': self.owner,
'CYLC_SUITE_PORT': str(self.pyro.get_port()),
# DEPRECATED
'CYLC_SUITE_REG_PATH': RegPath(self.suite).get_fpath(),
'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST': self.suite_dir,
# may be "None"
'CYLC_SUITE_INITIAL_CYCLE_POINT': str(self.initial_point),
# may be "None"
'CYLC_SUITE_FINAL_CYCLE_POINT': str(self.final_point),
# may be "None"
'CYLC_SUITE_INITIAL_CYCLE_TIME': str(self.initial_point),
# may be "None"
'CYLC_SUITE_FINAL_CYCLE_TIME': str(self.final_point),
# needed by the test battery
'CYLC_SUITE_LOG_DIR': self.suite_log.get_dir(),
}
# Contact details for remote tasks, written to file on task
# hosts because the details can change on restarting a suite.
self.suite_env_dumper = CylcSuiteEnv(self.suite_env)
self.suite_env_dumper.suite_cylc_version = CYLC_VERSION
        # Set local values of variables that are potentially task-specific
# due to different directory paths on different task hosts. These
# are overridden by tasks prior to job submission, but in
# principle they could be needed locally by event handlers:
self.suite_task_env = {
'CYLC_SUITE_RUN_DIR': GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory'),
'CYLC_SUITE_WORK_DIR': GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite work directory'),
'CYLC_SUITE_SHARE_DIR': GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite share directory'),
'CYLC_SUITE_SHARE_PATH': '$CYLC_SUITE_SHARE_DIR', # DEPRECATED
'CYLC_SUITE_DEF_PATH': self.suite_dir
}
# (global config auto expands environment variables in local paths)
# Pass these to the job script generation code.
JobFile.get_inst().set_suite_env(self.suite_env)
# And pass contact env to the task module
# make suite vars available to [cylc][environment]:
for var, val in self.suite_env.items():
os.environ[var] = val
for var, val in self.suite_task_env.items():
os.environ[var] = val
cenv = copy(self.config.cfg['cylc']['environment'])
for var, val in cenv.items():
cenv[var] = os.path.expandvars(val)
# path to suite bin directory for suite and task event handlers
cenv['PATH'] = os.pathsep.join([
os.path.join(self.suite_dir, 'bin'), os.environ['PATH']])
# Make [cylc][environment] available to task event handlers in worker
# processes,
TaskProxy.event_handler_env = cenv
# and to suite event handlers in this process.
for var, val in cenv.items():
os.environ[var] = val
def configure_reftest(self, recon=False):
"""Configure the reference test."""
if self.gen_reference_log:
self.config.cfg['cylc']['log resolved dependencies'] = True
elif self.reference_test_mode:
rtc = self.config.cfg['cylc']['reference test']
req = rtc['required run mode']
if req and req != self.run_mode:
raise SchedulerError(
'ERROR: suite allows only ' + req + ' reference tests')
handlers = self._get_events_conf('shutdown handler')
if handlers:
sys.stderr.write(
'WARNING: shutdown handlers replaced by reference test\n')
self.config.cfg['cylc']['events']['shutdown handler'] = [
rtc['suite shutdown event handler']]
self.config.cfg['cylc']['log resolved dependencies'] = True
self.config.cfg['cylc']['events'][
'abort if shutdown handler fails'] = True
if not recon:
spec = LogSpec(os.path.join(self.config.fdir, 'reference.log'))
self.initial_point = get_point(spec.get_initial_point_string())
self.start_point = get_point(
spec.get_start_point_string()) or self.initial_point
self.final_point = get_point(spec.get_final_point_string())
self.ref_test_allowed_failures = rtc['expected task failures']
if (not rtc['allow task failures'] and
not self.ref_test_allowed_failures):
self.config.cfg['cylc']['abort if any task fails'] = True
self.config.cfg['cylc']['events']['abort on timeout'] = True
timeout = rtc[self.run_mode + ' mode suite timeout']
if not timeout:
raise SchedulerError(
'ERROR: timeout not defined for %s reference tests' % (
self.run_mode))
self.config.cfg['cylc']['events'][self.EVENT_TIMEOUT] = (
timeout)
self.config.cfg['cylc']['events']['reset timer'] = False
def run_event_handlers(self, event, message):
"""Run a suite event handler."""
# Run suite event hooks in simulation and dummy mode ONLY if enabled
for mode_name in ['simulation', 'dummy']:
key = mode_name + ' mode'
if (self.run_mode == mode_name and
self.config.cfg['cylc'][key]['disable suite event hooks']):
return
# Email notification
if event in self._get_events_conf('mail events', []):
# SMTP server
env = dict(os.environ)
mail_smtp = self._get_events_conf('mail smtp')
if mail_smtp:
env['smtp'] = mail_smtp
subject = '[suite %(event)s] %(suite)s' % {
'suite': self.suite, 'event': event}
ctx = SuiteProcContext(
(self.SUITE_EVENT_HANDLER, event),
[
'mail',
'-s', subject,
'-r', self._get_events_conf(
'mail from', 'notifications@' + get_suite_host()),
self._get_events_conf('mail to', USER),
],
env=env,
stdin_str=subject + '\n')
if SuiteProcPool.get_inst().is_closed():
# Run command in foreground if process pool is closed
SuiteProcPool.get_inst().run_command(ctx)
self._run_event_handlers_callback(ctx)
else:
# Run command using process pool otherwise
SuiteProcPool.get_inst().put_command(
ctx, self._run_event_mail_callback)
# Look for event handlers
# 1. Handlers for specific event
# 2. General handlers
handlers = self._get_events_conf('%s handler' % event)
if (not handlers and
event in self._get_events_conf('handler events', [])):
handlers = self._get_events_conf('handlers')
if not handlers:
return
for i, handler in enumerate(handlers):
cmd_key = ('%s-%02d' % (self.SUITE_EVENT_HANDLER, i), event)
# Handler command may be a string for substitution
cmd = handler % {
'event': quote(event),
'suite': quote(self.suite),
'message': quote(message),
}
if cmd == handler:
# Nothing substituted, assume classic interface
cmd = "%s '%s' '%s' '%s'" % (
handler, event, self.suite, message)
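            # Illustrative handler forms (the commands are hypothetical):
            #   "notify.sh %(event)s in %(suite)s"  -> %-substitution is used
            #   "notify.sh"  -> classic interface: notify.sh <event> <suite> <message>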
ctx = SuiteProcContext(
cmd_key, cmd, env=dict(os.environ), shell=True)
abort_on_error = self._get_events_conf(
'abort if %s handler fails' % event)
if abort_on_error or SuiteProcPool.get_inst().is_closed():
# Run command in foreground if abort on failure is set or if
# process pool is closed
SuiteProcPool.get_inst().run_command(ctx)
self._run_event_handlers_callback(
ctx, abort_on_error=abort_on_error)
else:
# Run command using process pool otherwise
SuiteProcPool.get_inst().put_command(
ctx, self._run_event_handlers_callback)
def _run_event_handlers_callback(self, ctx, abort_on_error=False):
"""Callback on completion of a suite event handler."""
if ctx.ret_code:
self.log.warning(str(ctx))
sys.stderr.write(
'ERROR: %s EVENT HANDLER FAILED\n' % ctx.cmd_key[1])
if (ctx.cmd_key[1] == self.EVENT_SHUTDOWN and
self.reference_test_mode):
sys.stderr.write('ERROR: SUITE REFERENCE TEST FAILED\n')
if abort_on_error:
raise SchedulerError(ctx.err)
else:
self.log.info(str(ctx))
if (ctx.cmd_key[1] == self.EVENT_SHUTDOWN and
self.reference_test_mode):
sys.stdout.write('SUITE REFERENCE TEST PASSED\n')
def _run_event_mail_callback(self, ctx):
"""Callback the mail command for notification of a suite event."""
if ctx.ret_code:
self.log.warning(str(ctx))
else:
self.log.info(str(ctx))
def run(self):
"""Main loop."""
if self.pool_hold_point is not None:
self.hold_suite(self.pool_hold_point)
if self.options.start_held:
self.log.info("Held on start-up (no tasks will be submitted)")
self.hold_suite()
self.run_event_handlers(self.EVENT_STARTUP, 'suite starting')
self.log_memory("scheduler.py: begin run while loop")
proc_pool = SuiteProcPool.get_inst()
next_fs_check = time.time() + self.FS_CHECK_PERIOD
suite_run_dir = GLOBAL_CFG.get_derived_host_item(
self.suite, 'suite run directory')
while True: # MAIN LOOP
# Periodic check that the suite directory still exists
# - designed to catch stalled suite daemons where the suite
# directory has been deleted out from under itself
if time.time() > next_fs_check:
if not os.path.exists(suite_run_dir):
os.kill(os.getpid(), signal.SIGKILL)
else:
next_fs_check = time.time() + self.FS_CHECK_PERIOD
# PROCESS ALL TASKS whenever something has changed that might
# require renegotiation of dependencies, etc.
if self.shut_down_now:
warned = False
while not proc_pool.is_dead():
proc_pool.handle_results_async()
if not warned:
print("Waiting for the command process " +
"pool to empty for shutdown")
print("(you can \"stop now\" to shut " +
"down immediately if you like).")
warned = True
self.process_command_queue()
time.sleep(0.5)
raise SchedulerStop("Finished")
tinit = time.time()
if self.pool.do_reload:
self.pool.reload_taskdefs()
self.do_update_state_summary = True
self.process_command_queue()
self.pool.release_runahead_tasks()
proc_pool.handle_results_async()
# External triggers must be matched now. If any are matched pflag
# is set to tell process_tasks() that task processing is required.
self.pool.match_ext_triggers()
if self.process_tasks():
if cylc.flags.debug:
self.log.debug("BEGIN TASK PROCESSING")
main_loop_start_time = time.time()
changes = 0
self.pool.match_dependencies()
if not self.shut_down_cleanly:
changes += self.pool.submit_tasks()
changes += self.pool.spawn_all_tasks()
changes += self.pool.remove_spent_tasks()
changes += self.pool.remove_suiciding_tasks()
if changes:
self.do_update_state_summary = True
BroadcastServer.get_inst().expire(self.pool.get_min_point())
if cylc.flags.debug:
seconds = time.time() - main_loop_start_time
self.log.debug(
"END TASK PROCESSING (took " + str(seconds) + " sec)")
self.pool.process_queued_task_messages()
self.pool.process_queued_task_event_handlers()
self.process_command_queue()
if cylc.flags.iflag or self.do_update_state_summary:
cylc.flags.iflag = False
self.do_update_state_summary = False
self.pool.put_rundb_task_pool()
self.update_state_summary()
try:
self.pool.process_queued_db_ops()
except OSError as err:
self.shutdown(str(err))
raise
# If public database is stuck, blast it away by copying the content
# of the private database into it.
if self.pub_dao.n_tries >= self.pub_dao.MAX_TRIES:
try:
self._copy_pri_db_to_pub_db()
except (IOError, OSError) as exc:
# Something has to be very wrong here, so stop the suite
self.shutdown(str(exc))
raise
else:
# No longer stuck
self.log.warning(
"%(pub_db_name)s: recovered from %(pri_db_name)s" % {
"pub_db_name": self.pub_dao.db_file_name,
"pri_db_name": self.pri_dao.db_file_name})
self.pub_dao.n_tries = 0
if self._get_events_conf(self.EVENT_TIMEOUT):
self.check_suite_timer()
if self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT):
self.check_suite_inactive()
if self.config.cfg['cylc']['abort if any task fails']:
if self.pool.any_task_failed():
raise SchedulerError(
'Task(s) failed and "abort if any task fails" is set')
            # the run is a reference test, and unexpected failures occurred
if self.reference_test_mode:
if len(self.ref_test_allowed_failures) > 0:
for itask in self.pool.get_failed_tasks():
if (itask.identity not in
self.ref_test_allowed_failures):
sys.stderr.write(str(itask.identity) + "\n")
raise SchedulerError(
'Failed task is not in allowed failures list')
# check submission and execution timeout and polling timers
if self.run_mode != 'simulation':
self.pool.check_task_timers()
if (self.config.cfg['cylc']['disable automatic shutdown'] or
self.options.no_auto_shutdown):
auto_stop = False
else:
auto_stop = self.pool.check_auto_shutdown()
if self.stop_clock_done() or self.stop_task_done() or auto_stop:
self.command_set_stop_cleanly()
if ((self.shut_down_cleanly or auto_stop) and
self.pool.no_active_tasks()):
self.update_state_summary()
proc_pool.close()
self.shut_down_now = True
if (self.shut_down_cleanly and self.kill_on_shutdown):
if self.pool.has_unkillable_tasks_only():
if not self.pool.no_active_tasks():
self.log.warning(
'some tasks were not killable at shutdown')
self.update_state_summary()
proc_pool.close()
self.shut_down_now = True
else:
if time.time() > self.next_kill_issue:
self.pool.poll_task_jobs()
self.pool.kill_task_jobs()
self.next_kill_issue = time.time() + 10.0
if self.options.profile_mode:
now = time.time()
self._update_profile_info("scheduler loop dt (s)", now - tinit,
amount_format="%.3f")
self._update_cpu_usage()
if (int(now) % 60 == 0):
# Only get this every minute.
self.log_memory("scheduler.py: loop: " +
get_current_time_string())
if (self._get_events_conf(self.EVENT_TIMEOUT) is not None and
not (self.shut_down_cleanly or auto_stop)):
self.check_suite_stalled()
time.sleep(1)
self.log_memory("scheduler.py: end main loop")
# END MAIN LOOP
def update_state_summary(self):
"""Update state summary, e.g. for GUI."""
self.suite_state.update(
self.pool.get_tasks(), self.pool.get_rh_tasks(),
self.pool.get_min_point(), self.pool.get_max_point(),
self.pool.get_max_point_runahead(), self.paused(),
self.will_pause_at(), self.shut_down_cleanly, self.will_stop_at(),
self.config.ns_defn_order, self.pool.do_reload)
def check_suite_timer(self):
"""Check if suite has timed out or not."""
if self.already_timed_out or not self.is_stalled:
return
if time.time() > self.suite_timer_timeout:
self.already_timed_out = True
message = 'suite timed out after %s' % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_TIMEOUT))
)
self.log.warning(message)
self.run_event_handlers(self.EVENT_TIMEOUT, message)
if self._get_events_conf('abort on timeout'):
raise SchedulerError('Abort on suite timeout is set')
def check_suite_inactive(self):
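        """Check if the suite has been inactive for too long."""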
if self.already_inactive:
return
if time.time() > self.suite_inactivity_timeout:
self.already_inactive = True
message = 'suite timed out after inactivity for %s' % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT))
)
self.log.warning(message)
self.run_event_handlers(self.EVENT_INACTIVITY_TIMEOUT, message)
if self._get_events_conf('abort on inactivity'):
raise SchedulerError('Abort on suite inactivity is set')
def check_suite_stalled(self):
"""Check if suite is stalled or not."""
if self.is_stalled:
return
# Suite should only be considered stalled if two consecutive
# scheduler loops meet the criteria. This caters for pauses between
# tasks succeeding and those triggering off them moving to ready
# e.g. foo[-P1D] => foo
if self.stalled_last and self.pool.pool_is_stalled():
self.is_stalled = True
message = 'suite stalled'
self.log.warning(message)
self.run_event_handlers(self.EVENT_STALLED, message)
self.pool.report_stalled_task_deps()
if self._get_events_conf('abort on stalled'):
raise SchedulerError('Abort on suite stalled is set')
# start suite timer
self.set_suite_timer()
else:
self.stalled_last = self.pool.pool_is_stalled()
# De-activate suite timeout timer if not stalled
if self.suite_timer_active and not self.stalled_last:
self.suite_timer_active = False
if cylc.flags.verbose:
print "%s suite timer stopped NOW: %s" % (
get_seconds_as_interval_string(
self._get_events_conf(self.EVENT_TIMEOUT)),
get_current_time_string())
def process_tasks(self):
"""Return True if waiting tasks are ready."""
# do we need to do a pass through the main task processing loop?
process = False
if self.do_process_tasks:
# this flag is turned on by commands that change task state
process = True
self.do_process_tasks = False # reset
if cylc.flags.pflag:
process = True
cylc.flags.pflag = False # reset
if (self._get_events_conf(self.EVENT_INACTIVITY_TIMEOUT) and
self._get_events_conf('reset inactivity timer')):
self.set_suite_inactivity_timer()
# New suite activity, so reset the stalled flag.
self.stalled_last = False
self.is_stalled = False
if self.pool.waiting_tasks_ready():
process = True
if self.run_mode == 'simulation' and self.pool.sim_time_check():
process = True
# if not process:
# # If we neglect to set cylc.flags.pflag on some event that
# # makes re-negotiation of dependencies necessary then if
# # that event ever happens in isolation the suite could stall
# # unless manually nudged ("cylc nudge SUITE"). If this
# # happens turn on debug logging to see what happens
# # immediately before the stall,
# # then set cylc.flags.pflag = True in
# # the corresponding code section. Alternatively,
# # for an undiagnosed stall you can uncomment this section to
# # stimulate task processing every few seconds even during
# # lulls in activity. THIS SHOULD NOT BE NECESSARY, HOWEVER.
# if not self.nudge_timer_on:
# self.nudge_timer_start = now()
# self.nudge_timer_on = True
# else:
# timeout = self.nudge_timer_start + \
# datetime.timedelta(seconds=self.auto_nudge_interval)
# if now() > timeout:
# process = True
# self.nudge_timer_on = False
return process
def shutdown(self, reason=''):
"""Shutdown the suite."""
msg = "Suite shutting down at " + get_current_time_string()
if reason:
msg += ' (' + reason + ')'
print msg
# The getattr() calls and if tests below are used in case the
# suite is not fully configured before the shutdown is called.
if getattr(self, "log", None) is not None:
self.log.info(msg)
if self.gen_reference_log:
print '\nCOPYING REFERENCE LOG to suite definition directory'
copyfile(
self.suite_log.get_path(),
os.path.join(self.config.fdir, 'reference.log'))
proc_pool = SuiteProcPool.get_inst()
if proc_pool:
if not proc_pool.is_dead():
# e.g. KeyboardInterrupt
proc_pool.terminate()
proc_pool.join()
proc_pool.handle_results_async()
if self.pool:
self.pool.shutdown()
if self.request_handler:
self.request_handler.quit = True
self.request_handler.join()
for iface in [self.command_queue,
SuiteIdServer.get_inst(), StateSummaryServer.get_inst(),
ExtTriggerServer.get_inst(), BroadcastServer.get_inst()]:
try:
self.pyro.disconnect(iface)
except KeyError:
# Wasn't connected yet.
pass
if self.pyro:
self.pyro.shutdown()
try:
os.unlink(self.port_file)
except OSError as exc:
sys.stderr.write(
"WARNING, failed to remove port file: %s\n%s\n" % (
self.port_file, exc))
# disconnect from suite-db, stop db queue
if getattr(self, "db", None) is not None:
self.pri_dao.close()
self.pub_dao.close()
if getattr(self, "config", None) is not None:
# run shutdown handlers
self.run_event_handlers(self.EVENT_SHUTDOWN, reason)
print "DONE" # main thread exit
def set_stop_point(self, stop_point_string):
"""Set stop point."""
stop_point = get_point(stop_point_string)
self.stop_point = stop_point
self.log.info("Setting stop cycle point: %s" % stop_point_string)
self.pool.set_stop_point(self.stop_point)
def set_stop_clock(self, unix_time, date_time_string):
"""Set stop clock time."""
self.log.info("Setting stop clock time: %s (unix time: %s)" % (
date_time_string, unix_time))
self.stop_clock_time = unix_time
self.stop_clock_time_string = date_time_string
def set_stop_task(self, task_id):
"""Set stop after a task."""
name = TaskID.split(task_id)[0]
if name in self.config.get_task_name_list():
task_id = self.get_standardised_taskid(task_id)
self.log.info("Setting stop task: " + task_id)
self.stop_task = task_id
else:
self.log.warning(
"Requested stop task name does not exist: %s" % name)
def stop_task_done(self):
"""Return True if stop task has succeeded."""
id_ = self.stop_task
if (id_ is None or not self.pool.task_succeeded(id_)):
return False
self.log.info("Stop task " + id_ + " finished")
return True
def hold_suite(self, point=None):
"""Hold all tasks in suite."""
if point is None:
self.hold_suite_now = True
self.pool.hold_all_tasks()
else:
self.log.info("Setting suite hold cycle point: " + str(point))
self.pool.set_hold_point(point)
def release_suite(self):
"""Release (un-hold) all tasks in suite."""
if self.hold_suite_now:
self.log.info("RELEASE: new tasks will be queued when ready")
self.hold_suite_now = False
self.pool.set_hold_point(None)
self.pool.release_all_tasks()
def will_stop_at(self):
"""Return stop point, if set."""
if self.stop_point:
return str(self.stop_point)
elif self.stop_clock_time is not None:
return self.stop_clock_time_string
elif self.stop_task:
return self.stop_task
elif self.final_point:
return self.final_point
else:
return None
def clear_stop_times(self):
"""Clear attributes associated with stop time."""
self.stop_point = None
self.stop_clock_time = None
self.stop_clock_time_string = None
self.stop_task = None
def paused(self):
"""Is the suite paused?"""
return self.hold_suite_now
def will_pause_at(self):
"""Return self.pool.get_hold_point()."""
return self.pool.get_hold_point()
def command_trigger_task(self, items, compat=None, _=None):
"""Trigger tasks."""
return self.pool.trigger_tasks(items, compat)
def command_dry_run_task(self, items, compat=None):
"""Dry-run a task, e.g. edit run."""
return self.pool.dry_run_task(items, compat)
def command_reset_task_state(self, items, compat=None, state=None, _=None):
"""Reset the state of tasks."""
return self.pool.reset_task_states(items, state, compat)
def command_spawn_tasks(self, items, compat=None, _=None):
"""Force spawn task successors."""
return self.pool.spawn_tasks(items, compat)
def filter_initial_task_list(self, inlist):
"""Return list of initial tasks after applying a filter."""
included_by_rc = self.config.cfg[
'scheduling']['special tasks']['include at start-up']
excluded_by_rc = self.config.cfg[
'scheduling']['special tasks']['exclude at start-up']
outlist = []
for name in inlist:
if name in excluded_by_rc:
continue
if len(included_by_rc) > 0:
if name not in included_by_rc:
continue
outlist.append(name)
return outlist
def stop_clock_done(self):
"""Return True if wall clock stop time reached."""
if (self.stop_clock_time is not None and
time.time() > self.stop_clock_time):
time_point = (
isodatetime.data.get_timepoint_from_seconds_since_unix_epoch(
self.stop_clock_time
)
)
self.log.info("Wall clock stop time reached: " + str(time_point))
self.stop_clock_time = None
return True
else:
return False
def _copy_pri_db_to_pub_db(self):
"""Copy content of primary database file to public database file.
        Use a temporary file to ensure that we do not end up with a partial file.
"""
temp_pub_db_file_name = None
self.pub_dao.close()
try:
self.pub_dao.conn = None # reset connection
open(self.pub_dao.db_file_name, "a").close() # touch
st_mode = os.stat(self.pub_dao.db_file_name).st_mode
temp_pub_db_file_name = mkstemp(
prefix=self.pub_dao.PUB_DB_FILE_BASE_NAME,
dir=os.path.dirname(self.pub_dao.db_file_name))[1]
copyfile(
self.pri_dao.db_file_name, temp_pub_db_file_name)
os.rename(temp_pub_db_file_name, self.pub_dao.db_file_name)
os.chmod(self.pub_dao.db_file_name, st_mode)
except (IOError, OSError):
if temp_pub_db_file_name:
os.unlink(temp_pub_db_file_name)
raise
def log_memory(self, message):
"""Print a message to standard out with the current memory usage."""
if not self.options.profile_mode:
return
proc = Popen(["ps", "h", "-orss", str(os.getpid())], stdout=PIPE)
memory = int(proc.communicate()[0])
print "PROFILE: Memory: %d KiB: %s" % (memory, message)
def _update_profile_info(self, category, amount, amount_format="%s"):
"""Update the 1, 5, 15 minute dt averages for a given category."""
tnow = time.time()
self._profile_amounts.setdefault(category, [])
amounts = self._profile_amounts[category]
amounts.append((tnow, amount))
self._profile_update_times.setdefault(category, None)
last_update = self._profile_update_times[category]
if last_update is not None and tnow < last_update + 60:
return
self._profile_update_times[category] = tnow
averages = {1: [], 5: [], 15: []}
for then, amount in list(amounts):
age = (tnow - then) / 60.0
if age > 15:
amounts.remove((then, amount))
continue
for minute_num in averages.keys():
if age <= minute_num:
averages[minute_num].append(amount)
output_text = "PROFILE: %s:" % category
for minute_num, minute_amounts in sorted(averages.items()):
averages[minute_num] = sum(minute_amounts) / len(minute_amounts)
output_text += (" %d: " + amount_format) % (
minute_num, averages[minute_num])
self.log.info(output_text)
def _update_cpu_usage(self):
"""Obtain CPU usage statistics."""
proc = Popen(["ps", "-o%cpu= ", str(os.getpid())], stdout=PIPE)
try:
cpu_frac = float(proc.communicate()[0])
except (TypeError, OSError, IOError, ValueError) as exc:
self.log.warning("Cannot get CPU % statistics: %s" % exc)
return
self._update_profile_info("CPU %", cpu_frac, amount_format="%.1f")
def _get_events_conf(self, key, default=None):
"""Return a named event hooks configuration."""
for getter in [
self.config.cfg['cylc']['events'],
GLOBAL_CFG.get(['cylc', 'events'])]:
try:
value = getter[key]
except KeyError:
pass
else:
if value is not None:
return value
return default
| benfitzpatrick/cylc | lib/cylc/scheduler.py | Python | gpl-3.0 | 77,150 | [
"BLAST"
] | 21bfde3db42151b8448673031685314f0f39a54eb389942ff675d8c31b82394e |
# -*- coding: utf-8 -*-
import datetime
import os
import json
from unittest import TestCase
from ehb_client.requests.subject_request_handler import SubjectRequestHandler, \
Subject
from ehb_client.requests.external_system_request_handler import ExternalSystem, \
ExternalSystemRequestHandler
from ehb_client.requests.external_record_request_handler import ExternalRecordRequestHandler, \
ExternalRecord
from ehb_client.requests.organization_request_handler import Organization, \
OrganizationRequestHandler
from ehb_client.requests.group_request_handler import Group, \
GroupRequestHandler
from ehb_client.requests.exceptions import PageNotFound, ErrorConstants, \
RequestedRangeNotSatisfiable, NotAuthorized
class RequestResources(object):
host = os.environ.get('EHB_HOST', 'localhost:8000')
isSecure = False
root_path = ''
api_key = os.environ.get('EHB_KEY', '680b8740f60ec65af0163ee7c7959bde')
class ehbTestClass(TestCase):
'''
The ehbTestClass is a base class for all ehb-client tests. Its main purpose
is to create test objects for our different request handlers to manipulate,
and to tear down those objects when the tests are finished.
    In the rare case that the setUp of a test class fails, you may be left
    with artifacts in the eHB. Make sure to delete any "test" organizations
    left over and try running the tests again.
'''
def setUp(self):
# Initialize Request Handlers
self.er_rh = ExternalRecordRequestHandler(
RequestResources.host,
RequestResources.root_path,
RequestResources.isSecure,
RequestResources.api_key)
self.es_rh = ExternalSystemRequestHandler(
RequestResources.host,
RequestResources.root_path,
RequestResources.isSecure,
RequestResources.api_key)
self.s_rh = SubjectRequestHandler(
RequestResources.host,
RequestResources.root_path,
RequestResources.isSecure,
RequestResources.api_key)
self.g_rh = GroupRequestHandler(
RequestResources.host,
RequestResources.root_path,
RequestResources.isSecure,
RequestResources.api_key)
self.o_rh = OrganizationRequestHandler(
RequestResources.host,
RequestResources.root_path,
RequestResources.isSecure,
RequestResources.api_key)
self.es_name1 = "testESNameABCD"
self.es_name2 = "testESName1234"
self.org_subj_id_1 = "testmrn0001"
self.org_subj_id_2 = "testmrn0002"
self.org_subj_id_3 = "testmrn002"
self.recid1 = "testrecid0001"
self.recid2 = "testrecid0002"
self.recid3 = "testrecid0003"
self.recid4 = "testrecid0004"
self.path1 = "testpath1"
self.path2 = "testpath2"
self.path3 = 'testpath3'
self.org_name_1 = "testOrg001"
self.org_name_2 = "testOrg002"
self.subj_id_label_1 = 'org_1_id'
try:
# Create the Organizations
self.o1 = Organization(name=self.org_name_1,
subject_id_label=self.subj_id_label_1)
self.o2 = Organization(name=self.org_name_2,
subject_id_label=self.subj_id_label_1)
r = self.o_rh.create(self.o1, self.o2)
# Create Subjects
dob = datetime.date.today()
self.s1 = Subject(first_name='FIRST_ONE', last_name="LAST_ONE",
organization_id=self.o1.id,
organization_subject_id=self.org_subj_id_1, dob=dob)
self.s2 = Subject(first_name='FIRST_TWO', last_name="LAST_TWO",
organization_id=self.o1.id,
organization_subject_id=self.org_subj_id_2, dob=dob)
self.s3 = Subject(first_name='FIRST_THREE', last_name="LAST_THREE",
organization_id=self.o2.id,
organization_subject_id=self.org_subj_id_3, dob=dob)
r = self.s_rh.create(self.s1, self.s2, self.s3)
# Create External Systems
self.es1 = ExternalSystem(name=self.es_name1, url="http://test.com/",
description='A test system')
self.es2 = ExternalSystem(name=self.es_name2, url="http://testTwo.com/",
description='Another test system')
r = self.es_rh.create(self.es1, self.es2)
# Create External record
self.er1 = ExternalRecord(record_id=self.recid1, subject_id=self.s1.id,
external_system_id=self.es1.id, path=self.path1,
label_id=1)
self.er2 = ExternalRecord(record_id=self.recid2, subject_id=self.s1.id,
external_system_id=self.es1.id, path=self.path2,
label_id=1)
r = self.er_rh.create(self.er1, self.er2)
self.er1 = r[0]['external_record']
self.er2 = r[1]['external_record']
# # Create External record link
r = self.er_rh.link(self.er1, self.er2, 1)
# Create test groups
# Subject Group
self.g1 = Group(name='testgroup1', is_locking=False,
client_key='ck', description='A test group')
# ExternalRecord Group
self.g2 = Group(name='exrecgroup', is_locking=False,
client_key='ck', description='An external record group')
r = self.g_rh.create(self.g1, self.g2)
# Add subject to test group
r = self.g_rh.add_subjects(self.g1, [self.s1])
# Add external record to test group
r = self.g_rh.add_records(self.g2, [self.er1])
except:
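            # Deliberately swallow any setUp failure; see the class docstring
            # for how to recover from leftover eHB artifacts.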
pass
def tearDown(self):
try:
self.es_rh.delete(name=self.es_name1)
except PageNotFound:
pass
try:
self.es_rh.delete(name=self.es_name2)
except PageNotFound:
pass
try:
self.es_rh.delete(name='testESNameCreate')
except PageNotFound:
pass
try:
o1 = self.o_rh.get(name=self.org_name_1)
self.s_rh.delete(organization_id=o1.id,
organization_subject_id=self.org_subj_id_1)
self.s_rh.delete(organization_id=o1.id,
organization_subject_id=self.org_subj_id_2)
except PageNotFound:
pass
try:
o2 = self.o_rh.get(name=self.org_name_2)
self.s_rh.delete(organization_id=o2.id,
organization_subject_id=self.org_subj_id_3)
except PageNotFound:
pass
try:
self.er_rh.delete(external_system_name=self.es_name1)
except PageNotFound:
pass
try:
self.er_rh.delete(external_system_name=self.es_name2)
except PageNotFound:
pass
try:
self.o_rh.delete(name=self.org_name_1)
except PageNotFound:
pass
try:
self.o_rh.delete(name=self.org_name_2)
except PageNotFound:
pass
try:
self.o_rh.delete(name='testorg1')
except PageNotFound:
pass
try:
self.o_rh.delete(name='testorg2')
except PageNotFound:
pass
try:
self.o_rh.delete(name='testorg99')
except PageNotFound:
pass
try:
self.er_rh.create(self.er1)
except PageNotFound:
pass
try:
self.g_rh.delete(name='testgroup1', client_key='ck')
except PageNotFound:
pass
try:
self.g_rh.delete(name='exrecgroup', client_key='ck')
except PageNotFound:
pass
try:
self.g_rh.delete(name='testgroup2', client_key='ck')
except PageNotFound:
pass
try:
self.g_rh.delete(name='testgroup3', client_key='ck')
except PageNotFound:
pass
try:
self.g_rh.delete(name='testgroup1xyz', client_key='ck')
except PageNotFound:
pass
def createAndCheck(self, rh, *records):
r = rh.create(*records)
b = True
        for i in range(len(r)):
b = b and r[i].get('success')
self.assertTrue(b, 'External record create failed')
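# Note: create/update calls on the request handlers return a list with one
# status dict per record; an element looks roughly like this (illustrative):
#     {'success': False,
#      'errors': [ErrorConstants.ERROR_RECORD_ID_ALREADY_IN_EXTERNAL_SYSTEM]}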
class TestExternalRecordHandler(ehbTestClass):
def testExternalRecordUpdate(self):
'''
        Try to update an existing ExternalRecord by updating its record_id.
'''
self.er1.record_id = self.recid3
r = self.er_rh.update(self.er1)[0]
self.assertTrue(r.get('success'), 'Single External Record update failed')
def testExternalRecordDupeCreate(self):
'''
Try to create another ExternalRecord with the same record id in the
same external system with the same path, expect this to return failure
in status
'''
er2 = ExternalRecord(record_id=self.er1.record_id,
subject_id=self.er1.subject_id,
external_system_id=self.er1.external_system_id,
path=self.er1.path)
r = self.er_rh.create(er2)[0]
errors = r.get('errors')
self.assertTrue(len(errors) == 1, 'There should be only one error')
        self.assertTrue(
            errors[0] == ErrorConstants.ERROR_RECORD_ID_ALREADY_IN_EXTERNAL_SYSTEM)
self.assertFalse(
r.get('success'),
'Should not be able to create an ExternalRecord with same record_id, system, and path')
def testExternalRecordDiffSubjects(self):
'''
Try to create another ExternalRecord with the same record id in the
same external system with the same path but different subjects,
expect this to return failure in status
'''
er = ExternalRecord(record_id=self.er1.record_id, subject_id=self.s2.id,
external_system_id=self.er1.external_system_id,
path=self.er1.path)
r = self.er_rh.create(er)[0]
errors = r.get('errors')
self.assertTrue(len(errors) == 1, 'Exactly one error expected')
self.assertTrue(errors[0] == ErrorConstants.ERROR_RECORD_ID_ALREADY_IN_EXTERNAL_SYSTEM)
self.assertFalse(
r.get('success'),
'Should not be able to create an ExternalRecord with same record_id, system, and path')
def testExternalRecordDelete(self):
'''
Try to delete an existing ExternalRecord
If successful the request handler will not return anything so we check
for that None value with assertFalse
'''
r = self.er_rh.delete(id=self.er1.id)
self.assertFalse(r, "Should be able to delete an ExternalRecord")
def testExternalRecordFailedDelete(self):
'''
        Try to delete an ExternalRecord that does not exist. Expect an
        exception of type PageNotFound
        '''
        self.assertRaises(PageNotFound, self.er_rh.delete, id='bad_id')
def testExternalRecordFailedDelete2(self):
'''
        Try to delete an ExternalRecord that does not exist. Expect an
        exception of type PageNotFound
        '''
        self.assertRaises(
            PageNotFound, self.er_rh.delete,
            external_system_id='bad_system', subject_id=-1)
def testExternalRecordCreateMulti(self):
'''
Try creating multiple ExternalRecords at one time. Expect success.
'''
er3 = ExternalRecord(record_id=self.recid3, subject_id=self.s3.id,
external_system_id=self.es1.id, path=self.path1)
er4 = ExternalRecord(record_id=self.recid4, subject_id=self.s1.id,
external_system_id=self.es2.id, path=self.path2)
er5 = ExternalRecord(record_id=self.recid4, subject_id=self.s1.id,
external_system_id=self.es2.id, path=self.path1)
er6 = ExternalRecord(record_id=self.recid1, subject_id=self.s3.id,
external_system_id=self.es2.id, path=self.path3)
self.createAndCheck(self.er_rh, er3, er4, er5, er6)
def testExternalRecordGetByExternalSystemID(self):
'''
Try getting ExternalRecords by ExternalSystem ID
'''
r = self.er_rh.get(external_system_id=self.es1.id)
self.assertTrue(len(r) == 2)
def testExternalRecordGetByExternalSystemURL(self):
'''
Try getting ExternalRecords by ExternalSystem URL
'''
r = self.er_rh.get(external_system_url=self.es1.url)
self.assertTrue(len(r) == 2)
def testExternalRecordGetByExternalSystemName(self):
'''
Try getting ExternalRecords by ExternalSystem Name
'''
r = self.er_rh.get(external_system_name=self.es1.name)
self.assertTrue(len(r) == 2)
def testExternalRecordGetBySubjectID(self):
'''
Try getting ExternalRecords by Subject ID
'''
r = self.er_rh.get(subject_id=self.s1.id)
self.assertTrue(len(r) == 2)
def testExternalRecordGetBySubjectOrg(self):
'''
Try getting ExternalRecords by Subject organization ID and their
organization subject id (aka MRN)
'''
r = self.er_rh.get(subject_org=self.s1.organization_id,
subject_org_id=self.s1.organization_subject_id)
self.assertTrue(len(r) == 2)
def testExternalRecordGetByPath(self):
'''
Try getting ExternalRecords by path
'''
r = self.er_rh.get(path=self.path1)
self.assertTrue(len(r) == 1)
def testExternalRecordUpdateMulti(self):
'''
Try updating multiple records
'''
self.er1.record_id = 'testrecid000x'
self.er2.record_id = 'testrecid000y'
r = self.er_rh.update(self.er1, self.er2)
self.assertTrue(r[0].get('success'))
self.assertTrue(r[1].get('success'))
def testExternalRecordUpdateBadId(self):
'''
        Try updating an ExternalRecord with the record_id and path of an
        existing record. Expect this to fail.
'''
self.er2.record_id = self.er1.record_id
self.er2.path = self.er1.path
r = self.er_rh.update(self.er2)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1, 'Exactly one error was expected')
self.assertTrue(errors[0] == ErrorConstants.ERROR_RECORD_ID_ALREADY_IN_EXTERNAL_SYSTEM)
def testExternalRecordUpdateNoRecord(self):
'''
Try to update an ExternalRecord that does not exist. Expect this to fail.
'''
self.er2.id = -1
r = self.er_rh.update(self.er2)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
        self.assertTrue(len(errors) == 1, 'Exactly one error was expected')
self.assertTrue(errors[0] == ErrorConstants.ERROR_RECORD_ID_NOT_FOUND)
def testExternalRecordGetById(self):
'''
Try to retrieve an ExternalRecord by its ID
'''
r = self.er_rh.get(id=self.er1.id)
self.assertTrue(r.id == self.er1.id)
def testExternalRecordGetLinksById(self):
'''
        Try to retrieve an ExternalRecord's links by the primary
        ExternalRecord's ID
'''
r = self.er_rh.get(id=self.er1.id, links=True)
self.assertEqual(len(r), 1)
self.assertEqual(r[0]['external_record']['id'], 4)
def testExternalRecordDeleteLinks(self):
'''
        Try to remove the link between two ExternalRecords via the primary
        ExternalRecord's ID
'''
r = self.er_rh.unlink(self.er1, 1)
self.assertTrue(r['success'])
def testExternalRecordDeleteByPath(self):
'''
Try to delete ExternalRecords by path. Expect subsequent query to fail
because all records have been deleted.
'''
self.er_rh.delete(path=self.path1)
        self.assertRaises(PageNotFound, self.er_rh.get, path=self.path1)
def testExternalRecordDeleteByExternalSystem(self):
'''
Try to delete ExternalRecords by ExternalSystem. Expect subsequent query
to fail because all records have been deleted.
'''
self.er_rh.delete(external_system_id=self.es1.id)
        self.assertRaises(
            PageNotFound, self.es_rh.external_records,
            external_system_id=self.es1.id)
def testExternalRecordDeleteBySubject(self):
'''
Try to delete ExternalRecords by Subject. Expect subsequent query for
records associated with that subject to fail as they have been deleted.
'''
self.er_rh.delete(subject_id=self.s1.id)
        self.assertRaises(
            PageNotFound, self.es_rh.external_records,
            external_system_id=self.es1.id)
def testExternalRecordDeleteByExSysURL(self):
'''
Try to delete ExternalRecords based on an ExternalSystem's URL.
Expect subsequent query for records associated with that ExternalSystem
to fail as they have been deleted.
'''
self.er_rh.delete(external_system_url=self.es1.url)
        self.assertRaises(
            PageNotFound, self.er_rh.get, external_system_url=self.es1.url)
class TestExternalSystemHandler(ehbTestClass):
def testExternalSystemSubjectsByID(self):
'''
Try getting subjects associated with an external system by that
        external system's ID.
'''
r = self.es_rh.subjects(external_system_id=self.es1.id)
self.assertTrue(len(r) == 2)
def testExternalSystemSubjectsByPath(self):
'''
Try getting subjects associated with an external system by path.
'''
r = self.es_rh.subjects(external_system_id=self.es1.id, path=self.path1)
self.assertTrue(len(r) == 1)
def testExternalSystemSubjectsByOrgID(self):
'''
Try getting subjects associated with an external system by org id.
'''
r = self.es_rh.subjects(external_system_id=self.es1.id,
organization_id=self.s1.organization_id)
self.assertTrue(len(r) == 2)
def testExternalSystemSubjectsByPathAndOrg(self):
'''
Try getting subjects associated with an external system by org id
and path.
'''
r = self.es_rh.subjects(external_system_id=self.es1.id,
path=self.path1,
organization_id=self.s1.organization_id)
self.assertTrue(len(r) == 1)
self.assertTrue(r[0].id == self.s1.id)
def testExternalSystemExternalRecordsByExSys(self):
'''
Try getting ExternalRecords associated with an external system by
external system id.
'''
r = self.es_rh.external_records(external_system_id=self.es1.id)
self.assertTrue(len(r) == 2)
def testExternalSystemExternalRecordsByExSysPath(self):
'''
Try getting ExternalRecords associated with an external system by
external system id and path.
'''
r = self.es_rh.external_records(external_system_id=self.es1.id,
path=self.path1)
self.assertTrue(len(r) == 1)
def testExternalSystemExternalRecordsByExSysOrg(self):
'''
Try getting ExternalRecords associated with an external system by
external system id and organization id.
'''
r = self.es_rh.external_records(external_system_id=self.es1.id,
organization_id=self.o1.id)
self.assertTrue(len(r) == 2)
def testExternalSystemExternalRecordsByExSysSubject(self):
'''
Try getting ExternalRecords associated with an external system by
external system id and subject id.
'''
r = self.es_rh.external_records(external_system_id=self.es1.id,
subject_id=self.s1.id)
self.assertTrue(len(r) == 2)
def testExternalSystemExternalRecordsByExSysSubjectPath(self):
'''
Try getting ExternalRecords associated with an external system by
external system id, subject id, and path.
'''
r = self.es_rh.external_records(external_system_id=self.es1.id,
subject_id=self.s1.id,
path=self.path1)
self.assertTrue(len(r) == 1)
def testExternalSystemCreate(self):
'''
Try creating an ExternalSystem
'''
es1 = ExternalSystem(name='testESNameCreate',
url='http://testcreate.com',
description="a test system")
r = self.es_rh.create(es1)[0]
self.assertTrue(r.get('success'))
def testExternalSystemCreateDupe(self):
'''
Try creating an ExternalSystem with the same name as an existing
ExternalSystem. Expect this to fail.
'''
es = ExternalSystem(name=self.es_name1, url='http://testagain.com/',
description="a test system")
r = self.es_rh.create(es)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(
errors[0] == ErrorConstants.ERROR_EXTERNAL_SYSTEM_NAME_EXISTS
)
def testExternalSystemCreateDupe2(self):
'''
Try creating an ExternalSystem with the same URL as an existing
ExternalSystem. Expect this to fail.
'''
es = ExternalSystem(name='xxx', url='http://test.com/',
description="a test system")
r = self.es_rh.create(es)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(
errors[0] == ErrorConstants.ERROR_EXTERNAL_SYSTEM_URL_EXISTS
)
def testExternalSystemUpdate(self):
'''
Try updating an ExternalSystem's description. Expect success
'''
self.es1.description = 'This is a test system'
r = self.es_rh.update(self.es1)[0]
self.assertTrue(r.get('success'))
def testExternalSystemDelete(self):
'''
Try deleting an ExternalSystem by id. Expect subsequent query for this
ExternalSystem to fail.
'''
self.es_rh.delete(id=self.es1.id)
        self.assertRaises(PageNotFound, self.es_rh.get, id=self.es1.id)
def testExternalSystemDeleteBadID(self):
'''
Try deleting an ExternalSystem with a non-existent ID. Expect this to
fail.
'''
        self.assertRaises(PageNotFound, self.es_rh.delete, id='badid')
def testExternalSystemDeleteBadName(self):
'''
Try deleting an ExternalSystem with a non-existent name. Expect this to
fail.
'''
        self.assertRaises(PageNotFound, self.es_rh.delete, name='badname')
def testExternalSystemDeleteBadUrl(self):
'''
Try deleting an ExternalSystem with a non-existent url. Expect this to
fail.
'''
        self.assertRaises(PageNotFound, self.es_rh.delete, url='badurl')
def testExternalSystemMultiCreate(self):
'''
Try creating multiple ExternalSystem objects at the same time
'''
es1 = ExternalSystem(name='ES1Multi', url='http://testest1.com', description='A test system')
es2 = ExternalSystem(name='ES2Multi', url='http://testest2.com', description='A test system 2')
r = self.es_rh.create(es1, es2)
self.assertTrue(r[0].get('success') and r[1].get('success'))
self.es_rh.delete(name='ES1Multi')
self.es_rh.delete(name='ES2Multi')
def testExternalSystemMultiUpdate(self):
'''
Try updating multiple ExternalSystem objects
'''
self.es1.description = 'Im new and informative'
self.es2.description = 'Im new and exciting'
r = self.es_rh.update(self.es1, self.es2)
self.assertTrue(r[0].get('success') and r[1].get('success'))
def testExternalSystemUpdateBadName(self):
'''
Try updating an ExternalSystem with the name of an existing ExternalSystem
Expect this to fail
'''
self.es2.name = self.es1.name
r = self.es_rh.update(self.es2)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_EXTERNAL_SYSTEM_NAME_EXISTS)
def testExternalSystemUpdateBadUrl(self):
'''
Try updating an ExternalSystem with the url of an existing ExternalSystem
Expect this to fail
'''
self.es2.url = self.es1.url
r = self.es_rh.update(self.es2)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_EXTERNAL_SYSTEM_URL_EXISTS)
def testExternalSystemUpdateNoES(self):
'''
Try updating an ExternalSystem that does not exist
Expect this to fail
'''
self.es2.id = -1
r = self.es_rh.update(self.es2)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_RECORD_ID_NOT_FOUND)
def testExternalSystemGetByID(self):
'''
Try to retrieve an ExternalSystem by ID
'''
es = self.es_rh.get(id=self.es1.id)
self.assertTrue(es.id == self.es1.id)
def testExternalSystemGetByName(self):
'''
Try to retrieve an ExternalSystem by name
'''
es = self.es_rh.get(name=self.es1.name)
self.assertTrue(es.name == self.es1.name)
def testExternalSystemGetByBadID(self):
'''
Try to retrieve an ExternalSystem by bad ID
Expect this to fail.
'''
        self.assertRaises(PageNotFound, self.es_rh.get, id=-1)
def testExternalSystemGetByBadName(self):
'''
Try to retrieve an ExternalSystem by bad name
Expect this to fail.
'''
        self.assertRaises(PageNotFound, self.es_rh.get, name='nonexistent')
def testExternalSystemGetByBadUrl(self):
'''
        Try to retrieve an ExternalSystem by bad URL
        Expect this to fail.
        '''
        self.assertRaises(
            PageNotFound, self.es_rh.get, url='http://example.com/bad')
class TestSubjectRequestHandler(ehbTestClass):
def testSubjectCreate(self):
'''
Try to create a Subject
'''
dob = datetime.datetime.now()
s = Subject(
first_name='FIRST_ONE',
last_name="LAST_ONE",
organization_id=self.o1.id,
organization_subject_id='123456',
dob=dob)
r = self.s_rh.create(s)[0]
self.assertTrue(r.get('success'))
def testSubjectCreateFailure(self):
'''
Try to create a Subject with an existing mrn. Expect this to fail.
'''
dob = datetime.datetime.now()
s = Subject(
first_name='TEST',
last_name='ONE',
organization_id=self.o1.id,
organization_subject_id=self.s1.organization_subject_id,
dob=dob
)
r = self.s_rh.create(s)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_SUBJECT_ORG_ID_EXISTS)
def testSubjectUpdate(self):
'''
Try to update a Subject
'''
self.s1.old_subject = self.s1
self.s1.first_name = 'NEW NAME'
r = self.s_rh.update(self.s1)[0]
self.assertTrue(r.get('success'))
def testSubjectDelete(self):
'''
        Try to delete a Subject; expect subsequent retrieval of the Subject
        to fail
'''
self.s_rh.delete(id=self.s1.id)
        self.assertRaises(PageNotFound, self.s_rh.get, id=self.s1.id)
def testSubjectDeleteFail(self):
'''
Try to delete a non-existent Subject. Expect this to throw a PageNotFound
error.
'''
        self.assertRaises(
            PageNotFound, self.s_rh.delete,
            organization_subject_id='badid',
            organization_id=self.s1.organization_id)
def testSubjectMultiCreate(self):
'''
Try to create multiple Subjects with one request.
'''
dob = datetime.datetime.now()
s1 = Subject(first_name='SUBJECT', last_name="ONE",
organization_id=self.o1.id,
organization_subject_id='1111111', dob=dob)
s2 = Subject(first_name='SUBJECT', last_name="TWO",
organization_id=self.o1.id,
organization_subject_id='2222222', dob=dob)
r = self.s_rh.create(s1, s2)
b = r[0].get('success') and r[1].get('success')
self.assertTrue(b)
def testSubjectMultiUpdate(self):
'''
Try to update multiple Subject records with one request.
'''
self.s1.old_subject = self.s1
self.s1.first_name = 'ONE_FIRST'
self.s2.old_subject = self.s2
self.s2.first_name = 'TWO_FIRST'
r = self.s_rh.update(self.s1, self.s2)
b = r[0].get('success') and r[1].get('success')
self.assertTrue(b)
def testSubjectDupeOrgIdFail(self):
'''
Try to update an existing Subject record with the organization_subject_id
of another Subject record. Expect this to fail
'''
self.s1.organization_subject_id = self.s2.organization_subject_id
self.s1.old_subject = self.s1
r = self.s_rh.update(self.s1)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_SUBJECT_ORG_ID_EXISTS)
def testSubjectUpdateNonExistentSubject(self):
'''
Try to update a non existent Subject. Expect this to fail.
'''
self.s1.id = -1
self.s1.old_subject = self.s1
r = self.s_rh.update(self.s1)[0]
errors = r.get('errors')
self.assertFalse(r.get('success'))
self.assertTrue(len(errors) == 1)
self.assertTrue(errors[0] == ErrorConstants.ERROR_RECORD_ID_NOT_FOUND)
def testSubjectGetByID(self):
'''
Try to retrieve a Subject object by ID
'''
s = self.s_rh.get(id=self.s1.id)
self.assertTrue(s == self.s1)
def testSubjectGetByOrgAndOrgID(self):
'''
Try to retrieve Subject object by combination of Org and Org Id
'''
s = self.s_rh.get(
organization_id=self.s1.organization_id,
organization_subject_id=self.s1.organization_subject_id)
self.assertTrue(s == self.s1)
def testSubjectGetByIDFail(self):
'''
Try to retrieve Subject by non-existent ID. Expect this to fail.
'''
        self.assertRaises(PageNotFound, self.s_rh.get, id=-1)
def testSubjectGetByOrgandOrgIDFail(self):
'''
Try to retrieve a Subject by non-existent Org ID and Organization Sub ID
'''
        self.assertRaises(
            PageNotFound, self.s_rh.get,
            organization_id=-1, organization_subject_id='badid')
class TestGroupRequestHandler(ehbTestClass):
def testGroupCreate(self):
'''
Try to create a Group
'''
g1 = Group(name='testgroup2', is_locking=False,
client_key='ck', description='A test group')
r = self.g_rh.create(g1)[0]
self.assertTrue(r.get('success'))
def testGroupCreateMulti(self):
'''
        Try to create multiple Groups with one request
'''
g1 = Group(name='testgroup2', is_locking=False,
client_key='ck', description='A test group')
g2 = Group(name='testgroup3', is_locking=False,
client_key='ck', description='Another test group')
r = self.g_rh.create(g1, g2)
b = r[0].get('success') and r[1].get('success')
self.assertTrue(b)
def testGroupHaseHBKey(self):
'''
A created Group should have an ehb key
'''
        g = self.g_rh.get(name=self.g1.name)
        self.assertTrue(g.ehb_key)
def testGroupGetByID(self):
'''
Try to retrieve a Group by id.
'''
g = self.g_rh.get(id=self.g1.id)
self.assertEqual(self.g1.id, g.id)
def testGroupGetByName(self):
'''
Try to retrieve a Group by name.
'''
g = self.g_rh.get(name=self.g1.name)
self.assertEqual(g.id, self.g1.id)
def testGroupGetByIDFail(self):
'''
Try to retrieve a Group with a non-existent ID. Expect this to fail
'''
        self.assertRaises(RequestedRangeNotSatisfiable, self.g_rh.get, id=-1)
def testGroupGetByNameFail(self):
'''
Try to retrieve a Group with a non-existent ID. Expect this to fail
'''
        self.assertRaises(
            RequestedRangeNotSatisfiable, self.g_rh.get, name='badname')
def testGroupUpdate(self):
'''
Try to update a group
'''
self.g1.current_client_key(self.g1.client_key)
new_name = self.g1.name + 'xyz'
self.g1.name = new_name
r = self.g_rh.update(self.g1)[0]
self.assertTrue(r.get('success'))
def testGroupAddSubjects(self):
'''
Try to add subjects to a Group.
'''
r = self.g_rh.add_subjects(self.g1, [self.s2])[0]
self.assertTrue(r.get('success'))
def testGroupGetSubjects(self):
'''
Try to retrieve Subjects from a Group.
'''
r = self.g_rh.get_subjects(self.g1)
self.assertEqual(r[0], self.s1)
self.assertEqual(len(r), 1)
def testGroupRemoveSubjects(self):
'''
Try to remove a Subject from a Group.
'''
self.g_rh.remove_subject(self.g1, self.s1)
r = self.g_rh.get_subjects(self.g1)
self.assertEqual(len(r), 0)
def testGroupAddExternalRecords(self):
'''
Try to add an ExternalRecord to a Group.
'''
r = self.g_rh.add_records(self.g2, [self.er2])[0]
self.assertTrue(r.get('success'))
def testGroupGetExternalRecords(self):
'''
Try to retrieve the ExternalRecords of a Group.
'''
r = self.g_rh.get_records(self.g2)
self.assertEqual(r[0].record_id, self.er1.record_id)
def testGroupRemoveExternalRecords(self):
'''
Try to remove ExternalRecords from a Group.
'''
self.g_rh.remove_record(self.g2, self.er1)
r = self.g_rh.get_records(self.g2)
self.assertEqual(len(r), 0)
def testGroupDeleteByID(self):
'''
Try to remove a Group by ID. Expect subsequent request for that Group to fail.
'''
self.g_rh.delete(id=self.g1.id, client_key=self.g1.client_key)
        self.assertRaises(
            RequestedRangeNotSatisfiable, self.g_rh.get, id=self.g1.id)
def testGroupDeleteByName(self):
'''
Try to remove a Group by name. Expect subsequent request for that Group to fail.
'''
self.g_rh.delete(name=self.g1.name, client_key=self.g1.client_key)
        self.assertRaises(
            RequestedRangeNotSatisfiable, self.g_rh.get, name=self.g1.name)
class TestOrganizationRequestHandler(ehbTestClass):
def testOrganizationCreate(self):
'''
Try to create an Organization.
'''
o = Organization(name='testorg99', subject_id_label='subject')
r = self.o_rh.create(o)[0]
self.assertTrue(r.get('success'))
def testOrganizationCreateMulti(self):
'''
Try to create multiple Organizations with one request.
'''
pass
def testOrganizationCreateFailDupe(self):
'''
Try to create an Organization with a duplicate name. Expect this to fail.
'''
pass
def testOrganizationUpdate(self):
'''
Try to update the subject_id_label of an Organization
'''
pass
def testOrganizationDelete(self):
'''
Try to delete an Organization. Expect subsequent request for that
Organization to fail.
'''
pass
| chop-dbhi/ehb-client | ehb_client/tests/integ_tests/cases/request/tests.py | Python | bsd-2-clause | 38,676 | [
"exciting"
] | f3aff7ed87d8f0ece63b2976807b661db71b89bf45a61ba2c69ecf9ff8915410 |
from argparse import ArgumentParser, FileType
from operator import attrgetter
from subprocess import run
from pysam import AlignmentFile
def parse_args():
parser = ArgumentParser(description="Find reads that map to a position.")
parser.add_argument("bam_file", type=FileType('rb'))
parser.add_argument('target_region', help='Positions to look for (e.g., HCV-1a:100-200).')
return parser.parse_args()
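# Example invocation (file name and region are illustrative):
#     python find_reads_in_sam.py alignments.bam HCV-1a:100-200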
def main():
args = parse_args()
with args.bam_file:
bam_reader = AlignmentFile(args.bam_file)
if not bam_reader.has_index():
print('Adding index...')
index_args = ['samtools', 'index', args.bam_file.name]
run(index_args, check=True)
args.bam_file.seek(0) # Go back to start of header.
bam_reader = AlignmentFile(args.bam_file)
bam_reader.check_index()
        # Parse the region string up front to validate it; the parsed tuple
        # itself is not used below.
        bam_reader.parse_region(region=args.target_region)
sequences = sorted(bam_reader.fetch(region=args.target_region),
key=attrgetter('qname'))
print(len(sequences))
for seq in sequences:
print(seq.qname)
if __name__ == '__main__':
    main()
| cfe-lab/MiCall | micall/utils/find_reads_in_sam.py | Python | agpl-3.0 | 1,200 | [
"pysam"
] | 52e1860eb5e33a5d44d54b64f46385f5ab8ce1ed0a2e044e0d0c19d179180e64 |
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
# ----------------------------------------------------------------------------
"""General Instrument for loading pysat-written netCDF files.
Properties
----------
platform
'pysat', will be updated if file contains a platform attribute
name
'netcdf', will be updated if file contains a name attribute
tag
'', will be updated if file contains a tag attribute
inst_id
'', will be updated if file contains an inst_id attribute
Note
----
Only tested against pysat created netCDF files
Examples
--------
::
import pysat
# Load a test Instrument
inst = pysat.Instrument("pysat", "testing")
inst.load(date=inst.inst_module._test_dates[''][''])
# Create a NetCDF file
fname = "test_pysat_file_%Y%j.nc"
inst.to_netcdf4(fname=inst.date.strftime(fname))
# Load the NetCDF file
file_inst = pysat.Instrument(
"pysat", "netcdf", temporary_file_list=True, directory_format="./",
file_format="test_pysat_file_{year:04}{day:03}.nc")
file_inst.load(date=inst.inst_module._test_dates[''][''])
"""
import datetime as dt
import numpy as np
import warnings
import pysat
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'pysat'
name = 'netcdf'
tags = {'': ''}
inst_ids = {'': [tag for tag in tags.keys()]}
# ----------------------------------------------------------------------------
# Instrument testing attributes
_test_dates = {'': {'': dt.datetime(2009, 1, 1)}}
_test_download = {'': {'': False}}
_test_download_ci = {'': {'': False}}
# ----------------------------------------------------------------------------
# Instrument methods
def init(self, pandas_format=True):
"""Initialize the Instrument object with instrument specific values."""
self.acknowledgements = "Acknowledgements missing from file"
self.references = "References missing from file"
self.pandas_format = pandas_format
return
def clean(self):
"""Clean the file data."""
return
def preprocess(self):
"""Extract Instrument attrs from file attrs loaded to `Meta.header`."""
if hasattr(self.meta, "header"):
for iattr in ['platform', 'name', 'tag', 'inst_id', 'acknowledgements',
'references']:
if hasattr(self.meta.header, iattr):
setattr(self, iattr, getattr(self.meta.header, iattr))
return
# ----------------------------------------------------------------------------
# Instrument functions
def list_files(tag='', inst_id='', data_path=None, format_str=None):
"""Produce a list of pysat-written NetCDF files.
Parameters
----------
tag : str
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself. (default='')
inst_id : str
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself. (default='')
data_path : str or NoneType
Full path to directory containing files to be loaded. This
is provided by pysat. The user may specify their own data path
at Instrument instantiation and it will appear here. (default=None)
format_str : str or NoneType
String template used to parse the datasets filenames. If a user
supplies a template string at Instrument instantiation
then it will appear here, otherwise defaults to None. If None is
supplied, expects files with the format 'platform_name_YYYY_MM_DD.nc'
(default=None)
Returns
-------
pandas.Series
Series of filename strings, including the path, indexed by datetime.
"""
if format_str is None:
# User did not supply an alternative format template string
format_str = '_'.join([platform, name, '{year:04d}', '{month:02d}',
'{day:02d}.nc'])
# Use the pysat provided function to grab list of files from the
# local file system that match the format defined above
file_list = pysat.Files.from_os(data_path=data_path, format_str=format_str)
return file_list
def download(date_array, tag, inst_id, data_path=None):
"""Download data from the remote repository; not supported.
Parameters
----------
date_array : array-like
list of datetimes to download data for. The sequence of dates need not
be contiguous.
tag : str
Tag identifier used for particular dataset. This input is provided by
pysat. (default='')
inst_id : str
Satellite ID string identifier used for particular dataset. This input
is provided by pysat. (default='')
data_path : str or NoneType
Path to directory to download data to. (default=None)
"""
warnings.warn("".join(["Downloads are not currently supported for ",
"pysat netCDF files"]))
return
def load(fnames, tag='', inst_id='', strict_meta=False, file_format='NETCDF4',
epoch_name='Epoch', epoch_unit='ms', epoch_origin='unix',
pandas_format=True, decode_timedelta=False,
labels={'units': ('units', str), 'name': ('long_name', str),
'notes': ('notes', str), 'desc': ('desc', str),
'plot': ('plot_label', str), 'axis': ('axis', str),
'scale': ('scale', str), 'min_val': ('value_min', np.float64),
'max_val': ('value_max', np.float64),
'fill_val': ('fill', np.float64)}):
"""Load pysat-created NetCDF data and meta data.
Parameters
----------
fnames : array-like
iterable of filename strings, full path, to data files to be loaded.
This input is nominally provided by pysat itself.
tag : str
tag name used to identify particular data set to be loaded.
This input is nominally provided by pysat itself. While
tag defaults to None here, pysat provides '' as the default
tag unless specified by user at Instrument instantiation. (default='')
inst_id : str
Satellite ID used to identify particular data set to be loaded.
This input is nominally provided by pysat itself. (default='')
strict_meta : bool
Flag that checks if metadata across fnames is the same if True
(default=False)
file_format : str
file_format keyword passed to netCDF4 routine. Expects one of
'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', or 'NETCDF4'.
(default='NETCDF4')
epoch_name : str
Data key for epoch variable. The epoch variable is expected to be an
array of integer or float values denoting time elapsed from an origin
specified by `epoch_origin` with units specified by `epoch_unit`. This
epoch variable will be converted to a `DatetimeIndex` for consistency
across pysat instruments. (default='Epoch')
epoch_unit : str
The pandas-defined unit of the epoch variable ('D', 's', 'ms', 'us',
'ns'). (default='ms')
    epoch_origin : str or timestamp-convertible
        Origin of epoch calculation, following convention for
        `pandas.to_datetime`. Accepts timestamp-convertible objects, as well
        as two specific strings for commonly used calendars. These
        conversions are handled by `pandas.to_datetime`.
        If 'unix' (or POSIX) time, origin is set to 1970-01-01.
        If 'julian', `epoch_unit` must be 'D', and origin is set to the
        beginning of the Julian Calendar. Julian day number 0 is assigned to
        the day starting at noon on January 1, 4713 BC. (default='unix')
pandas_format : bool
Flag specifying if data is stored in a pandas DataFrame (True) or
        xarray Dataset (False). (default=True)
decode_timedelta : bool
Used for xarray data (`pandas_format` is False). If True, variables
with unit attributes that are 'timelike' ('hours', 'minutes', etc) are
converted to `np.timedelta64`. (default=False)
labels : dict
Dict where keys are the label attribute names and the values are tuples
that have the label values and value types in that order.
        (default={'units': ('units', str), 'name': ('long_name', str),
        'notes': ('notes', str), 'desc': ('desc', str),
        'plot': ('plot_label', str), 'axis': ('axis', str),
        'scale': ('scale', str), 'min_val': ('value_min', np.float64),
        'max_val': ('value_max', np.float64),
        'fill_val': ('fill', np.float64)})
Returns
-------
data : pds.DataFrame or xr.Dataset
Data to be assigned to the pysat.Instrument.data object.
mdata : pysat.Meta
Pysat Meta data for each data variable.
"""
    # netCDF4 files, particularly those produced by pysat, can be loaded with
    # the pysat-provided function pysat.utils.io.load_netcdf.
data, mdata = pysat.utils.io.load_netcdf(fnames, strict_meta=strict_meta,
file_format=file_format,
epoch_name=epoch_name,
epoch_unit=epoch_unit,
epoch_origin=epoch_origin,
pandas_format=pandas_format,
decode_timedelta=decode_timedelta,
labels=labels)
return data, mdata
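# A usage sketch (the path below is hypothetical; in practice pysat resolves
# filenames via list_files and invokes load through Instrument.load):
#
#     fnames = ['/data/pysat/netcdf/pysat_netcdf_2009_01_01.nc']
#     data, meta = load(fnames, epoch_name='Epoch', epoch_unit='ms',
#                       epoch_origin='unix', pandas_format=True)
#
# The epoch conversion described above follows pandas.to_datetime, e.g.:
#
#     >>> import pandas as pds
#     >>> pds.to_datetime([0, 86400000], unit='ms', origin='unix')
#     DatetimeIndex(['1970-01-01', '1970-01-02'], dtype='datetime64[ns]', freq=None)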
| rstoneback/pysat | pysat/instruments/pysat_netcdf.py | Python | bsd-3-clause | 9,543 | [
"NetCDF"
] | 73a66dc1b9ab50d83b0f2a933afc6bc41b7131c3d6c81627610ce5cc3fd1459a |
"""
@name: /home/briank/workspace/PyHouse/src/Modules/Core/_test/test_install.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2017-2017 by D. Brian Kimmel
@license: MIT License
@note: Created on May 5, 2017
@summary: Test
"""
__updated__ = '2017-05-05'
# Import system type stuff
import xml.etree.ElementTree as ET
from twisted.trial import unittest
# Import PyMh files
from test.xml_data import XML_LONG, TESTING_PYHOUSE
from test.testing_mixin import SetupPyHouseObj
from Modules.Core import install
from Modules.Computer.test.xml_computer import TESTING_COMPUTER_DIVISION
from Modules.Housing.test.xml_housing import TESTING_HOUSE_DIVISION
class SetupMixin(object):
"""
"""
def setUp(self, p_root):
self.m_pyhouse_obj = SetupPyHouseObj().BuildPyHouseObj(p_root)
self.m_xml = SetupPyHouseObj().BuildXml(p_root)
class A0(unittest.TestCase):
def setUp(self):
pass
def test_00_Print(self):
print('Id: test_install')
class A1_XML(SetupMixin, unittest.TestCase):
def setUp(self):
SetupMixin.setUp(self, ET.fromstring(XML_LONG))
def test_01_Tags(self):
""" Be sure that the XML contains the right stuff.
"""
# print(PrettyFormatAny.form(self.m_xml, 'A1-1-A - Tags'))
self.assertEqual(self.m_xml.root.tag, TESTING_PYHOUSE)
self.assertEqual(self.m_xml.computer_div.tag, TESTING_COMPUTER_DIVISION)
self.assertEqual(self.m_xml.house_div.tag, TESTING_HOUSE_DIVISION)
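# A usage note (a sketch; assumes the Modules package is importable from the
# project source root):
#
#     $ trial Modules.Core._test.test_install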
# ## END DBK
| DBrianKimmel/PyHouse | Project/src/Modules/Core/_test/test_install.py | Python | mit | 1,550 | [
"Brian"
] | 3b9eee5ae1534f05ff061247315115fc2c1dce3ac4d786da4d87ebffaa185aa5 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.instance_templates import InstanceTemplatesClient
from google.cloud.compute_v1.services.instance_templates import pagers
from google.cloud.compute_v1.services.instance_templates import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
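# For reference in the tests below: the default mTLS endpoint is derived by
# inserting an "mtls" label after the first component of the hostname, e.g.
# "example.googleapis.com" -> "example.mtls.googleapis.com" and
# "example.sandbox.googleapis.com" -> "example.mtls.sandbox.googleapis.com";
# hosts outside googleapis.com are returned unchanged.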
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert InstanceTemplatesClient._get_default_mtls_endpoint(None) is None
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
InstanceTemplatesClient._get_default_mtls_endpoint(non_googleapi)
== non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(InstanceTemplatesClient, "rest"),]
)
def test_instance_templates_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.InstanceTemplatesRestTransport, "rest"),],
)
def test_instance_templates_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(InstanceTemplatesClient, "rest"),]
)
def test_instance_templates_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_instance_templates_client_get_transport_class():
transport = InstanceTemplatesClient.get_transport_class()
available_transports = [
transports.InstanceTemplatesRestTransport,
]
assert transport in available_transports
transport = InstanceTemplatesClient.get_transport_class("rest")
assert transport == transports.InstanceTemplatesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),],
)
@mock.patch.object(
InstanceTemplatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstanceTemplatesClient),
)
def test_instance_templates_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(InstanceTemplatesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(InstanceTemplatesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
InstanceTemplatesClient,
transports.InstanceTemplatesRestTransport,
"rest",
"true",
),
(
InstanceTemplatesClient,
transports.InstanceTemplatesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
InstanceTemplatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstanceTemplatesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_instance_templates_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [InstanceTemplatesClient])
@mock.patch.object(
InstanceTemplatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(InstanceTemplatesClient),
)
def test_instance_templates_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),],
)
def test_instance_templates_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(
InstanceTemplatesClient,
transports.InstanceTemplatesRestTransport,
"rest",
None,
),
],
)
def test_instance_templates_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("request_type", [compute.DeleteInstanceTemplateRequest, dict,])
def test_delete_unary_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "instance_template": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteInstanceTemplateRequest,
):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["instance_template"] = ""
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["instanceTemplate"] = "instance_template_value"
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed together.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "instanceTemplate" in jsonified_request
assert jsonified_request["instanceTemplate"] == "instance_template_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("instanceTemplate", "project",))
)
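# Note on the assertion above: the expected value is the intersection of the
# optional system query parameters ({"requestId"}) with the method's required
# fields ({"instanceTemplate", "project"}), which is empty here; none of the
# delete call's required fields doubles as an optional query parameter.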
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_delete"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_delete"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.DeleteInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.delete_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "instance_template": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "instance_template": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", instance_template="instance_template_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
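        # path_template.validate checks the URI actually requested (args[1])
        # against the expanded http rule, confirming the flattened arguments
        # were transcoded into the expected path parameters.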
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteInstanceTemplateRequest(),
project="project_value",
instance_template="instance_template_value",
)
def test_delete_unary_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetInstanceTemplateRequest, dict,])
def test_get_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "instance_template": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value",
description="description_value",
id=205,
kind="kind_value",
name="name_value",
self_link="self_link_value",
source_instance="source_instance_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.InstanceTemplate)
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.self_link == "self_link_value"
assert response.source_instance == "source_instance_value"
def test_get_rest_required_fields(request_type=compute.GetInstanceTemplateRequest):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["instance_template"] = ""
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["instanceTemplate"] = "instance_template_value"
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "instanceTemplate" in jsonified_request
assert jsonified_request["instanceTemplate"] == "instance_template_value"
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplate()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("instanceTemplate", "project",)))
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_get"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_get"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.InstanceTemplate.to_json(
compute.InstanceTemplate()
)
request = compute.GetInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.InstanceTemplate
client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "instance_template": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplate()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "instance_template": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", instance_template="instance_template_value",
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetInstanceTemplateRequest(),
project="project_value",
instance_template="instance_template_value",
)
def test_get_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.GetIamPolicyInstanceTemplateRequest, dict,]
)
def test_get_iam_policy_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(etag="etag_value", iam_owned=True, version=774,)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.version == 774
def test_get_iam_policy_rest_required_fields(
request_type=compute.GetIamPolicyInstanceTemplateRequest,
):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get_iam_policy._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed together.
assert not set(unset_fields) - set(("options_requested_policy_version",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get_iam_policy(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_iam_policy_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.get_iam_policy._get_unset_required_fields({})
assert set(unset_fields) == (
set(("optionsRequestedPolicyVersion",)) & set(("project", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_iam_policy_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_get_iam_policy"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_get_iam_policy"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Policy.to_json(compute.Policy())
request = compute.GetIamPolicyInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Policy
client.get_iam_policy(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_get_iam_policy_rest_bad_request(
transport: str = "rest", request_type=compute.GetIamPolicyInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get_iam_policy(request)
def test_get_iam_policy_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "resource": "sample2"}
# get truthy value for each flattened field
mock_args = dict(project="project_value", resource="resource_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.get_iam_policy(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/getIamPolicy"
% client.transport._host,
args[1],
)
def test_get_iam_policy_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_iam_policy(
compute.GetIamPolicyInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
)
def test_get_iam_policy_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.InsertInstanceTemplateRequest, dict,])
def test_insert_unary_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["instance_template_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"properties": {
"advanced_machine_features": {
"enable_nested_virtualization": True,
"enable_uefi_networking": True,
"threads_per_core": 1689,
},
"can_ip_forward": True,
"confidential_instance_config": {"enable_confidential_compute": True},
"description": "description_value",
"disks": [
{
"auto_delete": True,
"boot": True,
"device_name": "device_name_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"disk_size_gb": 1261,
"guest_os_features": [{"type_": "type__value"}],
"index": 536,
"initialize_params": {
"description": "description_value",
"disk_name": "disk_name_value",
"disk_size_gb": 1261,
"disk_type": "disk_type_value",
"labels": {},
"licenses": ["licenses_value_1", "licenses_value_2"],
"on_update_action": "on_update_action_value",
"provisioned_iops": 1740,
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
},
"interface": "interface_value",
"kind": "kind_value",
"licenses": ["licenses_value_1", "licenses_value_2"],
"mode": "mode_value",
"shielded_instance_initial_state": {
"dbs": [
{"content": "content_value", "file_type": "file_type_value"}
],
"dbxs": {},
"keks": {},
"pk": {},
},
"source": "source_value",
"type_": "type__value",
}
],
"guest_accelerators": [
{
"accelerator_count": 1805,
"accelerator_type": "accelerator_type_value",
}
],
"labels": {},
"machine_type": "machine_type_value",
"metadata": {
"fingerprint": "fingerprint_value",
"items": [{"key": "key_value", "value": "value_value"}],
"kind": "kind_value",
},
"min_cpu_platform": "min_cpu_platform_value",
"network_interfaces": [
{
"access_configs": [
{
"external_ipv6": "external_ipv6_value",
"external_ipv6_prefix_length": 2837,
"kind": "kind_value",
"name": "name_value",
"nat_i_p": "nat_i_p_value",
"network_tier": "network_tier_value",
"public_ptr_domain_name": "public_ptr_domain_name_value",
"set_public_ptr": True,
"type_": "type__value",
}
],
"alias_ip_ranges": [
{
"ip_cidr_range": "ip_cidr_range_value",
"subnetwork_range_name": "subnetwork_range_name_value",
}
],
"fingerprint": "fingerprint_value",
"ipv6_access_configs": {},
"ipv6_access_type": "ipv6_access_type_value",
"ipv6_address": "ipv6_address_value",
"kind": "kind_value",
"name": "name_value",
"network": "network_value",
"network_i_p": "network_i_p_value",
"nic_type": "nic_type_value",
"queue_count": 1197,
"stack_type": "stack_type_value",
"subnetwork": "subnetwork_value",
}
],
"network_performance_config": {
"total_egress_bandwidth_tier": "total_egress_bandwidth_tier_value"
},
"private_ipv6_google_access": "private_ipv6_google_access_value",
"reservation_affinity": {
"consume_reservation_type": "consume_reservation_type_value",
"key": "key_value",
"values": ["values_value_1", "values_value_2"],
},
"resource_manager_tags": {},
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"scheduling": {
"automatic_restart": True,
"instance_termination_action": "instance_termination_action_value",
"location_hint": "location_hint_value",
"min_node_cpus": 1379,
"node_affinities": [
{
"key": "key_value",
"operator": "operator_value",
"values": ["values_value_1", "values_value_2"],
}
],
"on_host_maintenance": "on_host_maintenance_value",
"preemptible": True,
"provisioning_model": "provisioning_model_value",
},
"service_accounts": [
{"email": "email_value", "scopes": ["scopes_value_1", "scopes_value_2"]}
],
"shielded_instance_config": {
"enable_integrity_monitoring": True,
"enable_secure_boot": True,
"enable_vtpm": True,
},
"tags": {
"fingerprint": "fingerprint_value",
"items": ["items_value_1", "items_value_2"],
},
},
"self_link": "self_link_value",
"source_instance": "source_instance_value",
"source_instance_params": {
"disk_configs": [
{
"auto_delete": True,
"custom_image": "custom_image_value",
"device_name": "device_name_value",
"instantiate_from": "instantiate_from_value",
}
]
},
}
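    # Note: the dict above mirrors the JSON form of compute.InstanceTemplate;
    # passing the whole request_init mapping positionally lets the proto-plus
    # message constructor populate the nested fields directly.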
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertInstanceTemplateRequest,
):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixed together.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("instanceTemplateResource", "project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_insert"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.InsertInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Operation
client.insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["instance_template_resource"] = {
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"id": 205,
"kind": "kind_value",
"name": "name_value",
"properties": {
"advanced_machine_features": {
"enable_nested_virtualization": True,
"enable_uefi_networking": True,
"threads_per_core": 1689,
},
"can_ip_forward": True,
"confidential_instance_config": {"enable_confidential_compute": True},
"description": "description_value",
"disks": [
{
"auto_delete": True,
"boot": True,
"device_name": "device_name_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"disk_size_gb": 1261,
"guest_os_features": [{"type_": "type__value"}],
"index": 536,
"initialize_params": {
"description": "description_value",
"disk_name": "disk_name_value",
"disk_size_gb": 1261,
"disk_type": "disk_type_value",
"labels": {},
"licenses": ["licenses_value_1", "licenses_value_2"],
"on_update_action": "on_update_action_value",
"provisioned_iops": 1740,
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
},
"interface": "interface_value",
"kind": "kind_value",
"licenses": ["licenses_value_1", "licenses_value_2"],
"mode": "mode_value",
"shielded_instance_initial_state": {
"dbs": [
{"content": "content_value", "file_type": "file_type_value"}
],
"dbxs": {},
"keks": {},
"pk": {},
},
"source": "source_value",
"type_": "type__value",
}
],
"guest_accelerators": [
{
"accelerator_count": 1805,
"accelerator_type": "accelerator_type_value",
}
],
"labels": {},
"machine_type": "machine_type_value",
"metadata": {
"fingerprint": "fingerprint_value",
"items": [{"key": "key_value", "value": "value_value"}],
"kind": "kind_value",
},
"min_cpu_platform": "min_cpu_platform_value",
"network_interfaces": [
{
"access_configs": [
{
"external_ipv6": "external_ipv6_value",
"external_ipv6_prefix_length": 2837,
"kind": "kind_value",
"name": "name_value",
"nat_i_p": "nat_i_p_value",
"network_tier": "network_tier_value",
"public_ptr_domain_name": "public_ptr_domain_name_value",
"set_public_ptr": True,
"type_": "type__value",
}
],
"alias_ip_ranges": [
{
"ip_cidr_range": "ip_cidr_range_value",
"subnetwork_range_name": "subnetwork_range_name_value",
}
],
"fingerprint": "fingerprint_value",
"ipv6_access_configs": {},
"ipv6_access_type": "ipv6_access_type_value",
"ipv6_address": "ipv6_address_value",
"kind": "kind_value",
"name": "name_value",
"network": "network_value",
"network_i_p": "network_i_p_value",
"nic_type": "nic_type_value",
"queue_count": 1197,
"stack_type": "stack_type_value",
"subnetwork": "subnetwork_value",
}
],
"network_performance_config": {
"total_egress_bandwidth_tier": "total_egress_bandwidth_tier_value"
},
"private_ipv6_google_access": "private_ipv6_google_access_value",
"reservation_affinity": {
"consume_reservation_type": "consume_reservation_type_value",
"key": "key_value",
"values": ["values_value_1", "values_value_2"],
},
"resource_manager_tags": {},
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"scheduling": {
"automatic_restart": True,
"instance_termination_action": "instance_termination_action_value",
"location_hint": "location_hint_value",
"min_node_cpus": 1379,
"node_affinities": [
{
"key": "key_value",
"operator": "operator_value",
"values": ["values_value_1", "values_value_2"],
}
],
"on_host_maintenance": "on_host_maintenance_value",
"preemptible": True,
"provisioning_model": "provisioning_model_value",
},
"service_accounts": [
{"email": "email_value", "scopes": ["scopes_value_1", "scopes_value_2"]}
],
"shielded_instance_config": {
"enable_integrity_monitoring": True,
"enable_secure_boot": True,
"enable_vtpm": True,
},
"tags": {
"fingerprint": "fingerprint_value",
"items": ["items_value_1", "items_value_2"],
},
},
"self_link": "self_link_value",
"source_instance": "source_instance_value",
"source_instance_params": {
"disk_configs": [
{
"auto_delete": True,
"custom_image": "custom_image_value",
"device_name": "device_name_value",
"instantiate_from": "instantiate_from_value",
}
]
},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
instance_template_resource=compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value"
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertInstanceTemplateRequest(),
project="project_value",
instance_template_resource=compute.InstanceTemplate(
creation_timestamp="creation_timestamp_value"
),
)
def test_insert_unary_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ListInstanceTemplatesRequest, dict,])
def test_list_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplateList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListInstanceTemplatesRequest):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(
("filter", "max_results", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplateList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_list"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_list"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.InstanceTemplateList.to_json(
compute.InstanceTemplateList()
)
request = compute.ListInstanceTemplatesRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.InstanceTemplateList
client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
pre.assert_called_once()
post.assert_called_once()
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListInstanceTemplatesRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.InstanceTemplateList()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.InstanceTemplateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListInstanceTemplatesRequest(), project="project_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
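        # The pager is expected to follow next_page_token across the pages
        # below ("abc" -> "def" -> "ghi" -> "") and yield all six items.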
response = (
compute.InstanceTemplateList(
items=[
compute.InstanceTemplate(),
compute.InstanceTemplate(),
compute.InstanceTemplate(),
],
next_page_token="abc",
),
compute.InstanceTemplateList(items=[], next_page_token="def",),
compute.InstanceTemplateList(
items=[compute.InstanceTemplate(),], next_page_token="ghi",
),
compute.InstanceTemplateList(
items=[compute.InstanceTemplate(), compute.InstanceTemplate(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.InstanceTemplateList.to_json(x) for x in response)
return_values = tuple(Response() for i in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.InstanceTemplate) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type", [compute.SetIamPolicyInstanceTemplateRequest, dict,]
)
def test_set_iam_policy_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["global_set_policy_request_resource"] = {
"bindings": [
{
"binding_id": "binding_id_value",
"condition": {
"description": "description_value",
"expression": "expression_value",
"location": "location_value",
"title": "title_value",
},
"members": ["members_value_1", "members_value_2"],
"role": "role_value",
}
],
"etag": "etag_value",
"policy": {
"audit_configs": [
{
"audit_log_configs": [
{
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"ignore_child_exemptions": True,
"log_type": "log_type_value",
}
],
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"service": "service_value",
}
],
"bindings": {},
"etag": "etag_value",
"iam_owned": True,
"rules": [
{
"action": "action_value",
"conditions": [
{
"iam": "iam_value",
"op": "op_value",
"svc": "svc_value",
"sys": "sys_value",
"values": ["values_value_1", "values_value_2"],
}
],
"description": "description_value",
"ins": ["ins_value_1", "ins_value_2"],
"log_configs": [
{
"cloud_audit": {
"authorization_logging_options": {
"permission_type": "permission_type_value"
},
"log_name": "log_name_value",
},
"counter": {
"custom_fields": [
{"name": "name_value", "value": "value_value"}
],
"field": "field_value",
"metric": "metric_value",
},
"data_access": {"log_mode": "log_mode_value"},
}
],
"not_ins": ["not_ins_value_1", "not_ins_value_2"],
"permissions": ["permissions_value_1", "permissions_value_2"],
}
],
"version": 774,
},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy(etag="etag_value", iam_owned=True, version=774,)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Policy)
assert response.etag == "etag_value"
assert response.iam_owned is True
assert response.version == 774
def test_set_iam_policy_rest_required_fields(
request_type=compute.SetIamPolicyInstanceTemplateRequest,
):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).set_iam_policy._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.set_iam_policy(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_set_iam_policy_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.set_iam_policy._get_unset_required_fields({})
assert set(unset_fields) == (
set(()) & set(("globalSetPolicyRequestResource", "project", "resource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_set_iam_policy_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_set_iam_policy"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_set_iam_policy"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Policy.to_json(compute.Policy())
request = compute.SetIamPolicyInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.Policy
client.set_iam_policy(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_set_iam_policy_rest_bad_request(
transport: str = "rest", request_type=compute.SetIamPolicyInstanceTemplateRequest
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["global_set_policy_request_resource"] = {
"bindings": [
{
"binding_id": "binding_id_value",
"condition": {
"description": "description_value",
"expression": "expression_value",
"location": "location_value",
"title": "title_value",
},
"members": ["members_value_1", "members_value_2"],
"role": "role_value",
}
],
"etag": "etag_value",
"policy": {
"audit_configs": [
{
"audit_log_configs": [
{
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"ignore_child_exemptions": True,
"log_type": "log_type_value",
}
],
"exempted_members": [
"exempted_members_value_1",
"exempted_members_value_2",
],
"service": "service_value",
}
],
"bindings": {},
"etag": "etag_value",
"iam_owned": True,
"rules": [
{
"action": "action_value",
"conditions": [
{
"iam": "iam_value",
"op": "op_value",
"svc": "svc_value",
"sys": "sys_value",
"values": ["values_value_1", "values_value_2"],
}
],
"description": "description_value",
"ins": ["ins_value_1", "ins_value_2"],
"log_configs": [
{
"cloud_audit": {
"authorization_logging_options": {
"permission_type": "permission_type_value"
},
"log_name": "log_name_value",
},
"counter": {
"custom_fields": [
{"name": "name_value", "value": "value_value"}
],
"field": "field_value",
"metric": "metric_value",
},
"data_access": {"log_mode": "log_mode_value"},
}
],
"not_ins": ["not_ins_value_1", "not_ins_value_2"],
"permissions": ["permissions_value_1", "permissions_value_2"],
}
],
"version": 774,
},
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.set_iam_policy(request)
def test_set_iam_policy_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Policy()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "resource": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
resource="resource_value",
global_set_policy_request_resource=compute.GlobalSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Policy.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.set_iam_policy(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/setIamPolicy"
% client.transport._host,
args[1],
)
def test_set_iam_policy_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.set_iam_policy(
compute.SetIamPolicyInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
global_set_policy_request_resource=compute.GlobalSetPolicyRequest(
bindings=[compute.Binding(binding_id="binding_id_value")]
),
)
def test_set_iam_policy_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize(
"request_type", [compute.TestIamPermissionsInstanceTemplateRequest, dict,]
)
def test_test_iam_permissions_rest(request_type):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse(
permissions=["permissions_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.TestPermissionsResponse)
assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_rest_required_fields(
request_type=compute.TestIamPermissionsInstanceTemplateRequest,
):
transport_class = transports.InstanceTemplatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["resource"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["resource"] = "resource_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).test_iam_permissions._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "resource" in jsonified_request
assert jsonified_request["resource"] == "resource_value"
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.test_iam_permissions(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_test_iam_permissions_rest_unset_required_fields():
transport = transports.InstanceTemplatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
assert set(unset_fields) == (
set(()) & set(("project", "resource", "testPermissionsRequestResource",))
)
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_test_iam_permissions_rest_interceptors(null_interceptor):
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.InstanceTemplatesRestInterceptor(),
)
client = InstanceTemplatesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "post_test_iam_permissions"
) as post, mock.patch.object(
transports.InstanceTemplatesRestInterceptor, "pre_test_iam_permissions"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.TestPermissionsResponse.to_json(
compute.TestPermissionsResponse()
)
request = compute.TestIamPermissionsInstanceTemplateRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
post.return_value = compute.TestPermissionsResponse
client.test_iam_permissions(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
def test_test_iam_permissions_rest_bad_request(
transport: str = "rest",
request_type=compute.TestIamPermissionsInstanceTemplateRequest,
):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "resource": "sample2"}
request_init["test_permissions_request_resource"] = {
"permissions": ["permissions_value_1", "permissions_value_2"]
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.test_iam_permissions(request)
def test_test_iam_permissions_rest_flattened():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.TestPermissionsResponse()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "resource": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.TestPermissionsResponse.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.test_iam_permissions(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/testIamPermissions"
% client.transport._host,
args[1],
)
def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.test_iam_permissions(
compute.TestIamPermissionsInstanceTemplateRequest(),
project="project_value",
resource="resource_value",
test_permissions_request_resource=compute.TestPermissionsRequest(
permissions=["permissions_value"]
),
)
def test_test_iam_permissions_rest_error():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = InstanceTemplatesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = InstanceTemplatesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.InstanceTemplatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = InstanceTemplatesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize(
"transport_class", [transports.InstanceTemplatesRestTransport,]
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_instance_templates_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.InstanceTemplatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_instance_templates_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.InstanceTemplatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"delete",
"get",
"get_iam_policy",
"insert",
"list",
"set_iam_policy",
"test_iam_permissions",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_instance_templates_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstanceTemplatesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_instance_templates_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.InstanceTemplatesTransport()
adc.assert_called_once()
def test_instance_templates_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
InstanceTemplatesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_instance_templates_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.InstanceTemplatesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_instance_templates_host_no_port(transport_name):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_instance_templates_host_with_port(transport_name):
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = InstanceTemplatesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = InstanceTemplatesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = InstanceTemplatesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = InstanceTemplatesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = InstanceTemplatesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = InstanceTemplatesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = InstanceTemplatesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = InstanceTemplatesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = InstanceTemplatesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = InstanceTemplatesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = InstanceTemplatesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.InstanceTemplatesTransport, "_prep_wrapped_messages"
) as prep:
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.InstanceTemplatesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = InstanceTemplatesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = InstanceTemplatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(InstanceTemplatesClient, transports.InstanceTemplatesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| googleapis/python-compute | tests/unit/gapic/compute_v1/test_instance_templates.py | Python | apache-2.0 | 127,769 | [ "Octopus" ] | ed41a8fc81eefafdce0aefb0c6dacc1e4f4f39e0c8c4aaed0bbcbca9b718c4a9 |
from __future__ import division
import numpy as np
from joblib import Parallel, delayed
from scipy.special import wofz
from scipy.optimize import curve_fit
from scipy.sparse import spdiags
from scipy.sparse import lil_matrix
from scipy.sparse.linalg import spsolve
from scipy.interpolate import interp1d
from scipy.stats import norm
from scipy.optimize import least_squares
from statsmodels.nonparametric.smoothers_lowess import lowess
from gmr import GMM
from protoclass.data_management import RDAModality
from protoclass.preprocessing import MRSIPhaseCorrection
from protoclass.preprocessing import MRSIFrequencyCorrection
from protoclass.preprocessing import MRSIBaselineCorrection
from fdasrsf import srsf_align
import matplotlib.pyplot as plt
path_mrsi = '/data/prostate/experiments/Patient 1036/MRSI/CSI_SE_3D_140ms_16c.rda'
def _find_nearest(array, value):
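    """Return the index of the element of ``array`` closest to ``value``."""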
idx = (np.abs(array - value)).argmin()
return idx
def _gaussian_profile(x, alpha, mu, sigma):
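    """Evaluate ``alpha`` times the normal pdf with mean ``mu`` and
    standard deviation ``sigma`` at the points ``x``."""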
return alpha * norm.pdf(x, loc=mu, scale=sigma)
def _voigt_profile(x, alpha, mu, sigma, gamma):
"""Private function to fit a Voigt profile.
Parameters
----------
x : ndarray, shape (len(x))
The input data.
alpha : float,
The amplitude factor.
mu : float,
The shift of the central value.
    sigma : float,
        The standard deviation of the Gaussian component.
    gamma : float,
        The half-width at half-maximum of the Lorentzian component.
Returns
-------
y : ndarray, shape (len(x), )
The Voigt profile.
"""
# Define z
z = ((x - mu) + 1j * gamma) / (sigma * np.sqrt(2))
    # Compute the Faddeeva function w(z)
w = wofz(z)
return alpha * (np.real(w)) / (sigma * np.sqrt(2. * np.pi))
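# Minimal usage sketch for ``_voigt_profile`` (illustrative values only): the
# profile tends to a Gaussian as ``gamma`` -> 0 and to a Lorentzian as
# ``sigma`` -> 0, e.g.
#   xs = np.linspace(-1., 1., 201)
#   nearly_gaussian = _voigt_profile(xs, 1., 0., .1, 1e-6)
#   nearly_lorentzian = _voigt_profile(xs, 1., 0., 1e-6, .1)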
def _ch_sp_cr_cit_model(x,
mu, sigma_1, gamma_1, alpha_1,
delta_2, sigma_2, gamma_2, alpha_2,
delta_3, sigma_3, gamma_3, alpha_3,
delta_4, sigma_4, gamma_4, alpha_4,
delta_5, sigma_5, gamma_5, alpha_5,
delta_6, sigma_6, gamma_6, alpha_6):
"""Private function to create the mixute of Voigt profiles."""
signal = _voigt_profile(x, alpha_1, mu, sigma_1, gamma_1)
signal += _voigt_profile(x, alpha_2, mu + delta_2, sigma_2, gamma_2)
signal += _voigt_profile(x, alpha_3, mu - delta_3, sigma_3, gamma_3)
signal += _voigt_profile(x, alpha_4, mu + delta_4, sigma_4, gamma_4)
signal += _voigt_profile(x, alpha_5, mu + delta_4 - delta_5, sigma_5,
gamma_5)
signal += _voigt_profile(x, alpha_6, mu + delta_4 - delta_6, sigma_6,
gamma_6)
return signal
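# Note: all six components above are anchored to the citrate centre ``mu``:
# components 1-3 appear to model the citrate multiplet (at mu and
# mu +/- delta_2/delta_3), while components 4-6 reach the choline/spermine/
# creatine region through ``delta_4`` and the sub-offsets ``delta_5``/
# ``delta_6`` (an interpretation based on the function name and the default
# shifts used in the fitting routines below).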
def _cit_model(x,
mu, sigma_1, gamma_1, alpha_1,
delta_2, sigma_2, gamma_2, alpha_2,
delta_3, sigma_3, gamma_3, alpha_3):
"""Private function to create the mixute of Voigt profiles."""
signal = _voigt_profile(x, alpha_1, mu, sigma_1, gamma_1)
signal += _voigt_profile(x, alpha_2, mu + delta_2, sigma_2, gamma_2)
signal += _voigt_profile(x, alpha_3, mu - delta_3, sigma_3, gamma_3)
return signal
def voigt(ppm, x):
signal = _voigt_profile(ppm, x[3], x[0], x[1], x[2])
signal += _voigt_profile(ppm, x[7], x[0] + x[4], x[5], x[6])
signal += _voigt_profile(ppm, x[11], x[0] - x[8], x[9], x[10])
signal += _voigt_profile(ppm, x[15], x[0] + x[12], x[13], x[14])
signal += _voigt_profile(ppm, x[19], x[0] + x[12] - x[16], x[17], x[18])
signal += _voigt_profile(ppm, x[23], x[0] + x[12] - x[20], x[21], x[22])
return signal
def ls_voigt(x, ppm, y):
signal = _voigt_profile(ppm, x[3], x[0], x[1], x[2])
signal += _voigt_profile(ppm, x[7], x[0] + x[4], x[5], x[6])
signal += _voigt_profile(ppm, x[11], x[0] - x[8], x[9], x[10])
signal += _voigt_profile(ppm, x[15], x[0] + x[12], x[13], x[14])
signal += _voigt_profile(ppm, x[19], x[0] + x[12] - x[16], x[17], x[18])
signal += _voigt_profile(ppm, x[23], x[0] + x[12] - x[20], x[21], x[22])
return signal - y
def gauss(ppm, x):
signal = _gaussian_profile(ppm, x[2], x[0], x[1])
signal += _gaussian_profile(ppm, x[5], x[0] + x[3], x[4])
signal += _gaussian_profile(ppm, x[8], x[0] - x[6], x[7])
return signal
def ls_gauss(x, ppm, y):
signal = _gaussian_profile(ppm, x[2], x[0], x[1])
signal += _gaussian_profile(ppm, x[5], x[0] + x[3], x[4])
signal += _gaussian_profile(ppm, x[8], x[0] - x[6], x[7])
return signal - y
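# ``voigt`` and ``gauss`` evaluate the same mixtures from a flat parameter
# vector ``x``, while ``ls_voigt`` and ``ls_gauss`` return the residual
# (model - data) in the callable form expected by
# scipy.optimize.least_squares.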
def _cit_gaussian_model(x,
mu, sigma_1, alpha_1,
delta_2, sigma_2, alpha_2,
delta_3, sigma_3, alpha_3):
"""Private function to create the mixute of Voigt profiles."""
signal = _gaussian_profile(x, alpha_1, mu, sigma_1)
#signal += _gaussian_profile(x, alpha_2, mu + delta_2, sigma_2)
#signal += _gaussian_profile(x, alpha_3, mu - delta_3, sigma_3)
return signal
def _ch_sp_cr_cit_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
choline, spermine, creatine, and citrate metabolites.
"""
ppm_limits = (2.35, 3.25)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
delta_4_dft = .58
delta_5_dft = .12
delta_6_dft = .16
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.08, .17)
delta_3_bounds = (.08, .17)
delta_4_bounds = (.55, .61)
delta_5_bounds = (.11, .13)
delta_6_bounds = (.13, .17)
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_4_dft = (f(mu_dft + delta_4_dft) /
_voigt_profile(0., 1., 0., .001, .001))
    alpha_5_dft = (f(mu_dft + delta_4_dft - delta_5_dft) /
                   _voigt_profile(0., 1., 0., .001, .001))
    alpha_6_dft = (f(mu_dft + delta_4_dft - delta_6_dft) /
                   _voigt_profile(0., 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft,
delta_4_dft, .001, .001, alpha_4_dft,
delta_5_dft, .001, .001, alpha_5_dft,
delta_6_dft, .001, .001, alpha_6_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.,
delta_4_bounds[0], 0., 0., 0.,
delta_5_bounds[0], 0., 0., 0.,
delta_6_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf,
delta_4_bounds[1], np.inf, np.inf, np.inf,
delta_5_bounds[1], np.inf, np.inf, np.inf,
delta_6_bounds[1], np.inf, np.inf, np.inf])
try:
popt, _ = curve_fit(_ch_sp_cr_cit_model, ppm_interp,
f(ppm_interp),
p0=popt_default, bounds=param_bounds)
except RuntimeError:
popt = popt_default
return popt
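# Sketch of how the fitted parameters can be turned back into a curve
# (assuming a 1-D ``ppm`` axis and the matching real-valued ``spectrum``):
#   popt = _ch_sp_cr_cit_fitting(ppm, spectrum)
#   fitted = _ch_sp_cr_cit_model(ppm, *popt)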
def _cit_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
citrate metabolites.
"""
ppm_limits = (2.35, 2.85)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.08, .17)
delta_3_bounds = (.08, .17)
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf])
try:
popt, _ = curve_fit(_cit_model, ppm_interp,
f(ppm_interp),
p0=popt_default, bounds=param_bounds)
except RuntimeError:
popt = popt_default
return popt
def _cit_gaussian_fitting(ppm, spectrum):
"""Private function to fit a mixture of Voigt profile to
citrate metabolites.
"""
ppm_limits = (2.35, 3.25)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
f = interp1d(sub_ppm, sub_spectrum, kind='cubic')
ppm_interp = np.linspace(sub_ppm[0], sub_ppm[-1], num=5000)
# Define the default parameters
# Define the default shifts
mu_dft = 2.56
delta_2_dft = .14
delta_3_dft = .14
delta_4_dft = .58
delta_5_dft = .12
delta_6_dft = .16
# Define their bounds
mu_bounds = (2.54, 2.68)
delta_2_bounds = (.12, .16)
delta_3_bounds = (.12, .16)
delta_4_bounds = (.55, .61)
delta_5_bounds = (.11, .13)
delta_6_bounds = (.13, .17)
# # Define the default amplitude
# alpha_1_dft = (f(mu_dft) /
# _gaussian_profile(0., 1., 0., .01))
# alpha_2_dft = (f(mu_dft + delta_2_dft) /
# _gaussian_profile(0., 1., 0., .01))
# alpha_3_dft = (f(mu_dft - delta_3_dft) /
# _gaussian_profile(0., 1., 0., .01))
# # Create the vector for the default parameters
# popt_default = [mu_dft, .01, alpha_1_dft,
# delta_2_dft, .01, alpha_2_dft,
# delta_3_dft, .01, alpha_3_dft]
# # Define the bounds properly
# param_bounds = ([mu_bounds[0], 0., 0.,
# delta_2_bounds[0], 0., 0.,
# delta_3_bounds[0], 0., 0.],
# [mu_bounds[1], np.inf, np.inf,
# delta_2_bounds[1], np.inf, np.inf,
# delta_3_bounds[1], np.inf, np.inf])
# Define the default amplitude
alpha_1_dft = (f(mu_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_2_dft = (f(mu_dft + delta_2_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_3_dft = (f(mu_dft - delta_3_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_4_dft = (f(mu_dft + delta_4_dft) /
_voigt_profile(0., 1., 0., .001, .001))
alpha_5_dft = (f(mu_dft + delta_4_dft - delta_5_dft) /
_voigt_profile(0, 1., 0., .001, .001))
alpha_6_dft = (f(mu_dft + delta_4_dft - delta_6_dft) /
_voigt_profile(0, 1., 0., .001, .001))
# Create the vector for the default parameters
popt_default = [mu_dft, .001, .001, alpha_1_dft,
delta_2_dft, .001, .001, alpha_2_dft,
delta_3_dft, .001, .001, alpha_3_dft,
delta_4_dft, .001, .001, alpha_4_dft,
delta_5_dft, .001, .001, alpha_5_dft,
delta_6_dft, .001, .001, alpha_6_dft]
# Define the bounds properly
param_bounds = ([mu_bounds[0], 0., 0., 0.,
delta_2_bounds[0], 0., 0., 0.,
delta_3_bounds[0], 0., 0., 0.,
delta_4_bounds[0], 0., 0., 0.,
delta_5_bounds[0], 0., 0., 0.,
delta_6_bounds[0], 0., 0., 0.],
[mu_bounds[1], np.inf, np.inf, np.inf,
delta_2_bounds[1], np.inf, np.inf, np.inf,
delta_3_bounds[1], np.inf, np.inf, np.inf,
delta_4_bounds[1], np.inf, np.inf, np.inf,
delta_5_bounds[1], np.inf, np.inf, np.inf,
delta_6_bounds[1], np.inf, np.inf, np.inf])
# # Create the vector for the default parameters
# popt_default = np.array([mu_dft, .01, alpha_1_dft])
# # Define the bounds properly
# param_bounds = (np.array([mu_bounds[0], 0., 0.]),
# np.array([mu_bounds[1], np.inf, np.inf]))
# try:
# popt, _ = curve_fit(_cit_gaussian_model, ppm_interp,
# f(ppm_interp),
# p0=popt_default)#, bounds=param_bounds)
# except RuntimeError:
# popt = popt_default
res_robust = least_squares(ls_voigt, popt_default,
loss='huber', f_scale=.1,
bounds=param_bounds,
args=(ppm_interp, f(ppm_interp)))
return res_robust.x
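# least_squares with loss='huber' and f_scale=.1 sets a soft margin of about
# 0.1 between inlier and outlier residuals, so isolated baseline spikes are
# down-weighted instead of dominating the fit as they would under a plain
# squared loss (this reading follows SciPy's documented f_scale semantics).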
rda_mod = RDAModality(1250.)
rda_mod.read_data_from_path(path_mrsi)
phase_correction = MRSIPhaseCorrection(rda_mod)
rda_mod = phase_correction.transform(rda_mod)
freq_correction = MRSIFrequencyCorrection(rda_mod)
rda_mod = freq_correction.fit(rda_mod).transform(rda_mod)
baseline_correction = MRSIBaselineCorrection(rda_mod)
rda_mod = baseline_correction.fit(rda_mod).transform(rda_mod)
x = 9
y = 5
z = 5
# out = _cit_gaussian_fitting(rda_mod.bandwidth_ppm[:, y, x, z],
# np.real(rda_mod.data_[:, y, x, z]))
ppm = rda_mod.bandwidth_ppm[:, y, x, z]
spectrum = np.real(rda_mod.data_[:, y, x, z])
ppm_limits = (2.35, 2.85)
idx_ppm = np.flatnonzero(np.bitwise_and(ppm > ppm_limits[0],
ppm < ppm_limits[1]))
sub_ppm = ppm[idx_ppm]
sub_spectrum = spectrum[idx_ppm]
| I2Cvb/prostate | scratch/metabolite_fitting.py | Python | mit | 15,001 | [
"Gaussian"
] | 10ea3cf2c3d14b2d0129f7dc8003c294d07df128b09d648b5ed9e5206b750cd4 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
a_pool:
description:
            - Name of container cloud application that constitutes a pool in an a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
            - Priority of this pool in an a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- If configured then avi will trigger orchestration of pool server creation and deletion.
            - It is only supported for container clouds like mesos, openshift, kubernetes, docker etc.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Allowed values are 1-5000.
- Special values are 0 - 'automatic'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Allowed values are 1-300.
- Special values are 0 - 'immediate'.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Allowed values are 1-65535.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
external_autoscale_groups:
description:
- Names of external auto-scale groups for pool servers.
- Currently available only for aws.
- Field introduced in 17.1.2.
fail_action:
description:
- Enable an action - close connection, http redirect or local http response - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Allowed values are 1-60.
- Special values are 0 - 'immediate', -1 - 'infinite'.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH,
- LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER,
- LB_ALGORITHM_CORE_AFFINITY.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_core_nonaffinity:
description:
            - Degree of non-affinity for core affinity based server selection.
- Allowed values are 1-65535.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
version_added: "2.4"
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
- LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Note applied value will be no less than the number of service engines that the pool is placed on.
- If set to 0, no limit is applied.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
nsx_securitygroup:
description:
- A list of nsx service groups where the servers for the pool are created.
- Field introduced in 17.1.1.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
            - Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
            - The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
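# A second, hypothetical example: the documented `enabled` flag can be used to
# gracefully take a pool offline without deleting it (values are illustrative).
- name: Disable testpool1 without deleting it
  avi_pool:
    controller: 10.10.1.20
    username: avi_user
    password: avi_password
    name: testpool1
    state: present
    enabled: false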
'''
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
a_pool=dict(type='str',),
ab_pool=dict(type='dict',),
ab_priority=dict(type='int',),
apic_epg_name=dict(type='str',),
application_persistence_profile_ref=dict(type='str',),
autoscale_launch_config_ref=dict(type='str',),
autoscale_networks=dict(type='list',),
autoscale_policy_ref=dict(type='str',),
capacity_estimation=dict(type='bool',),
capacity_estimation_ttfb_thresh=dict(type='int',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
connection_ramp_duration=dict(type='int',),
created_by=dict(type='str',),
default_server_port=dict(type='int',),
description=dict(type='str',),
domain_name=dict(type='list',),
east_west=dict(type='bool',),
enabled=dict(type='bool',),
external_autoscale_groups=dict(type='list',),
fail_action=dict(type='dict',),
fewest_tasks_feedback_delay=dict(type='int',),
graceful_disable_timeout=dict(type='int',),
health_monitor_refs=dict(type='list',),
host_check_enabled=dict(type='bool',),
inline_health_monitor=dict(type='bool',),
ipaddrgroup_ref=dict(type='str',),
lb_algorithm=dict(type='str',),
lb_algorithm_consistent_hash_hdr=dict(type='str',),
lb_algorithm_core_nonaffinity=dict(type='int',),
lb_algorithm_hash=dict(type='str',),
max_concurrent_connections_per_server=dict(type='int',),
max_conn_rate_per_server=dict(type='dict',),
name=dict(type='str', required=True),
networks=dict(type='list',),
nsx_securitygroup=dict(type='list',),
pki_profile_ref=dict(type='str',),
placement_networks=dict(type='list',),
prst_hdr_name=dict(type='str',),
request_queue_depth=dict(type='int',),
request_queue_enabled=dict(type='bool',),
rewrite_host_header_to_server_name=dict(type='bool',),
rewrite_host_header_to_sni=dict(type='bool',),
server_auto_scale=dict(type='bool',),
server_count=dict(type='int',),
server_name=dict(type='str',),
server_reselect=dict(type='dict',),
servers=dict(type='list',),
sni_enabled=dict(type='bool',),
ssl_key_and_certificate_ref=dict(type='str',),
ssl_profile_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
use_service_port=dict(type='bool',),
uuid=dict(type='str',),
vrf_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pool',
set([]))
if __name__ == '__main__':
main()
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_pool.py | Python | bsd-3-clause | 18,914 | [
"VisIt"
] | f7044542f96e72265c09b67a8c000c75b91f4365385b815961cc924ab294ed91 |
from splinter import Browser
import log, config, time
def run():
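    """Visit the TF2 Outpost trades page and click every bump button found.
    Assumes (not checked here) that the Firefox profile at
    config.FIREFOX_PROFILE_PATH is already logged in to tf2outpost.com."""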
browser = Browser('firefox', profile=config.FIREFOX_PROFILE_PATH)
browser.visit('http://www.tf2outpost.com/trades')
buttonList = browser.find_by_css(".trade_bump")
listSize = len(buttonList)
log.logMessage("Bumping " + str(listSize) + " items")
for i in range(listSize):
buttonList[i].click()
browser.quit()
def execute(minutes):
run()
time.sleep(minutes*60)
if __name__ == "__main__":
while True:
execute(1) | salolivares/outpostAutoBump | outpostAutoBump/bump.py | Python | mit | 535 | [
"VisIt"
] | 444bb1e07362e44c2148123313e2a6de0459fc73439a3921ad04ebe3d9abfa5a |
########################################################################
# File : ProcessMonitor.py
# Author : Stuart Paterson
########################################################################
""" The Process Monitor utility allows to calculate cumulative CPU time and memory
for a given PID and it's process group. This is only implemented for linux /proc
file systems but could feasibly be extended in the future.
"""
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import shellCall
__RCSID__ = "$Id$"
import os, re, platform
class ProcessMonitor( object ):
#############################################################################
def __init__( self ):
""" Standard constructor
"""
self.log = gLogger.getSubLogger( 'ProcessMonitor' )
self.osType = platform.uname()
#############################################################################
def getCPUConsumed( self, pid ):
"""Returns the CPU consumed for supported platforms when supplied a PID.
"""
currentOS = self.__checkCurrentOS()
if currentOS.lower() == 'linux':
return self.getCPUConsumedLinux( pid )
else:
self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
return S_ERROR( 'Unsupported platform' )
def getMemoryConsumed( self, pid ):
"""Returns the CPU consumed for supported platforms when supplied a PID.
"""
currentOS = self.__checkCurrentOS()
if currentOS.lower() == 'linux':
return self.getMemoryConsumedLinux( pid )
else:
self.log.warn( 'Platform %s is not supported' % ( currentOS ) )
return S_ERROR( 'Unsupported platform' )
def getResourceConsumedLinux( self, pid ):
"""Returns the CPU consumed given a PID assuming a proc file system exists.
"""
pid = str( pid )
masterProcPath = '/proc/%s/stat' % ( pid )
if not os.path.exists( masterProcPath ):
return S_ERROR( 'Process %s does not exist' % ( pid ) )
#Get the current process list
pidListResult = self.__getProcListLinux()
if not pidListResult['OK']:
return pidListResult
pidList = pidListResult['Value']
return self.__getChildResourceConsumedLinux( pid, pidList )
#############################################################################
def getCPUConsumedLinux( self, pid ):
"""Returns the CPU consumed given a PID assuming a proc file system exists.
"""
result = self.getResourceConsumedLinux( pid )
if not result['OK']:
return result
currentCPU = result['Value']['CPU']
self.log.verbose( 'Final CPU estimate is %s' % currentCPU )
return S_OK( currentCPU )
def getMemoryConsumedLinux( self, pid ):
""" Get the current memory consumption
"""
result = self.getResourceConsumedLinux( pid )
if not result['OK']:
return result
vsize = result['Value']['Vsize']
rss = result['Value']['RSS']
self.log.verbose( 'Current memory estimate is Vsize: %s, RSS: %s' % ( vsize, rss ) )
return S_OK( {'Vsize': vsize, 'RSS': rss } )
#############################################################################
def __getProcListLinux( self ):
"""Gets list of process IDs from /proc/*.
"""
result = shellCall( 10, 'ls -d /proc/[0-9]*' )
if not result['OK']:
if not 'Value' in result:
return result
procList = result['Value'][1].replace( '/proc/', '' ).split( '\n' )
return S_OK( procList )
#############################################################################
def __getChildResourceConsumedLinux( self, pid, pidList, infoDict = None ):
"""Adds contributions to CPU total from child processes recursively.
"""
childCPU = 0
vsize = 0
rss = 0
pageSize = os.sysconf('SC_PAGESIZE')
if not infoDict:
infoDict = {}
for pidCheck in pidList:
info = self.__getProcInfoLinux( pidCheck )
if info['OK']:
infoDict[pidCheck] = info['Value']
procGroup = self.__getProcGroupLinux( pid )
if not procGroup['OK']:
return procGroup
procGroup = procGroup['Value'].strip()
for pidCheck, info in infoDict.items():
if pidCheck in infoDict and info[3] == pid:
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from child PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pidCheck]
result = self.__getChildResourceConsumedLinux( pidCheck, pidList, infoDict )
if result['OK']:
childCPU += result['Value']['CPU']
vsize += result['Value']['Vsize']
rss += result['Value']['RSS']
#Next add any contributions from orphan processes in same process group
for pidCheck, info in infoDict.items():
if pidCheck in infoDict and info[3] == 1 and info[4] == procGroup:
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from orphan PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pidCheck]
#Finally add the parent itself
if pid in infoDict:
info = infoDict[pid]
contribution = float( info[13] ) / 100 + float( info[14] ) / 100 + float( info[15] ) / 100 + float( info[16] ) / 100
childCPU += contribution
vsize += float( info[22] )
rss += float( info[23] ) * pageSize
self.log.debug( 'Added %s to CPU total (now %s) from PID %s %s' % ( contribution, childCPU, info[0], info[1] ) )
del infoDict[pid]
# Some debug printout if 0 CPU is determined
if childCPU == 0:
self.log.error( 'Consumed CPU is found to be 0' )
self.log.info( 'Contributing processes:' )
for pidCheck in pidList:
if pidCheck not in infoDict:
info = self.__getProcInfoLinux( pidCheck )
if info['OK']:
self.log.info( ' PID:', info['Value'] )
return S_OK( { "CPU": childCPU,
"Vsize": vsize,
"RSS": rss } )
#############################################################################
def __getProcInfoLinux( self, pid ):
"""Attempts to read /proc/PID/stat and returns list of items if ok.
/proc/[pid]/stat
Status information about the process. This is used by ps(1).
It is defined in /usr/src/linux/fs/proc/array.c.
The fields, in order, with their proper scanf(3) format
specifiers, are:
pid %d (1) The process ID.
comm %s (2) The filename of the executable, in
parentheses. This is visible whether or not the
executable is swapped out.
state %c (3) One character from the string "RSDZTW" where R
is running, S is sleeping in an interruptible
wait, D is waiting in uninterruptible disk sleep,
Z is zombie, T is traced or stopped (on a signal),
and W is paging.
ppid %d (4) The PID of the parent.
pgrp %d (5) The process group ID of the process.
session %d (6) The session ID of the process.
tty_nr %d (7) The controlling terminal of the process. (The
minor device number is contained in the
combination of bits 31 to 20 and 7 to 0; the major
device number is in bits 15 to 8.)
tpgid %d (8) The ID of the foreground process group of the
controlling terminal of the process.
flags %u (%lu before Linux 2.6.22)
(9) The kernel flags word of the process. For bit
meanings, see the PF_* defines in the Linux kernel
source file include/linux/sched.h. Details depend
on the kernel version.
minflt %lu (10) The number of minor faults the process has
made which have not required loading a memory page
from disk.
cminflt %lu (11) The number of minor faults that the process's
waited-for children have made.
majflt %lu (12) The number of major faults the process has
made which have required loading a memory page
from disk.
cmajflt %lu (13) The number of major faults that the process's
waited-for children have made.
utime %lu (14) Amount of time that this process has been
scheduled in user mode, measured in clock ticks
(divide by sysconf(_SC_CLK_TCK)). This includes
guest time, guest_time (time spent running a
virtual CPU, see below), so that applications that
are not aware of the guest time field do not lose
that time from their calculations.
stime %lu (15) Amount of time that this process has been
scheduled in kernel mode, measured in clock ticks
(divide by sysconf(_SC_CLK_TCK)).
cutime %ld (16) Amount of time that this process's waited-for
children have been scheduled in user mode,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)). (See also times(2).) This
includes guest time, cguest_time (time spent
running a virtual CPU, see below).
cstime %ld (17) Amount of time that this process's waited-for
children have been scheduled in kernel mode,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
priority %ld
(18) (Explanation for Linux 2.6) For processes
running a real-time scheduling policy (policy
below; see sched_setscheduler(2)), this is the
negated scheduling priority, minus one; that is, a
number in the range -2 to -100, corresponding to
real-time priorities 1 to 99. For processes
running under a non-real-time scheduling policy,
this is the raw nice value (setpriority(2)) as
represented in the kernel. The kernel stores nice
values as numbers in the range 0 (high) to 39
(low), corresponding to the user-visible nice
range of -20 to 19.
Before Linux 2.6, this was a scaled value based on
the scheduler weighting given to this process.
nice %ld (19) The nice value (see setpriority(2)), a value
in the range 19 (low priority) to -20 (high
priority).
num_threads %ld
(20) Number of threads in this process (since
Linux 2.6). Before kernel 2.6, this field was
hard coded to 0 as a placeholder for an earlier
removed field.
itrealvalue %ld
(21) The time in jiffies before the next SIGALRM
is sent to the process due to an interval timer.
Since kernel 2.6.17, this field is no longer
maintained, and is hard coded as 0.
starttime %llu (was %lu before Linux 2.6)
(22) The time the process started after system
boot. In kernels before Linux 2.6, this value was
expressed in jiffies. Since Linux 2.6, the value
is expressed in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
vsize %lu (23) Virtual memory size in bytes.
rss %ld (24) Resident Set Size: number of pages the
process has in real memory. This is just the
pages which count toward text, data, or stack
space. This does not include pages which have not
been demand-loaded in, or which are swapped out.
rsslim %lu (25) Current soft limit in bytes on the rss of the
process; see the description of RLIMIT_RSS in
getrlimit(2).
startcode %lu
(26) The address above which program text can run.
endcode %lu (27) The address below which program text can run.
startstack %lu
(28) The address of the start (i.e., bottom) of
the stack.
kstkesp %lu (29) The current value of ESP (stack pointer), as
found in the kernel stack page for the process.
kstkeip %lu (30) The current EIP (instruction pointer).
signal %lu (31) The bitmap of pending signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
blocked %lu (32) The bitmap of blocked signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
sigignore %lu
(33) The bitmap of ignored signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
sigcatch %lu
(34) The bitmap of caught signals, displayed as a
decimal number. Obsolete, because it does not
provide information on real-time signals; use
/proc/[pid]/status instead.
wchan %lu (35) This is the "channel" in which the process is
waiting. It is the address of a system call, and
can be looked up in a namelist if you need a
textual name. (If you have an up-to-date
/etc/psdatabase, then try ps -l to see the WCHAN
field in action.)
nswap %lu (36) Number of pages swapped (not maintained).
cnswap %lu (37) Cumulative nswap for child processes (not
maintained).
exit_signal %d (since Linux 2.1.22)
(38) Signal to be sent to parent when we die.
processor %d (since Linux 2.2.8)
(39) CPU number last executed on.
rt_priority %u (since Linux 2.5.19; was %lu before Linux
2.6.22)
(40) Real-time scheduling priority, a number in
the range 1 to 99 for processes scheduled under a
real-time policy, or 0, for non-real-time
processes (see sched_setscheduler(2)).
policy %u (since Linux 2.5.19; was %lu before Linux 2.6.22)
(41) Scheduling policy (see
sched_setscheduler(2)). Decode using the SCHED_*
constants in linux/sched.h.
delayacct_blkio_ticks %llu (since Linux 2.6.18)
(42) Aggregated block I/O delays, measured in
clock ticks (centiseconds).
guest_time %lu (since Linux 2.6.24)
(43) Guest time of the process (time spent running
a virtual CPU for a guest operating system),
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
cguest_time %ld (since Linux 2.6.24)
(44) Guest time of the process's children,
measured in clock ticks (divide by
sysconf(_SC_CLK_TCK)).
"""
procPath = '/proc/%s/stat' % ( pid )
try:
fopen = open( procPath, 'r' )
procStat = fopen.readline()
fopen.close()
except Exception:
return S_ERROR( 'Not able to check %s' % pid )
return S_OK( procStat.split( ' ' ) )
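  #############################################################################
  def __cpuSecondsFromStat( self, statFields ):
    """Illustrative helper, not part of the original interface: convert the
       utime/stime/cutime/cstime fields (14-17) of /proc/[pid]/stat into CPU
       seconds. The hard-coded division by 100 used elsewhere in this class
       assumes _SC_CLK_TCK == 100; this sketch queries the tick rate instead.
    """
    ticksPerSecond = float( os.sysconf( 'SC_CLK_TCK' ) )
    return sum( float( statFields[i] ) for i in ( 13, 14, 15, 16 ) ) / ticksPerSecond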
#############################################################################
def __getProcGroupLinux( self, pid ):
"""Returns UID for given PID.
"""
result = shellCall( 10, 'ps --no-headers -o pgrp -p %s' % ( pid ) )
if not result['OK']:
if not 'Value' in result:
return result
return S_OK( result['Value'][1] )
#############################################################################
def __checkCurrentOS( self ):
"""Checks it is possible to determine CPU consumed with this utility
for the current OS.
"""
localOS = None
self.osType = platform.uname()
if re.search( 'Darwin', self.osType[0] ):
localOS = 'Mac'
elif re.search( 'Windows', self.osType[0] ):
localOS = 'Windows'
else:
localOS = 'Linux'
self.log.debug( 'Will determine CPU consumed for %s flavour OS' % ( localOS ) )
return localOS
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#
| vmendez/DIRAC | Core/Utilities/ProcessMonitor.py | Python | gpl-3.0 | 18,432 | [
"DIRAC"
] | 60b8a0f1b9e23aa4ba6dbbe672b861ddbf521e80ca71383cb55fdeae675f08e1 |
#
# This is a sample ccp1guirc file
############################## User Defaults ######################
#
# The following are default values that you can modify here, or with
# the Edit -> Options dialogue. Any updated values will be saved back
# to the file.
#
# You can also set the locations of the scripts to run the various
# programmes here too.
# The background of the main CCP1GUI window
bg_rgb = (0, 0, 100)
# Change pick_tolerance to determine how close you need to click to
# an atom of it to be selected
pick_tolerance = 0.01
# Set show_selection_by_colour for an atom to change colour when
# it is selected (instead of just displaying a yellow dot).
show_selection_by_colour = 1
conn_scale = 1.0
conn_toler = 0.5
contact_scale = 1.0
contact_toler = 1.5
field_line_width = 1
field_point_size = 2
mol_line_width = 3
mol_point_size = 4
# Sphere rending options
mol_sphere_resolution = 8
mol_sphere_specular = 1.0
mol_sphere_diffuse = 1.0
mol_sphere_specular_power = 5
mol_sphere_ambient = 0.4
# Cylinder rending options
mol_cylinder_resolution = 8
mol_cylinder_specular = 0.7
mol_cylinder_diffuse = 0.7
mol_cylinder_specular_power = 10
mol_cylinder_ambient = 0.4
# Script locations
gamessuk_script = '/home/jmht/codes/GAMESS-UK-7.0/rungamess/rungamess'
#gamessuk_exe = '/home/jmht/test/GAMESS-UK-7.0/bin/gamess'
dalton_script = '/home/jmht/codes/dalton/dalton-2.0/bin/dalton'
###################### End User Defaults #####################
# An example of a new colour map
c = self.colourmap_func()
self.colourmaps.append(c)
c.set_title("Custom map")
c.set_range(-1.0,1.0)
c.set_colours([
(0 , 255 , 255),
(0 , 255 , 0),
(255 , 255 , 0),
(255 , 0 , 0) ])
c.build()
# Modifying the colouring scheme, this will make The X
# atoms (Z=0) red and hydrogens (Z=1) green
from objects.periodic import colours
colours[0] = (0.9, 0.2, 0.2)
colours[1] = (0.2, 0.9, 0.2)
# An example of adding a new menu
self.menuBar.addmenu('Custom', 'Example of a static custom menu')
mbutton = self.menuBar.component('Custom-button')
menu = Menu(mbutton, tearoff=0)
mbutton['menu'] = menu
#
# All code here will execute as part of the __init__ method of the
# main tk-based viewer widget, so any functions that are needed
# from Tk callbacks need to be declared global
#
# It will often be helpful to pass in the main viewer widget
# instance to the new code, this will be "self" in the namespace
#
global listobjects
def listobjects(gui):
txt = 'Objects:\n'
for o in self.data_list:
myclass = string.split(str(o.__class__),'.')[-1]
try:
name = o.name
except AttributeError:
name = "NoName"
try:
title = o.title
except AttributeError:
name = "NoTitle"
txt = txt + myclass + ', name= ' + name + ', title=' + title + '\n'
gui.info(txt)
menu.add_command(label='List Objects', underline=0,command=lambda s=self: listobjects(s) )
#
# tools for loading ChemShell hessian eigenmode information from
# the newopt optimiser
#
from interfaces.chemshell import chemshell_z_modes, chemshell_c_modes
global get_chemshell_z_modes
def get_chemshell_z_modes(gui):
gui.data_list = gui.data_list + chemshell_z_modes()
global get_chemshell_c_modes
def get_chemshell_c_modes(gui):
gui.data_list = gui.data_list + chemshell_c_modes()
menu.add_command(label='Load ChemShell Zopt Modes', underline=0,command=lambda s=self: get_chemshell_z_modes(s) )
menu.add_command(label='Load ChemShell Copt Modes', underline=0,command=lambda s=self: get_chemshell_c_modes(s) )
global gradient_from_field
def gradient_from_field(dens):
"""Crude finite difference gradient code, OK for pictures but not
recommended for anything else"""
ogdens = None
gdens = Field()
gdens.dim = copy.deepcopy(dens.dim)
gdens.origin = copy.deepcopy(dens.origin)
gdens.axis = copy.deepcopy(dens.axis)
gdens.ndd = 3
n = dens.dim[0]
nn = dens.dim[0]*dens.dim[1]
axisi = gdens.axis[0]
axisj = gdens.axis[1]
axisk = gdens.axis[2]
#print axisi
sx = len(axisi)
#print len(axisi), axisi*axisi
si = 0.529177*0.5 * (gdens.dim[0] - 1) / sqrt(axisi*axisi)
sj = 0.529177*0.5 * (gdens.dim[1] - 1) / sqrt(axisj*axisj)
sk = 0.529177*0.5 * (gdens.dim[2] - 1) / sqrt(axisk*axisk)
print 'scale facs',si, sj, sk
print 'axes'
print gdens.axis[0]
print gdens.axis[1]
print gdens.axis[2]
# Normalise axis vectors
axisi = axisi / sqrt(axisi*axisi)
axisj = axisj / sqrt(axisj*axisj)
axisk = axisk / sqrt(axisk*axisk)
gdens.data = []
gdens.ndd = 3
gdens.title = 'Gradient of' + dens.title
gdens.name = gdens.title
for k in range(dens.dim[2]):
for j in range(dens.dim[1]):
for i in range(dens.dim[0]):
vx = 0; vy = 0; vz = 0;
if i == 0 or i == dens.dim[0]-1:
pass
elif j == 0 or j == dens.dim[1]-1:
pass
elif k == 0 or k == dens.dim[2]-1:
pass
else:
# construct gradients along the 3 axis directions
vi = (dens.data[ (i+1) + (j )*n + (k )*nn ] - \
dens.data[ (i-1) + (j )*n + (k )*nn ]) * si
vi1 = (dens.data[ (i) + (j )*n + (k )*nn ] - \
dens.data[ (i-1) + (j )*n + (k )*nn ]) * si
vi2 = (dens.data[ (i+1) + (j )*n + (k )*nn ] - \
dens.data[ (i) + (j )*n + (k )*nn ]) * si
vj = (dens.data[ (i ) + (j+1)*n + (k )*nn ] - \
dens.data[ (i ) + (j-1)*n + (k )*nn ]) * sj
vj1 = (dens.data[ (i ) + (j)*n + (k )*nn ] - \
dens.data[ (i ) + (j-1)*n + (k )*nn ]) * sj
vj2 = (dens.data[ (i ) + (j+1)*n + (k )*nn ] - \
dens.data[ (i ) + (j)*n + (k )*nn ]) * sj
vk = (dens.data[ (i ) + (j )*n + (k+1)*nn ] - \
dens.data[ (i ) + (j )*n + (k-1)*nn ]) * sk
vk1 = (dens.data[ (i ) + (j )*n + (k)*nn ] - \
dens.data[ (i ) + (j )*n + (k-1)*nn ]) * sk
vk2 = (dens.data[ (i ) + (j )*n + (k+1)*nn ] - \
dens.data[ (i ) + (j )*n + (k)*nn ]) * sk
# print 'finite diffs i',vi, vi1, vi2
# print 'finite diffs j',vj, vj1, vj2
# print 'finite diffs k',vk, vk1, vk2
# sum components along axis directions (assumes orthogonal axes)
vx = vi * axisi[0] + vj * axisj[0] + vk * axisk[0]
vy = vi * axisi[1] + vj * axisj[1] + vk * axisk[1]
vz = vi * axisi[2] + vj * axisj[2] + vk * axisk[2]
d = dens.data[ 1*((i ) + (j )*n + (k )*nn) ]
if ogdens:
vxo = ogdens.data[ 3*((i ) + (j )*n + (k )*nn) ]
vyo = ogdens.data[ 3*((i ) + (j )*n + (k )*nn) + 1]
vzo = ogdens.data[ 3*((i ) + (j )*n + (k )*nn) + 2]
rx = vi / vxo
ry = vj / vyo
rz = vk / vzo
#print " %16.8e %12.4e %12.4e %12.4e %12.4e %12.4e %12.4e %4.2f %4.2f %4.2f" % (d, vx, vy, vz, vxo, vyo, vzo, rx,ry,rz)
else:
#print " %16.8e %12.4f %12.4f %12.4f " % (d, vx, vy, vz)
pass
gdens.data.append(vx)
gdens.data.append(vy)
gdens.data.append(vz)
#print gdens.output_punch()
return gdens
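# For interior grid points the loop above forms the central difference
#   df/dx_i ~= ( f[i+1] - f[i-1] ) / ( 2 * h_i )
# with the 1/(2*h_i) grid spacing folded into the si/sj/sk scale factors;
# reading the 0.529177 constant as an Angstrom-to-Bohr unit conversion is an
# assumption, not stated in the original comments.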
global vecfield
def vecfield(self):
print 'executing vecfield'
print self.data_list
for o in self.data_list:
myclass = string.split(str(o.__class__),'.')[-1]
if myclass == 'Field':
print 'field',o
if o.ndd == 1:
gdens = gradient_from_field(o)
self.data_list.append(gdens)
self.info('built vector field ' + gdens.title)
menu.add_command(label='Generate Vector Field ', underline=0,command=lambda s=self: vecfield(s) )
# An example of adding a new dynamic meny
self.menuBar.addmenu('Custom2', 'Example of a dynamic custom menu')
global lister
def lister(obj):
print 'lister',obj
obj.list()
global postit
global dynamic_menu
def postit(self):
print 'postit', self.data_list
menu = dynamic_menu
print 'menu', menu
menu.delete(0,Tkinter.AtEnd())
if len(self.data_list) == 0:
menu.add_command(label="No Objects Loaded", state="disabled")
else:
for obj in self.data_list:
# one submenu for each object
cascade = Menu(menu,tearoff=0)
txt = obj.name
cascade.add_command(label="List", underline=0,command=lambda o = obj: lister(o) )
menu.add_cascade(label=txt, menu=cascade)
print 'postit done'
return menu
mbutton = self.menuBar.component('Custom2-button')
menu = Menu(mbutton, tearoff=0, postcommand=lambda s=self : postit(s))
print 'dynamic menu is',menu
mbutton['menu'] = menu
dynamic_menu = menu
print 'rc finished'
| alexei-matveev/ccp1gui | scripts/ccp1guirc.py | Python | gpl-2.0 | 9,382 | [
"Dalton",
"GAMESS"
] | 17f1d437b40dff5b029620cbbbaf47a0a5a367d249a1a5f9db248da88a38340e |
"""
Author: Jon Ander Gomez Adrian (jon@dsic.upv.es, http://personales.upv.es/jon)
Version: 2.0
Date: October 2016
Universitat Politecnica de Valencia
Technical University of Valencia TU.VLC
"""
import sys
import numpy
from sklearn import metrics
class MyKernel:
"""
This class implements a Kernel Density Estimator by using Parzen Windows.
"""
def __init__(self, bandwidth = 1, kernel = 'gaussian'):
self.n_classes = 0
self.bandwidth = bandwidth
self.kernel = kernel
self.h = 1
self.data_ = None
# ------------------------------------------------------------------------------
def fit(self, X):
#self.data_ = numpy.clone(X)
self.data_ = X
self.h = self.bandwidth / numpy.sqrt(len(X))
return self
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def score_samples(self, X):
        h = self.h
        # Log of the Gaussian kernel normalisation 1 / sqrt(2 * pi * h^2)
        log_factor = -0.5 * numpy.log(2 * numpy.pi * h * h)
log_dens = numpy.zeros(len(X))
log_len = numpy.log(len(self.data_))
for n in range(len(X)):
distances = metrics.pairwise.euclidean_distances(self.data_, X[n].reshape(1, -1), squared = True)
distances = - distances / (2 * h * h)
x = -numpy.inf
for i in range(len(distances)): x = numpy.logaddexp(x, distances[i])
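            # The loop above is a streaming log-sum-exp over the kernel terms,
            # log(sum_i exp(-||x_i - x||^2 / (2 h^2))), which avoids underflow
            # when all of the exponentials are tiny.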
log_dens[n] = log_factor + x - log_len
return log_dens
# ------------------------------------------------------------------------------
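# Minimal usage sketch (not part of the original class): fit the estimator on
# one-dimensional samples and evaluate log-densities on a grid.
if __name__ == '__main__':
    samples = numpy.random.randn(200, 1)
    kde = MyKernel(bandwidth=1.0).fit(samples)
    grid = numpy.linspace(-3.0, 3.0, 7).reshape(-1, 1)
    print(kde.score_samples(grid))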
| jonandergomez/machine_learning_for_students | machine_learning/MyKernel.py | Python | mit | 1,767 | [
"Gaussian"
] | 7cf279f4ec652bc19037796dedcd4ef967acc44297d1bcc5d735fe51e29b6895 |
"""
KeepNote
Editor widget in main window
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@alum.mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import gettext
import sys, os
# pygtk imports
import pygtk
pygtk.require('2.0')
from gtk import gdk
import gtk.glade
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, is_url, unicode_gtk
from keepnote.notebook import \
NoteBookError, \
get_node_url, \
parse_node_url, \
is_node_url
from keepnote import notebook as notebooklib
from keepnote import safefile
from keepnote.gui import richtext
from keepnote.gui.richtext import \
RichTextView, RichTextBuffer, \
RichTextIO, RichTextError
from keepnote.gui import \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
get_resource, \
Action, \
ToggleAction, \
add_actions, \
dialog_find
from keepnote.gui.editor import KeepNoteEditor
_ = keepnote.translate
class TextEditor (KeepNoteEditor):
def __init__(self, app):
KeepNoteEditor.__init__(self, app)
self._app = app
self._notebook = None
# state
self._page = None # current NoteBookPage
self._page_scrolls = {} # remember scroll in each page
self._page_cursors = {}
self._textview_io = RichTextIO()
# textview and its callbacks
self._textview = RichTextView(RichTextBuffer(
self._app.get_richtext_tag_table())) # textview
self._textview.disable()
self._textview.connect("modified", self._on_modified_callback)
self._textview.connect("visit-url", self._on_visit_url)
# scrollbars
self._sw = gtk.ScrolledWindow()
self._sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self._sw.set_shadow_type(gtk.SHADOW_IN)
self._sw.add(self._textview)
self.pack_start(self._sw)
#self._socket = gtk.Socket()
#self.pack_start(self._socket)
# menus
self.editor_menus = EditorMenus(self._app, self)
# find dialog
self.find_dialog = dialog_find.KeepNoteFindDialog(self)
self.show_all()
def set_notebook(self, notebook):
"""Set notebook for editor"""
# set new notebook
self._notebook = notebook
if self._notebook:
# read default font
pass
else:
# no new notebook, clear the view
self.clear_view()
def load_preferences(self, app_pref, first_open=False):
"""Load application preferences"""
self.editor_menus.enable_spell_check(
self._app.pref.get("editors", "general", "spell_check",
default=True))
self._textview.set_default_font("Monospace 10")
def save_preferences(self, app_pref):
"""Save application preferences"""
# record state in preferences
app_pref.set("editors", "general", "spell_check",
self._textview.is_spell_check_enabled())
def get_textview(self):
"""Return the textview"""
return self._textview
def is_focus(self):
"""Return True if text editor has focus"""
return self._textview.is_focus()
def grab_focus(self):
"""Pass focus to textview"""
self._textview.grab_focus()
def clear_view(self):
"""Clear editor view"""
self._page = None
self._textview.disable()
def undo(self):
"""Undo the last action in the viewer"""
self._textview.undo()
def redo(self):
"""Redo the last action in the viewer"""
self._textview.redo()
def view_nodes(self, nodes):
"""View a node(s) in the editor"""
# editor cannot view multiple nodes at once
# if asked to, it will view none
if len(nodes) > 1:
nodes = []
# save current page before changing nodes
self.save()
self._save_cursor()
if len(nodes) == 0:
self.clear_view()
else:
page = nodes[0]
self._page = page
self._textview.enable()
try:
if page.has_attr("payload_filename"):
infile = page.open_file(
page.get_attr("payload_filename"), "r", "utf-8")
text = infile.read()
infile.close()
self._textview.get_buffer().set_text(text)
self._load_cursor()
else:
self.clear_view()
except UnicodeDecodeError, e:
self.clear_view()
except RichTextError, e:
self.clear_view()
self.emit("error", e.msg, e)
except Exception, e:
keepnote.log_error()
self.clear_view()
self.emit("error", "Unknown error", e)
if len(nodes) > 0:
self.emit("view-node", nodes[0])
def _save_cursor(self):
if self._page is not None:
it = self._textview.get_buffer().get_iter_at_mark(
self._textview.get_buffer().get_insert())
self._page_cursors[self._page] = it.get_offset()
x, y = self._textview.window_to_buffer_coords(
gtk.TEXT_WINDOW_TEXT, 0, 0)
it = self._textview.get_iter_at_location(x, y)
self._page_scrolls[self._page] = it.get_offset()
def _load_cursor(self):
# place cursor in last location
if self._page in self._page_cursors:
offset = self._page_cursors[self._page]
it = self._textview.get_buffer().get_iter_at_offset(offset)
self._textview.get_buffer().place_cursor(it)
# place scroll in last position
if self._page in self._page_scrolls:
offset = self._page_scrolls[self._page]
buf = self._textview.get_buffer()
it = buf.get_iter_at_offset(offset)
mark = buf.create_mark(None, it, True)
self._textview.scroll_to_mark(mark,
0.49, use_align=True, xalign=0.0)
buf.delete_mark(mark)
def save(self):
"""Save the loaded page"""
if self._page is not None and \
self._page.is_valid() and \
self._textview.is_modified():
try:
# save text data
buf = self._textview.get_buffer()
text = unicode_gtk(buf.get_text(buf.get_start_iter(),
buf.get_end_iter()))
out = self._page.open_file(
self._page.get_attr("payload_filename"), "w", "utf-8")
out.write(text)
out.close()
# save meta data
self._page.set_attr_timestamp("modified_time")
self._page.save()
except RichTextError, e:
self.emit("error", e.msg, e)
except NoteBookError, e:
self.emit("error", e.msg, e)
except Exception, e:
self.emit("error", str(e), e)
    def save_needed(self):
        """Returns True if textview is modified"""
        return self._textview.is_modified()
def add_ui(self, window):
self._textview.set_accel_group(window.get_accel_group())
self._textview.set_accel_path(CONTEXT_MENU_ACCEL_PATH)
self.editor_menus.add_ui(window)
def remove_ui(self, window):
self.editor_menus.remove_ui(window)
#===========================================
# callbacks for textview
def _on_modified_callback(self, textview, modified):
"""Callback for textview modification"""
self.emit("modified", self._page, modified)
# make notebook node modified
if modified:
self._page.mark_modified()
self._page.notify_change(False)
def _on_visit_url(self, textview, url):
"""Callback for textview visiting a URL"""
if is_node_url(url):
host, nodeid = parse_node_url(url)
node = self._notebook.get_node_by_id(nodeid)
if node:
self.emit("visit-node", node)
else:
try:
self._app.open_webpage(url)
except KeepNoteError, e:
self.emit("error", e.msg, e)
class EditorMenus (gobject.GObject):
def __init__(self, app, editor):
gobject.GObject.__init__(self)
self._app = app
self._editor = editor
self._action_group = None
self._uis = []
self.spell_check_toggle = None
self._removed_widgets = []
#=======================================================
# spellcheck
def enable_spell_check(self, enabled):
"""Spell check"""
self._editor.get_textview().enable_spell_check(enabled)
# see if spell check became enabled
enabled = self._editor.get_textview().is_spell_check_enabled()
# update UI to match
if self.spell_check_toggle:
self.spell_check_toggle.set_active(enabled)
return enabled
def on_spell_check_toggle(self, widget):
"""Toggle spell checker"""
self.enable_spell_check(widget.get_active())
#=====================================================
# toolbar and menus
def add_ui(self, window):
self._action_group = gtk.ActionGroup("Editor")
self._uis = []
add_actions(self._action_group, self.get_actions())
window.get_uimanager().insert_action_group(
self._action_group, 0)
for s in self.get_ui():
self._uis.append(window.get_uimanager().add_ui_from_string(s))
window.get_uimanager().ensure_update()
self.setup_menu(window, window.get_uimanager())
def remove_ui(self, window):
# remove ui
for ui in reversed(self._uis):
window.get_uimanager().remove_ui(ui)
self._uis = []
window.get_uimanager().ensure_update()
# remove action group
window.get_uimanager().remove_action_group(self._action_group)
self._action_group = None
def get_actions(self):
def BothAction(name1, *args):
return [Action(name1, *args), ToggleAction(name1 + " Tool", *args)]
return (map(lambda x: Action(*x), [
# finding
("Find In Page", gtk.STOCK_FIND, _("_Find In Page..."),
"<control>F", None,
lambda w: self._editor.find_dialog.on_find(False)),
("Find Next In Page", gtk.STOCK_FIND, _("Find _Next In Page..."),
"<control>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=True)),
("Find Previous In Page", gtk.STOCK_FIND,
_("Find Pre_vious In Page..."),
"<control><shift>G", None,
lambda w: self._editor.find_dialog.on_find(False, forward=False)),
("Replace In Page", gtk.STOCK_FIND_AND_REPLACE,
_("_Replace In Page..."),
"<control>R", None,
lambda w: self._editor.find_dialog.on_find(True)),
]) +
[ToggleAction("Spell Check", None, _("_Spell Check"),
"", None,
self.on_spell_check_toggle)]
)
def get_ui(self):
ui = ["""
<ui>
<menubar name="main_menu_bar">
<menu action="Edit">
<placeholder name="Viewer">
<placeholder name="Editor">
<placeholder name="Extension"/>
</placeholder>
</placeholder>
</menu>
<menu action="Search">
<placeholder name="Viewer">
<placeholder name="Editor">
<menuitem action="Find In Page"/>
<menuitem action="Find Next In Page"/>
<menuitem action="Find Previous In Page"/>
<menuitem action="Replace In Page"/>
</placeholder>
</placeholder>
</menu>
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
<menu action="Go">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</menu>
<menu action="Tools">
<placeholder name="Viewer">
<menuitem action="Spell Check"/>
</placeholder>
</menu>
</menubar>
</ui>
"""]
ui.append("""
<ui>
<toolbar name="main_tool_bar">
<placeholder name="Viewer">
<placeholder name="Editor">
</placeholder>
</placeholder>
</toolbar>
</ui>
""")
return ui
def setup_menu(self, window, uimanager):
u = uimanager
# get spell check toggle
self.spell_check_toggle = \
uimanager.get_widget("/main_menu_bar/Tools/Viewer/Spell Check")
self.spell_check_toggle.set_sensitive(
self._editor.get_textview().can_spell_check())
self.spell_check_toggle.set_active(window.get_app().pref.get(
"editors", "general", "spell_check", default=True))
| brotchie/keepnote | keepnote/gui/editor_text.py | Python | gpl-2.0 | 14,615 | [
"VisIt"
] | d236a9f832ba59c97bb8781e5e5b9673511f9693fda1ba3e5896034d7586d855 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import espressomd
import numpy as np
np.random.seed(42)
class NSquare(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
self.system.cell_system.set_n_square(use_verlet_lists=False)
self.system.time_step = 1e-3
self.system.cell_system.skin = 0.15
def tearDown(self):
self.system.part.clear()
def test_load_balancing(self):
n_part = 235
n_nodes = self.system.cell_system.get_state()['n_nodes']
n_part_avg = n_part // n_nodes
# Add the particles on node 0, so that they have to be resorted
partcls = self.system.part.add(
pos=n_part * [(0, 0, 0)], type=n_part * [1])
# And now change their positions
partcls.pos = self.system.box_l * \
np.random.random((n_part, 3))
# Add an interacting particle in a corner of the box
self.system.part.add(pos=[(0.01, 0.01, 0.01)], type=[0])
if espressomd.has_features(['LENNARD_JONES']):
self.system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1.0, sigma=0.14, cutoff=0.15, shift=0.1)
ref_energy = self.system.analysis.energy()['total']
assert ref_energy > 10.
# Distribute the particles on the nodes
part_dist = self.system.cell_system.resort()
# Check that we did not lose particles
self.assertEqual(sum(part_dist), n_part + 1)
# Check that the particles are evenly distributed
for node_parts in part_dist:
self.assertAlmostEqual(node_parts, n_part_avg, delta=2)
# Check that we can still access all the particles
# This basically checks if part_node and local_particles
# are still in a valid state after the particle exchange
self.assertEqual(sum(partcls.type), n_part)
# Check that the system is still valid
if espressomd.has_features(['LENNARD_JONES']):
# energy calculation
new_energy = self.system.analysis.energy()['total']
self.assertEqual(new_energy, ref_energy)
# force calculation
self.system.integrator.run(0, recalc_forces=True)
if __name__ == "__main__":
ut.main()
| espressomd/espresso | testsuite/python/nsquare.py | Python | gpl-3.0 | 2,977 | [
"ESPResSo"
] | 9f36d9351cb14cdb18f2f4ad9b4f566f268fbcc7543cacda1f6d13f681c12389 |
from moviepy.editor import VideoFileClip
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import os
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
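# Editor's note: cv2.Canny applies hysteresis thresholding -- gradient pixels
# above high_threshold become edges, and pixels between the two thresholds are
# kept only when connected to a strong edge; a low:high ratio of 1:2 to 1:3
# (as with the 50:150 used below) is the commonly recommended starting point.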
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
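# Editor's note: with sigma passed as 0 (as here), cv2.GaussianBlur requires
# an odd, positive kernel size (1, 3, 5, ...) and derives sigma from it; the
# pipeline below uses 5, i.e. a 5x5 kernel.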
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# Defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending
# on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# Filling pixels inside the polygon defined by "vertices" with
# the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
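# Illustrative usage (values assumed, not from the original script): for a
# 960x540 frame the mask would be a single trapezoid covering the lane, e.g.
#   vertices = np.array([[(127, 540), (450, 320), (510, 320), (900, 540)]],
#                       dtype=np.int32)
# cv2.fillPoly expects a *list* of polygons, hence the extra outer dimension.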
# Given two points with x and y coordinates,
# return the slope of the line between them
def calc_slope(x1, y1, x2, y2):
return (y2 - y1)/(x2 - x1)
# Given the slope of a line, and the x and y coordinates of a
# point on that line, return the y intercept for slope-intercept form
def calc_y_intercept(x, y, m):
return y - m * x
# Given the slope and y intercept of a line, as well as a y
# value on the line, return the associated x value
def calc_x_from_line(y, m, b):
return (y - b)/m
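# Worked example (illustrative): for the points (2, 3) and (4, 7),
#   calc_slope(2, 3, 4, 7)         -> (7 - 3)/(4 - 2) = 2.0
#   calc_y_intercept(2, 3, 2.0)    -> 3 - 2.0*2       = -1.0
#   calc_x_from_line(9, 2.0, -1.0) -> (9 + 1.0)/2.0   = 5.0
# i.e. the line y = 2x - 1 passes through (5, 9), as expected.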
# Given a list of values, calculate the mean
def calc_list_avg(values):
    return sum(values)/len(values)
def draw_lines(img, lines, color=[255, 0, 0], thickness=5):
"""
NOTE: this is the function you might want to use as a starting point once
you want to average/extrapolate the line segments you detect to map
out the full extent of the lane (going from the result shown in
raw-lines-example.mp4 to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
# Initialize lists to hold x and y values of the lines
# Would like to refactor
right_x1 = []
right_x2 = []
right_y1 = []
right_y2 = []
left_x1 = []
left_x2 = []
left_y1 = []
left_y2 = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            # Skip vertical segments, which would divide by zero in calc_slope
            if x2 == x1:
                continue
            # Calculate slope to determine what side the line is on
            # Filter out approximately horizontal lines
            # If negative slope, add points to left side
            # If positive, add to right side
            slope = calc_slope(x1, y1, x2, y2)
            if slope < -.55:
                left_x1.append(x1)
                left_x2.append(x2)
                left_y1.append(y1)
                left_y2.append(y2)
            elif slope > .55:
                right_x1.append(x1)
                right_x2.append(x2)
                right_y1.append(y1)
                right_y2.append(y2)
    # Create best fit lines based on points from either side
    # Would like to refactor
    # Guard each side: the slope filter can leave a side empty, and averaging
    # an empty list would raise ZeroDivisionError
    # Left side
    if left_x1:
        l_x1 = calc_list_avg(left_x1)
        l_x2 = calc_list_avg(left_x2)
        l_y1 = calc_list_avg(left_y1)
        l_y2 = calc_list_avg(left_y2)
        l_m = calc_slope(l_x1, l_y1, l_x2, l_y2)
        l_b = calc_y_intercept(l_x1, l_y1, l_m)
        l_x_bottom = int(calc_x_from_line(img.shape[0], l_m, l_b))
        l_x_top = int(calc_x_from_line(img.shape[0]*.593, l_m, l_b))
        cv2.line(img, (l_x_bottom, img.shape[0]),
                 (l_x_top, int(img.shape[0]*.593)), color, thickness)
    # Right side
    if right_x1:
        r_x1 = calc_list_avg(right_x1)
        r_x2 = calc_list_avg(right_x2)
        r_y1 = calc_list_avg(right_y1)
        r_y2 = calc_list_avg(right_y2)
        r_m = calc_slope(r_x1, r_y1, r_x2, r_y2)
        r_b = calc_y_intercept(r_x1, r_y1, r_m)
        r_x_bottom = int(calc_x_from_line(img.shape[0], r_m, r_b))
        r_x_top = int(calc_x_from_line(img.shape[0]*.593, r_m, r_b))
        cv2.line(img, (r_x_bottom, img.shape[0]),
                 (r_x_top, int(img.shape[0]*.593)), color, thickness)
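# Editor's note (numbers are hypothetical): image coordinates put the origin
# at the top-left with y growing downward, so a left-lane segment such as
# (100, 540) -> (300, 350) has slope (350 - 540)/(300 - 100) = -0.95 and is
# routed to the left-side lists; the 0.55 magnitude cutoff discards
# near-horizontal clutter such as shadows and lane seams.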
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
    Returns an image with Hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
minLineLength=min_line_len,
maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def weighted_img(img, initial_img, α=0.8, β=1., λ=0.):
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + λ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, λ)
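# Worked example (assumed uint8 inputs): with the defaults α=0.8, β=1.0, λ=0,
# a pixel valued 100 in initial_img under a drawn line valued 255 blends to
# 0.8*100 + 1.0*255 + 0 = 335, which cv2.addWeighted saturates to 255 --
# so the overlaid lane lines remain fully opaque against the road.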
def process_image(image):
    """Run the full lane-finding pipeline on one RGB image and return an
    annotated copy."""
    # Get the image size
    imshape = image.shape
    # Canny parameters
    low_threshold = 50    # hysteresis lower bound: weak-edge cutoff
    high_threshold = 150  # hysteresis upper bound: strong-edge cutoff
    blur_kernel = 5       # Gaussian kernel size (must be odd)
# Image mask parameters, as a function of image size
vertices = np.array([[(imshape[1]*.132, imshape[0]),
(imshape[1]*.469, imshape[0]*.593),
(imshape[1]*.531, imshape[0]*.593),
(imshape[1]*.938, imshape[0])]],
dtype=np.int32)
    # Hough transform parameters
    rho = 2                # distance resolution of the accumulator, in pixels
    theta = np.pi/180      # angular resolution of the accumulator, in radians
    hough_threshold = 15   # minimum accumulator votes to accept a line
    min_line_length = 35   # shortest detected segment, in pixels
    max_line_gap = 20      # largest gap bridged between collinear segments
# # # BEGIN PIPELINE # # #
# Convert the image to grayscale, and apply a gaussian blur
gs_img = grayscale(image)
blur_img = gaussian_blur(gs_img, blur_kernel)
# Perform Canny edge detection on the blurred grayscale
    canny_img = canny(blur_img, low_threshold, high_threshold)
# Mask off polygonal area
# Determine dynamically in the future?
masked_img = region_of_interest(canny_img, vertices)
# Retrieve lines from Hough transform
img_lines = hough_lines(masked_img, rho, theta, hough_threshold,
min_line_length, max_line_gap)
result = weighted_img(img_lines, np.copy(image))
# # # END PIPELINE # # #
return result
# Cycle through images, process them, and save to a parallel output directory
# Directory could be defined from cmdln or config file
in_directory = "test_images"
out_directory = in_directory + "_output/"
in_directory += "/"
# Make sure the output directory exists before writing
os.makedirs(out_directory, exist_ok=True)
for image in os.listdir(in_directory):
output_img = process_image(mpimg.imread(in_directory + image))
# Reorder color channels before saving
cv2.imwrite(out_directory + image,
cv2.cvtColor(output_img, cv2.COLOR_RGB2BGR))
# Make sure the video output directory exists before writing
os.makedirs("test_videos_output", exist_ok=True)
# Generate white line video
white_output = 'test_videos_output/solidWhiteRight.mp4'
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image)
white_clip.write_videofile(white_output, audio=False)
# Generate yellow line video
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
clip2 = VideoFileClip("test_videos/solidYellowLeft.mp4")
yellow_clip = clip2.fl_image(process_image)
yellow_clip.write_videofile(yellow_output, audio=False)
# Generate challenge video
challenge_output = 'test_videos_output/challenge.mp4'
clip3 = VideoFileClip("test_videos/challenge.mp4")
challenge_clip = clip3.fl_image(process_image)
challenge_clip.write_videofile(challenge_output, audio=False)
| KyleARector/SelfDrivingCar | P1-FindingLaneLines/line_detection.py | Python | gpl-3.0 | 8,714 | [
"Gaussian"
] | 6922adc40b9d70195eed159bdee1a421a6334ba65ddb5f7513e69feb8f05d3e8 |