text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
""" ResourceStatusClient
Client to interact with the ResourceStatus service and from it with the DB.
"""
# pylint: disable=unused-argument
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import prepareDict
@createClient('ResourceStatus/ResourceStatus')
class ResourceStatusClient(Client):
    """
    The :class:`ResourceStatusClient` class exposes the :mod:`DIRAC.ResourceStatus`
    API. All functions you need are on this client.

    You can use this client on this way

      >>> from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
      >>> rsClient = ResourceStatusClient()
    """

    # Column order shared by every <element><tableType> status table on the
    # service side; 'Meta' and 'VO' are appended per-method where needed.
    _STATUS_COLUMNS = ['Name', 'StatusType', 'Status', 'ElementType', 'Reason',
                       'DateEffective', 'LastCheckTime', 'TokenOwner', 'TokenExpiration']

    def __init__(self, **kwargs):
        super(ResourceStatusClient, self).__init__(**kwargs)
        self.setServer('ResourceStatus/ResourceStatus')

    def _statusDict(self, values, vO, meta=None, withMeta=False):
        """
        Build the {columnName: value} dictionary understood by the service.

        :param list values: values matching `_STATUS_COLUMNS`, in order
        :param str vO: virtual organisation the statement applies to
        :param dict meta: metadata for the mysql query (only used with `withMeta`)
        :param bool withMeta: whether the 'Meta' column is part of the call
        :return: dict as produced by `prepareDict`
        """
        columnNames = list(self._STATUS_COLUMNS)
        columnValues = list(values)
        if withMeta:
            columnNames.append('Meta')
            columnValues.append(meta)
        columnNames.append('VO')
        columnValues.append(vO)
        return prepareDict(columnNames, columnValues)

    def insert(self, tableName, record):
        """
        Insert a dictionary `record` as a row in table `tableName`

        :param str tableName: the name of the table
        :param dict record: dictionary of the record to insert in the table
        :return: S_OK() || S_ERROR()
        """
        return self._getRPC().insert(tableName, record)

    def select(self, tableName, params=None):
        """
        Select rows from the table `tableName`

        :param str tableName: the name of the table
        :param dict params: dictionary of the selection parameters
        :return: S_OK() || S_ERROR()
        """
        if params is None:
            params = {}
        return self._getRPC().select(tableName, params)

    def delete(self, tableName, params=None):
        """
        Delete rows from the table `tableName`

        :param str tableName: the name of the table
        :param dict params: dictionary of the deletion parameters
        :return: S_OK() || S_ERROR()
        """
        if params is None:
            params = {}
        return self._getRPC().delete(tableName, params)

    ################################################################################
    # Element status methods - enjoy !

    def insertStatusElement(self, element, tableType, name, statusType, status,
                            elementType, reason, dateEffective, lastCheckTime,
                            tokenOwner, tokenExpiration=None, vO='all'):
        """
        Inserts on <element><tableType> a new row with the arguments given.

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param str name: name of the individual of class element
        :param str statusType: valid status type for the element class
        :param str status: valid status: `Active` | `Degraded` | `Probing` | `Banned`
        :param str elementType: column to distinguish between the different elements
            in the same element table
        :param str reason: decision that triggered the assigned status
        :param datetime dateEffective: time-stamp from which the status & status type
            are effective
        :param datetime lastCheckTime: time-stamp setting last time the status & status
            were checked
        :param str tokenOwner: token assigned to the site & status type
        :param datetime tokenExpiration: time-stamp setting validity of token ownership
        :param str vO: virtual organisation the status applies to (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().insert(element + tableType, self._statusDict(values, vO))

    def selectStatusElement(self, element, tableType, name=None, statusType=None,
                            status=None, elementType=None, reason=None,
                            dateEffective=None, lastCheckTime=None,
                            tokenOwner=None, tokenExpiration=None, meta=None, vO='all'):
        """
        Gets from <element><tableType> all rows that match the parameters given.

        Every selection parameter accepts a single value or a list of values;
        `None` means "do not filter on this column".

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param name: name(s) of the individual of class element
        :param statusType: valid status type(s) for the element class
        :param status: valid status(es): `Active` | `Degraded` | `Probing` | `Banned`
        :param elementType: column to distinguish between the different elements in
            the same element table
        :param reason: decision(s) that triggered the assigned status
        :param dateEffective: time-stamp(s) from which the status & status type are effective
        :param lastCheckTime: time-stamp(s) setting last time the status & status were checked
        :param tokenOwner: token(s) assigned to the site & status type
        :param tokenExpiration: time-stamp(s) setting validity of token ownership
        :param dict meta: metadata for the mysql query. Currently it is being used only
            for column selection. For example: meta = { 'columns' : [ 'Name' ] } will
            return only the 'Name' column.
        :param str vO: virtual organisation to select for (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().select(element + tableType,
                                     self._statusDict(values, vO, meta=meta, withMeta=True))

    def deleteStatusElement(self, element, tableType, name=None, statusType=None,
                            status=None, elementType=None, reason=None,
                            dateEffective=None, lastCheckTime=None,
                            tokenOwner=None, tokenExpiration=None, meta=None, vO='all'):
        """
        Deletes from <element><tableType> all rows that match the parameters given.

        Every selection parameter accepts a single value or a list of values;
        `None` means "do not filter on this column".

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param name: name(s) of the individual of class element
        :param statusType: valid status type(s) for the element class
        :param status: valid status(es): `Active` | `Degraded` | `Probing` | `Banned`
        :param elementType: column to distinguish between the different elements in
            the same element table
        :param reason: decision(s) that triggered the assigned status
        :param dateEffective: time-stamp(s) from which the status & status type are effective
        :param lastCheckTime: time-stamp(s) setting last time the status & status were checked
        :param tokenOwner: token(s) assigned to the site & status type
        :param tokenExpiration: time-stamp(s) setting validity of token ownership
        :param dict meta: metadata for the mysql query
        :param str vO: virtual organisation to delete for (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().delete(element + tableType,
                                     self._statusDict(values, vO, meta=meta, withMeta=True))

    def addOrModifyStatusElement(self, element, tableType, name=None,
                                 statusType=None, status=None,
                                 elementType=None, reason=None,
                                 dateEffective=None, lastCheckTime=None,
                                 tokenOwner=None, tokenExpiration=None, vO='all'):
        """
        Adds or updates-if-duplicated from <element><tableType> and also adds a log
        if flag is active.

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param str name: name of the individual of class element
        :param str statusType: valid status type for the element class
        :param str status: valid status: `Active` | `Degraded` | `Probing` | `Banned`
        :param str elementType: column to distinguish between the different elements
            in the same element table
        :param str reason: decision that triggered the assigned status
        :param datetime dateEffective: time-stamp from which the status & status type
            are effective
        :param datetime lastCheckTime: time-stamp setting last time the status & status
            were checked
        :param str tokenOwner: token assigned to the site & status type
        :param datetime tokenExpiration: time-stamp setting validity of token ownership
        :param str vO: virtual organisation the status applies to (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().addOrModify(element + tableType, self._statusDict(values, vO))

    def modifyStatusElement(self, element, tableType, name=None, statusType=None,
                            status=None, elementType=None, reason=None,
                            dateEffective=None, lastCheckTime=None, tokenOwner=None,
                            tokenExpiration=None, vO='all'):
        """
        Updates from <element><tableType> and also adds a log if flag is active.

        NOTE: like the historical implementation, this delegates to the service's
        `addOrModify` endpoint, so a missing row is created rather than rejected.

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param str name: name of the individual of class element
        :param str statusType: valid status type for the element class
        :param str status: valid status: `Active` | `Degraded` | `Probing` | `Banned`
        :param str elementType: column to distinguish between the different elements
            in the same element table
        :param str reason: decision that triggered the assigned status
        :param datetime dateEffective: time-stamp from which the status & status type
            are effective
        :param datetime lastCheckTime: time-stamp setting last time the status & status
            were checked
        :param str tokenOwner: token assigned to the site & status type
        :param datetime tokenExpiration: time-stamp setting validity of token ownership
        :param str vO: virtual organisation the status applies to (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().addOrModify(element + tableType, self._statusDict(values, vO))

    def addIfNotThereStatusElement(self, element, tableType, name=None,
                                   statusType=None, status=None,
                                   elementType=None, reason=None,
                                   dateEffective=None, lastCheckTime=None,
                                   tokenOwner=None, tokenExpiration=None, vO='all'):
        """
        Adds if-not-duplicated from <element><tableType> and also adds a log if flag
        is active.

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param str tableType: valid table type: 'Status' | 'Log' | 'History'
        :param str name: name of the individual of class element
        :param str statusType: valid status type for the element class
        :param str status: valid status: `Active` | `Degraded` | `Probing` | `Banned`
        :param str elementType: column to distinguish between the different elements
            in the same element table
        :param str reason: decision that triggered the assigned status
        :param datetime dateEffective: time-stamp from which the status & status type
            are effective
        :param datetime lastCheckTime: time-stamp setting last time the status & status
            were checked
        :param str tokenOwner: token assigned to the site & status type
        :param datetime tokenExpiration: time-stamp setting validity of token ownership
        :param str vO: virtual organisation the status applies to (default 'all')
        :return: S_OK() || S_ERROR()
        """
        values = [name, statusType, status, elementType, reason, dateEffective,
                  lastCheckTime, tokenOwner, tokenExpiration]
        return self._getRPC().addIfNotThere(element + tableType, self._statusDict(values, vO))

    ##############################################################################
    # Protected methods - Use carefully !!

    def notify(self, request, params):
        """ Send notification for a given request with its params to the diracAdmin

        :param str request: name of the DB operation being notified
        :param params: matching parameters of the operation, stringified into the mail body
        """
        address = Operations().getValue('ResourceStatus/Notification/DebugGroup/Users')
        msg = 'Matching parameters: ' + str(params)
        sbj = '[NOTIFICATION] DIRAC ResourceStatusDB: ' + request + ' entry'
        NotificationClient().sendMail(address, sbj, msg, address)

    def _extermineStatusElement(self, element, name, keepLogs=True):
        """
        Deletes from <element>Status, <element>History and (optionally) <element>Log
        all rows with `name`. It removes all the entries, logs, etc...
        Use with common sense !

        :param str element: valid element: `Site` | `Resource` | `Node`
        :param name: name(s) of the individual of class element
        :param bool keepLogs: if active, logs are kept in the database
        :return: S_OK() || S_ERROR()
        """
        return self.__extermineStatusElement(element, name, keepLogs)

    def __extermineStatusElement(self, element, name, keepLogs):
        """
        This method iterates over the three ( or four ) table types - depending
        on the value of keepLogs - deleting all matches of `name`.
        """
        tableTypes = ['Status', 'History']
        if keepLogs is False:
            tableTypes.append('Log')
        for table in tableTypes:
            deleteQuery = self.deleteStatusElement(element, table, name=name)
            if not deleteQuery['OK']:
                return deleteQuery
        return S_OK()
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
yujikato/DIRAC
|
src/DIRAC/ResourceStatusSystem/Client/ResourceStatusClient.py
|
Python
|
gpl-3.0
| 16,679
|
[
"DIRAC"
] |
ee249b5a306569effbf0a73c0eddb064a3d6975a7f3c00d55dc3da96839d4849
|
# Copyright 2010-2017, The University of Melbourne
# Copyright 2010-2017, Brian May
#
# This file is part of Karaage.
#
# Karaage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Karaage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Karaage If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth import authenticate
from django.contrib.auth.models import BaseUserManager
class PersonManager(BaseUserManager):
    """Default manager for Person: authentication helper plus user creation."""

    def authenticate(self, username, password):
        """Delegate credential checking to the configured Django auth backends."""
        return authenticate(username=username, password=password)

    def get_queryset(self):
        """All persons, with related objects pre-fetched via select_related()."""
        return super(PersonManager, self).get_queryset().select_related()

    def _create_user(self, username, email, short_name, full_name,
                     institute, password, is_admin, **extra_fields):
        """Creates a new active person. """
        person = self.model(
            username=username,
            email=email,
            short_name=short_name,
            full_name=full_name,
            is_admin=is_admin,
            institute=institute,
            **extra_fields
        )
        person.set_password(password)
        person.save()
        return person

    def create_user(self, username, email, short_name, full_name,
                    institute, password=None, **extra_fields):
        """ Creates a new ordinary person. """
        return self._create_user(
            username=username,
            email=email,
            short_name=short_name,
            full_name=full_name,
            institute=institute,
            password=password,
            is_admin=False,
            **extra_fields)

    def create_superuser(self, username, email, short_name, full_name,
                         institute, password, **extra_fields):
        """ Creates a new person with super powers. """
        return self._create_user(
            username=username,
            email=email,
            short_name=short_name,
            full_name=full_name,
            institute=institute,
            password=password,
            is_admin=True,
            **extra_fields)
class ActivePersonManager(PersonManager):
    """Restrict the default queryset to active, non-system persons."""

    def get_queryset(self):
        base = super(ActivePersonManager, self).get_queryset()
        return base.select_related().filter(is_active=True, is_systemuser=False)
class DeletedPersonManager(PersonManager):
    """Only persons that have been deactivated (soft-deleted)."""

    def get_queryset(self):
        base = super(DeletedPersonManager, self).get_queryset()
        return base.filter(is_active=False)
class LeaderManager(PersonManager):
    """Only active persons who currently lead at least one project."""

    def get_queryset(self):
        """Return the queryset of active persons for whom is_leader() is true.

        Leadership is a Python-level predicate, so all active persons are
        loaded, filtered in memory, and then re-queried by id.
        """
        query = super(LeaderManager, self).get_queryset()
        query = query.filter(is_active=True)
        leader_ids = [person.id for person in query if person.is_leader()]
        # Bug fix: the original returned `self.filter(id__in=leader_ids)`.
        # Manager.filter proxies through self.get_queryset(), so that call
        # re-entered this method and recursed forever. Filtering the
        # already-built queryset has the intended effect.
        return query.filter(id__in=leader_ids)
|
brianmay/karaage
|
karaage/people/managers.py
|
Python
|
gpl-3.0
| 3,220
|
[
"Brian"
] |
bebb7d047fd3e45ad6e3102b2a17bd7f020ed91edab0a9765d88a3cdebe0cbca
|
#!/usr/bin/env python3
import argparse, pysam, gzip
# Watson-Crick base pairing, preserving case; 'N' maps to itself.
NT_PAIRS = {'G': 'C', 'T': 'A', 'C': 'G', 'A': 'T', 'N': 'N',
            'g': 'c', 't': 'a', 'c': 'g', 'a': 't', 'n': 'n'}


def reverse_complement(seq):
    """Return the reverse complement of *seq* (case-preserving).

    Raises KeyError for characters outside ACGTN/acgtn, like the original.
    """
    return ''.join(NT_PAIRS[base] for base in reversed(seq))
def text_open_write(filename):
    """Open *filename* for text writing; gzip-compress when it ends in '.gz'."""
    wants_gzip = str(filename).endswith('.gz')
    if wants_gzip:
        return gzip.open(filename, 'wt')
    return open(filename, 'w')
def bam2fq(bam_file, fastq1, fastq2):
    """Convert a paired-end BAM file into two mate-synchronised FASTQ files.

    :param bam_file: input BAM path, readable by pysam
    :param fastq1: output path for read-1 records ('.gz' suffix enables gzip)
    :param fastq2: output path for read-2 records ('.gz' suffix enables gzip)
    :return: True on completion

    Reads are buffered in dicts keyed by query name until their mate is seen,
    so paired records are written at the same position in both output files.
    NOTE(review): reads whose mate never appears remain in the buffers and are
    silently dropped at the end -- confirm that is acceptable for this pipeline.
    """
    with pysam.AlignmentFile(bam_file) as bam, text_open_write(fastq1) as fq1, text_open_write(fastq2) as fq2:
        # Mates seen so far but not yet paired, keyed by query name.
        reads1 = {}
        reads2 = {}
        reads = bam.fetch()
        for read_i in reads:
            if not read_i.is_secondary:  # skip secondary alignments
                # BAM stores reverse-strand reads reverse-complemented;
                # undo that so the FASTQ holds the original read orientation.
                seq_i = reverse_complement(read_i.query_sequence) if read_i.is_reverse else read_i.query_sequence
                qual_i = read_i.qual[::-1] if read_i.is_reverse else read_i.qual
                if read_i.is_read1:
                    if read_i.query_name in reads2:
                        # Mate already buffered: emit both records now.
                        fq1.write( '@{}/1\n'.format(read_i.query_name) )
                        fq1.write( seq_i + '\n' )
                        fq1.write( '+\n' )
                        fq1.write( qual_i + '\n')
                        read_2 = reads2.pop(read_i.query_name)
                        fq2.write( '@{}/2\n'.format( read_2['qname'] ) )
                        fq2.write( read_2['seq'] + '\n' )
                        fq2.write( '+\n' )
                        fq2.write( read_2['bq'] + '\n')
                    else:
                        # No mate yet: buffer this read-1.
                        reads1[read_i.query_name] = {}
                        reads1[read_i.query_name]['qname'] = read_i.query_name
                        reads1[read_i.query_name]['seq'] = seq_i
                        reads1[read_i.query_name]['bq'] = qual_i
                elif read_i.is_read2:
                    if read_i.query_name in reads1:
                        # Mate already buffered: emit both records now.
                        read_1 = reads1.pop(read_i.query_name)
                        fq1.write( '@{}/1\n'.format( read_1['qname'] ) )
                        fq1.write( read_1['seq'] + '\n' )
                        fq1.write( '+\n' )
                        fq1.write( read_1['bq'] + '\n')
                        fq2.write( '@{}/2\n'.format(read_i.query_name) )
                        fq2.write( seq_i + '\n' )
                        fq2.write( '+\n' )
                        fq2.write( qual_i + '\n')
                    else:
                        # No mate yet: buffer this read-2.
                        reads2[read_i.query_name] = {}
                        reads2[read_i.query_name]['qname'] = read_i.query_name
                        reads2[read_i.query_name]['seq'] = seq_i
                        reads2[read_i.query_name]['bq'] = qual_i
    return True
if __name__ == "__main__":
    # Command-line entry point: convert a paired-end BAM into two FASTQ files.
    cli = argparse.ArgumentParser(
        description="Convert paired-end BAM to FASTQ1 and 2",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    cli.add_argument('-bam', '--bam', type=str, help="bam file in")
    cli.add_argument('-fq1', '--fastq1', type=str, help="fastq1 out")
    cli.add_argument('-fq2', '--fastq2', type=str, help="fastq2 out")
    cli_args = cli.parse_args()
    bam2fq(cli_args.bam, cli_args.fastq1, cli_args.fastq2)
|
bioinform/somaticseq
|
somaticseq/utilities/paired_end_bam2fastq.py
|
Python
|
bsd-2-clause
| 3,416
|
[
"pysam"
] |
74c32c0e2f2d9c0d2eb548f0e0e37ae982b82bb223af2bdb5b4d6d821cea7579
|
import logging, re, json, commands, os, copy
from datetime import datetime, timedelta
import time
import json
import copy
import itertools, random
import string as strm
import math
from urllib import urlencode
from urlparse import urlparse, urlunparse, parse_qs
from django.utils.decorators import available_attrs
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, render, redirect
from django.template import RequestContext, loader
from django.db.models import Count, Sum
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.vary import vary_on_headers
from django.utils.cache import patch_vary_headers
from django.views.decorators.cache import never_cache
import django.utils.cache as ucache
from functools import wraps
from django.utils import timezone
from django.utils.cache import patch_cache_control, patch_response_headers
from django.db.models import Q
from django.core.cache import cache
from django.utils import encoding
from django.conf import settings as djangosettings
from django.db import connection, transaction
from django.db import connections
from core.common.utils import getPrefix, getContextVariables, QuerySetChain
from core.settings import STATIC_URL, FILTER_UI_ENV, defaultDatetimeFormat
from core.pandajob.models import PandaJob, Jobsactive4, Jobsdefined4, Jobswaiting4, Jobsarchived4, Jobsarchived, \
GetRWWithPrioJedi3DAYS, RemainedEventsPerCloud3dayswind, JobsWorldViewTaskType, Getfailedjobshspecarch, Getfailedjobshspec, JobsWorldView
from schedresource.models import Schedconfig
from core.common.models import Filestable4
from core.common.models import Datasets
from core.common.models import Sitedata
from core.common.models import FilestableArch
from core.common.models import Users
from core.common.models import Jobparamstable
from core.common.models import Metatable
from core.common.models import Logstable
from core.common.models import Jobsdebug
from core.common.models import Cloudconfig
from core.common.models import Incidents
from core.common.models import Pandalog
from core.common.models import JediJobRetryHistory
from core.common.models import JediTasks
from core.common.models import JediTasksOrdered
from core.common.models import GetEventsForTask
from core.common.models import JediTaskparams
from core.common.models import JediEvents
from core.common.models import JediDatasets
from core.common.models import JediDatasetContents
from core.common.models import JediWorkQueue
from core.common.models import RequestStat, BPUser, Visits
from core.settings.config import ENV
from core.common.models import RunningMCProductionTasks
from core.common.models import RunningDPDProductionTasks, RunningProdTasksModel
from time import gmtime, strftime
from settings.local import dbaccess
from settings.local import PRODSYS
import string as strm
from django.views.decorators.cache import cache_page
import TaskProgressPlot
import ErrorCodes
import GlobalShares
import hashlib
from threading import Thread,Lock
import decimal
import base64
import urllib3
from django.views.decorators.cache import never_cache
import chainsql
# Error-code lookup tables, lazily filled from ErrorCodes.ErrorCodes()
# on the first initRequest() call.
errorFields = []
errorCodes = {}
errorStages = {}
from django.template.defaulttags import register
@register.filter
def get_item(dictionary, key):
    """Template filter: look up *key* in *dictionary*, None when absent."""
    value = dictionary.get(key)
    return value
# Short host name of the machine serving this instance (domain stripped).
try:
    hostname = commands.getoutput('hostname')
    if hostname.find('.') > 0:
        hostname = hostname[:hostname.find('.')]
except Exception:  # best effort -- an empty hostname is acceptable
    hostname = ''

# Module-level caches refreshed by setupSiteInfo().
callCount = 0
homeCloud = {}
objectStores = {}
pandaSites = {}

cloudList = ['CA', 'CERN', 'DE', 'ES', 'FR', 'IT', 'ND', 'NL', 'RU', 'TW', 'UK', 'US']

# Job states, in display order.
statelist = ['defined', 'waiting', 'pending', 'assigned', 'throttled', \
             'activated', 'sent', 'starting', 'running', 'holding', \
             'transferring', 'finished', 'failed', 'cancelled', 'merging', 'closed']
sitestatelist = ['defined', 'waiting', 'assigned', 'throttled', 'activated', 'sent', 'starting', 'running', 'holding',
                 'merging', 'transferring', 'finished', 'failed', 'cancelled']
eventservicestatelist = ['ready', 'sent', 'running', 'finished', 'cancelled', 'discarded', 'done', 'failed', 'fatal', 'merged']

# Task states with their abbreviated display labels (parallel lists).
taskstatelist = ['registered', 'defined', 'assigning', 'ready', 'pending', 'scouting', 'scouted', 'running', 'prepared',
                 'done', 'failed', 'finished', 'aborting', 'aborted', 'finishing', 'topreprocess', 'preprocessing',
                 'tobroken', 'broken', 'toretry', 'toincexec', 'rerefine']
taskstatelist_short = ['reg', 'def', 'assgn', 'rdy', 'pend', 'scout', 'sctd', 'run', 'prep', 'done', 'fail', 'finish',
                       'abrtg', 'abrtd', 'finishg', 'toprep', 'preprc', 'tobrok', 'broken', 'retry', 'incexe', 'refine']
taskstatedict = [{'state': state, 'short': short}
                 for state, short in zip(taskstatelist, taskstatelist_short)]

# Per-subsystem error code/diagnostic column names on the job tables.
errorcodelist = [
    {'name': 'brokerage', 'error': 'brokerageerrorcode', 'diag': 'brokerageerrordiag'},
    {'name': 'ddm', 'error': 'ddmerrorcode', 'diag': 'ddmerrordiag'},
    {'name': 'exe', 'error': 'exeerrorcode', 'diag': 'exeerrordiag'},
    {'name': 'jobdispatcher', 'error': 'jobdispatchererrorcode', 'diag': 'jobdispatchererrordiag'},
    {'name': 'pilot', 'error': 'piloterrorcode', 'diag': 'piloterrordiag'},
    {'name': 'sup', 'error': 'superrorcode', 'diag': 'superrordiag'},
    {'name': 'taskbuffer', 'error': 'taskbuffererrorcode', 'diag': 'taskbuffererrordiag'},
    {'name': 'transformation', 'error': 'transexitcode', 'diag': None},
]

_logger = logging.getLogger('bigpandamon')

# Clients whose responses must never be served from the front-end cache.
notcachedRemoteAddress = ['188.184.185.129']

LAST_N_HOURS_MAX = 0
# JOB_LIMIT = 0
# TFIRST = timezone.now()
# TLAST = timezone.now() - timedelta(hours=2400)
PLOW = 1000000
PHIGH = -1000000

# Job/site/task attributes exposed as standard filter fields in the UI.
standard_fields = ['processingtype', 'computingsite', 'jobstatus', 'prodsourcelabel', 'produsername', 'jeditaskid',
                   'workinggroup', 'transformation', 'cloud', 'homepackage', 'inputfileproject', 'inputfiletype',
                   'attemptnr', 'specialhandling', 'priorityrange', 'reqid', 'minramcount', 'eventservice',
                   'jobsubstatus', 'nucleus']
standard_sitefields = ['region', 'gocname', 'nickname', 'status', 'tier', 'comment_field', 'cloud', 'allowdirectaccess',
                       'allowfax', 'copytool', 'faxredirector', 'retry', 'timefloor']
standard_taskfields = ['workqueue_id', 'tasktype', 'superstatus', 'status', 'corecount', 'taskpriority', 'username', 'transuses',
                       'transpath', 'workinggroup', 'processingtype', 'cloud', 'campaign', 'project', 'stream', 'tag',
                       'reqid', 'ramcount', 'nucleus', 'eventservice']

# Supported virtual organisations and their display names; VOMODE is set
# per-request by initRequest().
VOLIST = ['atlas', 'bigpanda', 'htcondor', 'core', 'aipanda']
VONAME = {'atlas': 'ATLAS', 'bigpanda': 'BigPanDA', 'htcondor': 'HTCondor', 'core': 'LSST', '': ''}
VOMODE = ' '
def escapeInput(strToEscape):
    """Sanitise a user-supplied string for use in raw SQL: coerce it to
    ASCII (dropping anything else) and replace shell/SQL-sensitive
    characters with underscores."""
    dangerous = '$%^&()[]{};<>?\`~+%\'\"'
    replacement = '_' * len(dangerous)
    translation = strm.maketrans(dangerous, replacement)
    strToEscape = encoding.smart_str(strToEscape, encoding='ascii', errors='ignore')
    return strToEscape.translate(translation)
def setupSiteInfo(request):
    """Refresh the module-level site caches from the schedconfig table.

    Fills the globals homeCloud (siteid -> cloud), pandaSites (siteid ->
    selected schedconfig fields) and objectStores (siteid -> http(s) log
    path). To limit DB load, the refresh only runs on the first call,
    every 100th call, or when 'refresh' is in the request parameters.
    """
    requestParams = {}
    if not 'requestParams' in request.session:
        request.session['requestParams'] = requestParams
    global homeCloud, objectStores, pandaSites, callCount
    callCount += 1
    # Serve from the existing cache unless a refresh is due or requested.
    if len(homeCloud) > 0 and callCount % 100 != 1 and 'refresh' not in request.session['requestParams']: return
    sflist = ('siteid', 'site', 'status', 'cloud', 'tier', 'comment_field', 'objectstore', 'catchall', 'corepower')
    # CMS sites are monitored elsewhere; exclude them here.
    sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
    for site in sites:
        pandaSites[site['siteid']] = {}
        for f in ('siteid', 'status', 'tier', 'site', 'comment_field', 'cloud', 'corepower'):
            pandaSites[site['siteid']][f] = site[f]
        homeCloud[site['siteid']] = site['cloud']
        # Record an object-store log URL for sites that publish one.
        if (site['catchall'] != None) and (
                site['catchall'].find('log_to_objectstore') >= 0 or site['objectstore'] != ''):
            # print 'object store site', site['siteid'], site['catchall'], site['objectstore']
            try:
                # NOTE(review): getFilePathForObjectStore is defined elsewhere
                # in this module; any failure here is deliberately ignored.
                fpath = getFilePathForObjectStore(site['objectstore'], filetype="logs")
                #### dirty hack: rewrite the root:// endpoint to its https equivalent
                fpath = fpath.replace('root://atlas-objectstore.cern.ch/atlas/logs',
                                      'https://atlas-objectstore.cern.ch:1094/atlas/logs')
                if fpath != "" and fpath.startswith('http'): objectStores[site['siteid']] = fpath
            except:
                pass
def initRequest(request):
    """Initialise per-request session state for the monitor views.

    Populates request.session with: the VO mode, SSO (SAML/ADFS) user
    attributes, view/request parameter dicts, the debug flag, the server
    hostname, the default cache lifetime and (for https) the client DN.
    Also triggers the site-info cache refresh and the lazy load of the
    error-code tables.

    :return: (True, None) on success, or (False, <rendered error page>) when
        a numeric GET parameter fails int() validation.
    """
    global VOMODE, ENV, hostname
    VOMODE = ''
    # An Oracle backend implies the ATLAS instance of the monitor.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        VOMODE = 'atlas'
    request.session['IS_TESTER'] = False
    if VOMODE == 'atlas':
        # Copy the SSO (mellon/ADFS) attributes into the session, when present.
        if "MELLON_SAML_RESPONSE" in request.META and base64.b64decode(request.META['MELLON_SAML_RESPONSE']):
            if "ADFS_FULLNAME" in request.META:
                request.session['ADFS_FULLNAME'] = request.META['ADFS_FULLNAME']
            if "ADFS_EMAIL" in request.META:
                request.session['ADFS_EMAIL'] = request.META['ADFS_EMAIL']
            if "ADFS_FIRSTNAME" in request.META:
                request.session['ADFS_FIRSTNAME'] = request.META['ADFS_FIRSTNAME']
            if "ADFS_LASTNAME" in request.META:
                request.session['ADFS_LASTNAME'] = request.META['ADFS_LASTNAME']
            if "ADFS_LOGIN" in request.META:
                request.session['ADFS_LOGIN'] = request.META['ADFS_LOGIN']
            # Look up the BPUser record, creating it on first login.
            user = None
            try:
                user = BPUser.objects.get(username=request.session['ADFS_LOGIN'])
                request.session['IS_TESTER'] = user.is_tester
            except BPUser.DoesNotExist:
                user = BPUser.objects.create_user(username=request.session['ADFS_LOGIN'], email=request.session['ADFS_EMAIL'], first_name=request.session['ADFS_FIRSTNAME'], last_name=request.session['ADFS_LASTNAME'])
                user.set_unusable_password()
                user.save()
    viewParams = {}
    # if not 'viewParams' in request.session:
    request.session['viewParams'] = viewParams
    # Build a copy of the current URL with the 'timestamp' parameter removed,
    # ending in '&' or '?' so callers can append further parameters directly.
    url = request.get_full_path()
    u = urlparse(url)
    query = parse_qs(u.query)
    query.pop('timestamp', None)
    u = u._replace(query=urlencode(query, True))
    request.session['notimestampurl'] = urlunparse(u) + ('&' if len(query) > 0 else '?')
    #if 'USER' in os.environ and os.environ['USER'] != 'apache':
    #    request.session['debug'] = True
    # Debug mode is opt-in via ?debug=insider and toggles Django's DEBUG flag.
    if 'debug' in request.GET and request.GET['debug'] == 'insider':
        request.session['debug'] = True
        djangosettings.DEBUG = True
    else:
        request.session['debug'] = False
        djangosettings.DEBUG = False
    if len(hostname) > 0: request.session['hostname'] = hostname
    ## self monitor
    initSelfMonitor(request)
    ## Set default page lifetime in the http header, for the use of the front end cache
    request.session['max_age_minutes'] = 10
    ## Is it an https connection with a legit cert presented by the user?
    if 'SSL_CLIENT_S_DN' in request.META or 'HTTP_X_SSL_CLIENT_S_DN' in request.META:
        if 'SSL_CLIENT_S_DN' in request.META:
            request.session['userdn'] = request.META['SSL_CLIENT_S_DN']
        else:
            request.session['userdn'] = request.META['HTTP_X_SSL_CLIENT_S_DN']
        userrec = Users.objects.filter(dn__startswith=request.session['userdn']).values()
        if len(userrec) > 0:
            request.session['username'] = userrec[0]['name']
    ENV['MON_VO'] = ''
    request.session['viewParams']['MON_VO'] = ''
    # Derive the VO from the host name prefix, defaulting to ATLAS.
    if 'HTTP_HOST' in request.META:
        for vo in VOLIST:
            if request.META['HTTP_HOST'].startswith(vo):
                VOMODE = vo
    else:
        VOMODE = 'atlas'
    ## If DB is Oracle, set vomode to atlas
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        VOMODE = 'atlas'
    ENV['MON_VO'] = VONAME[VOMODE]
    request.session['viewParams']['MON_VO'] = ENV['MON_VO']
    global errorFields, errorCodes, errorStages
    # Normalise request parameters (lower-cased keys, '+' -> space) into the
    # session; POST skips the CSRF token, GET strips '#' except for batchid.
    requestParams = {}
    request.session['requestParams'] = requestParams
    if request.method == 'POST':
        for p in request.POST:
            if p in ('csrfmiddlewaretoken',): continue
            pval = request.POST[p]
            pval = pval.replace('+', ' ')
            request.session['requestParams'][p.lower()] = pval
    else:
        for p in request.GET:
            pval = request.GET[p]
            pval = pval.replace('+', ' ')
            if p.lower() != 'batchid':  # Special requester exception
                pval = pval.replace('#', '')
            ## is it int, if it's supposed to be?
            if p.lower() in (
                    'days', 'hours', 'limit', 'display_limit', 'taskid', 'jeditaskid', 'jobsetid', 'corecount', 'taskpriority',
                    'priority', 'attemptnr', 'statenotupdated', 'tasknotupdated',):
                try:
                    i = int(request.GET[p])
                except:
                    data = {
                        'viewParams': request.session['viewParams'],
                        'requestParams': request.session['requestParams'],
                        "errormessage": "Illegal value '%s' for %s" % (pval, p),
                    }
                    return False, render_to_response('errorPage.html', data, RequestContext(request))
            request.session['requestParams'][p.lower()] = pval
    setupSiteInfo(request)
    # Lazily populate the module-level error-code tables on first use.
    if len(errorFields) == 0:
        codes = ErrorCodes.ErrorCodes()
        errorFields, errorCodes, errorStages = codes.getErrorCodes()
    return True, None
def preprocessWildCardString(strToProcess, fieldToLookAt):
    """
    Translate a '*'-wildcard search string into an SQL condition on one column.

    Fragments between '*' markers become LIKE conditions; a star adjacent to a
    fragment turns into a '%' wildcard on that side. The doubled '%%' is emitted
    deliberately because the resulting string is later run through Python
    %-formatting before reaching the database.

    :param strToProcess: user-supplied pattern, e.g. '*user*task'
    :param fieldToLookAt: column name the condition applies to
    :return: parenthesized SQL boolean expression; '(1=1)' (no constraint)
             when the pattern is empty
    """
    if len(strToProcess) == 0:
        return '(1=1)'
    cardParametersRaw = strToProcess.split('*')
    # empty fragments are only markers of leading/trailing/adjacent stars
    cardRealParameters = [s for s in cardParametersRaw if len(s) >= 1]
    countRealParameters = len(cardRealParameters)
    countParameters = len(cardParametersRaw)
    if countParameters == 0:
        return '(1=1)'
    currentRealParCount = 0
    currentParCount = 0
    extraQueryString = '('
    for parameter in cardParametersRaw:
        leadStar = False
        trailStar = False
        if len(parameter) > 0:
            # a list slot before (after) this fragment means a '*' was adjacent
            if currentParCount - 1 >= 0:
                leadStar = True
            if currentParCount + 1 < countParameters:
                trailStar = True
            # PRODUSERID is always matched as a substring.
            # Bug fix: the original compared fieldToLookAt.lower() against the
            # upper-case literal 'PRODUSERID', which could never be true.
            if fieldToLookAt.upper() == 'PRODUSERID':
                leadStar = True
                trailStar = True
            if leadStar and trailStar:
                extraQueryString += '( ' + fieldToLookAt + ' LIKE (\'%%' + parameter + '%%\'))'
            elif not leadStar and not trailStar:
                extraQueryString += '( ' + fieldToLookAt + ' LIKE (\'' + parameter + '\'))'
            elif leadStar and not trailStar:
                extraQueryString += '( ' + fieldToLookAt + ' LIKE (\'%%' + parameter + '\'))'
            elif not leadStar and trailStar:
                extraQueryString += '( ' + fieldToLookAt + ' LIKE (\'' + parameter + '%%\'))'
            currentRealParCount += 1
            if currentRealParCount < countRealParameters:
                extraQueryString += ' AND '
        currentParCount += 1
    extraQueryString += ')'
    return extraQueryString
def setupView(request, opmode='', hours=0, limit=-99, querytype='job', wildCardExt=False):
    """
    Build the Django ORM filter dict (and, optionally, a raw-SQL wildcard
    clause) for a job or task listing from the URL request parameters stored
    in request.session['requestParams'].

    :param request: Django request; session must already carry 'requestParams'
    :param opmode: default job type / 'notime' to disable the time window
    :param hours: caller-supplied default time depth (overridden by URL params)
    :param limit: caller-supplied row limit; -99 means "use defaults"
    :param querytype: 'job' or 'task' — selects which model's fields are used
    :param wildCardExt: when False returns only the query dict; when True
        returns (query, extraQueryString, LAST_N_HOURS_MAX)
    :return: query dict, or 3-tuple when wildCardExt is True

    NOTE(review): relies on module globals defined elsewhere in this file
    (VOMODE, standard_fields, Jobsactive4, JediTasks, defaultDatetimeFormat,
    escapeInput, cache, Jobparamstable) — confirm against full module.
    """
    viewParams = {}
    if not 'viewParams' in request.session:
        request.session['viewParams'] = viewParams
    LAST_N_HOURS_MAX = 0
    # jobname takes part in wildcard search only if it actually contains
    # wildcard ('*') or alternation ('|') characters
    excludeJobNameFromWildCard = True
    if 'jobname' in request.session['requestParams']:
        jobrequest = request.session['requestParams']['jobname']
        if (('*' in jobrequest) or ('|' in jobrequest)):
            excludeJobNameFromWildCard = False
    # collect the CharField names eligible for wildcard matching
    wildSearchFields = []
    if querytype == 'job':
        for field in Jobsactive4._meta.get_fields():
            if (field.get_internal_type() == 'CharField'):
                if not (field.name == 'jobstatus' or field.name == 'modificationhost' or field.name == 'batchid' or (
                        excludeJobNameFromWildCard and field.name == 'jobname')):
                    wildSearchFields.append(field.name)
    if querytype == 'task':
        for field in JediTasks._meta.get_fields():
            if (field.get_internal_type() == 'CharField'):
                if not (field.name == 'status' or field.name == 'modificationhost'):
                    wildSearchFields.append(field.name)
    deepquery = False
    fields = standard_fields
    # row limit: URL param > explicit caller limit > VO default
    if 'limit' in request.session['requestParams']:
        request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
    elif limit != -99 and limit > 0:
        request.session['JOB_LIMIT'] = limit
    elif VOMODE == 'atlas':
        request.session['JOB_LIMIT'] = 10000
    else:
        request.session['JOB_LIMIT'] = 10000
    if VOMODE == 'atlas':
        LAST_N_HOURS_MAX = 12
    else:
        LAST_N_HOURS_MAX = 7 * 24
    if VOMODE == 'atlas':
        if 'cloud' not in fields: fields.append('cloud')
        if 'atlasrelease' not in fields: fields.append('atlasrelease')
        if 'produsername' in request.session['requestParams'] or 'jeditaskid' in request.session[
            'requestParams'] or 'user' in request.session['requestParams']:
            if 'jobsetid' not in fields: fields.append('jobsetid')
            if ('hours' not in request.session['requestParams']) and (
                    'days' not in request.session['requestParams']) and (
                    'jobsetid' in request.session['requestParams'] or 'taskid' in request.session[
                'requestParams'] or 'jeditaskid' in request.session['requestParams']):
                ## Cases where deep query is safe. Unless the time depth is specified in URL.
                if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
                    deepquery = True
        else:
            if 'jobsetid' in fields: fields.remove('jobsetid')
    else:
        fields.append('vo')
    if hours > 0:
        ## Call param overrides default hours, but not a param on the URL
        LAST_N_HOURS_MAX = hours
    ## For site-specific queries, allow longer time window
    if 'computingsite' in request.session['requestParams']:
        LAST_N_HOURS_MAX = 12
    if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
        LAST_N_HOURS_MAX = 3 * 24
    ## hours specified in the URL takes priority over the above
    if 'hours' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['hours'])
    if 'days' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['days']) * 24
    ## Exempt single-job, single-task etc queries from time constraint
    if 'hours' not in request.session['requestParams'] and 'days' not in request.session['requestParams']:
        if 'jeditaskid' in request.session['requestParams']: deepquery = True
        if 'taskid' in request.session['requestParams']: deepquery = True
        if 'pandaid' in request.session['requestParams']: deepquery = True
        if 'jobname' in request.session['requestParams']: deepquery = True
        if 'batchid' in request.session['requestParams']: deepquery = True
    if deepquery:
        # deep query: effectively no time constraint (180 days, huge limit)
        opmode = 'notime'
        hours = LAST_N_HOURS_MAX = 24 * 180
        request.session['JOB_LIMIT'] = 999999
    # build the human-readable selection summary shown in the page header
    if opmode != 'notime':
        if LAST_N_HOURS_MAX <= 72 and not ('date_from' in request.session['requestParams'] or 'date_to' in request.session['requestParams']
                                           or 'earlierthan' in request.session['requestParams'] or 'earlierthandays' in request.session['requestParams']):
            request.session['viewParams']['selection'] = ", last %s hours" % LAST_N_HOURS_MAX
        else:
            request.session['viewParams']['selection'] = ", last %d days" % (float(LAST_N_HOURS_MAX) / 24.)
        # if JOB_LIMIT < 999999 and JOB_LIMIT > 0:
        #    viewParams['selection'] += ", <font style='color:#FF8040; size=-1'>Warning: limit %s per job table</font>" % JOB_LIMIT
        request.session['viewParams']['selection'] += ". <b>Params:</b> "
        # if 'days' not in requestParams:
        #    viewParams['selection'] += "hours=%s" % LAST_N_HOURS_MAX
        # else:
        #    viewParams['selection'] += "days=%s" % int(LAST_N_HOURS_MAX/24)
        if request.session['JOB_LIMIT'] < 100000 and request.session['JOB_LIMIT'] > 0:
            request.session['viewParams']['selection'] += " <b>limit=</b>%s" % request.session['JOB_LIMIT']
    else:
        request.session['viewParams']['selection'] = ""
    for param in request.session['requestParams']:
        if request.session['requestParams'][param] == 'None': continue
        if request.session['requestParams'][param] == '': continue
        if param == 'display_limit': continue
        if param == 'sortby': continue
        if param == 'limit' and request.session['JOB_LIMIT'] > 0: continue
        request.session['viewParams']['selection'] += " <b>%s=</b>%s " % (
            param, request.session['requestParams'][param])
    # resolve the [startdate, enddate] window from explicit dates or the
    # computed LAST_N_HOURS_MAX depth
    startdate = None
    if 'date_from' in request.session['requestParams']:
        time_from_struct = time.strptime(request.session['requestParams']['date_from'], '%Y-%m-%d')
        startdate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
    if not startdate:
        startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
        # startdate = startdate.strftime(defaultDatetimeFormat)
    enddate = None
    if 'date_to' in request.session['requestParams']:
        time_from_struct = time.strptime(request.session['requestParams']['date_to'], '%Y-%m-%d')
        enddate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
    if 'earlierthan' in request.session['requestParams']:
        enddate = timezone.now() - timedelta(hours=int(request.session['requestParams']['earlierthan']))
        # enddate = enddate.strftime(defaultDatetimeFormat)
    if 'earlierthandays' in request.session['requestParams']:
        enddate = timezone.now() - timedelta(hours=int(request.session['requestParams']['earlierthandays']) * 24)
        # enddate = enddate.strftime(defaultDatetimeFormat)
    if enddate == None:
        enddate = timezone.now()  # .strftime(defaultDatetimeFormat)
        request.session['noenddate'] = True
    else:
        request.session['noenddate'] = False
    # base ORM filter: the time window on modificationtime
    query = {
        'modificationtime__range': [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)]}
    request.session['TFIRST'] = startdate  # startdate[:18]
    request.session['TLAST'] = enddate  # enddate[:18]
    ### Add any extensions to the query determined from the URL
    for vo in ['atlas', 'core']:
        if request.META['HTTP_HOST'].startswith(vo):
            query['vo'] = vo
    # translate each request parameter into an ORM filter entry
    for param in request.session['requestParams']:
        if param in ('hours', 'days'): continue
        if param == 'cloud' and request.session['requestParams'][param] == 'All':
            continue
        elif param == 'priorityrange':
            mat = re.match('([0-9]+)\:([0-9]+)', request.session['requestParams'][param])
            if mat:
                plo = int(mat.group(1))
                phi = int(mat.group(2))
                query['currentpriority__gte'] = plo
                query['currentpriority__lte'] = phi
        elif param == 'jobsetrange':
            mat = re.match('([0-9]+)\:([0-9]+)', request.session['requestParams'][param])
            if mat:
                plo = int(mat.group(1))
                phi = int(mat.group(2))
                query['jobsetid__gte'] = plo
                query['jobsetid__lte'] = phi
        elif param == 'user' or param == 'username':
            if querytype == 'job':
                query['produsername__icontains'] = request.session['requestParams'][param].strip()
        elif param in ('project',) and querytype == 'task':
            val = request.session['requestParams'][param]
            query['taskname__istartswith'] = val
        elif param in ('outputfiletype',) and querytype != 'task':
            val = request.session['requestParams'][param]
            query['destinationdblock__icontains'] = val
        elif param in ('stream',) and querytype == 'task':
            val = request.session['requestParams'][param]
            query['taskname__icontains'] = val
        elif param in ('tag',) and querytype == 'task':
            val = request.session['requestParams'][param]
            query['taskname__endswith'] = val
        elif param == 'reqid_from':
            val = int(request.session['requestParams'][param])
            query['reqid__gte'] = val
        elif param == 'reqid_to':
            val = int(request.session['requestParams'][param])
            query['reqid__lte'] = val
        elif param == 'processingtype':
            val = request.session['requestParams'][param]
            query['processingtype'] = val
        elif param == 'mismatchedcloudsite' and request.session['requestParams'][param] == 'true':
            # cloud/site mismatch list is precomputed into the cache elsewhere
            listOfCloudSitesMismatched = cache.get('mismatched-cloud-sites-list')
            if (listOfCloudSitesMismatched is None) or (len(listOfCloudSitesMismatched) == 0):
                request.session['viewParams'][
                    'selection'] += " <b>The query can not be processed because list of mismatches is not found. Please visit %s/dash/production/?cloudview=region page and then try again</b>" % \
                                    request.session['hostname']
            else:
                extraQueryString = '('
                for count, cloudSitePair in enumerate(listOfCloudSitesMismatched):
                    extraQueryString += ' ( (cloud=\'%s\') and (computingsite=\'%s\') ) ' % (
                        cloudSitePair[1], cloudSitePair[0])
                    if (count < (len(listOfCloudSitesMismatched) - 1)):
                        extraQueryString += ' OR '
                extraQueryString += ')'
        # model-specific parameter handling: task fields vs job fields
        if querytype == 'task':
            for field in JediTasks._meta.get_fields():
                # for param in requestParams:
                if param == field.name:
                    if param == 'ramcount':
                        if 'GB' in request.session['requestParams'][param]:
                            leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
                            rightlimit = rightlimit[:-2]
                            query['%s__range' % param] = (int(leftlimit) * 1000, int(rightlimit) * 1000 - 1)
                        else:
                            query[param] = int(request.session['requestParams'][param])
                    elif param == 'transpath':
                        query['%s__endswith' % param] = request.session['requestParams'][param]
                    elif param == 'tasktype':
                        ttype = request.session['requestParams'][param]
                        if ttype.startswith('anal'):
                            ttype = 'anal'
                        elif ttype.startswith('prod'):
                            ttype = 'prod'
                        query[param] = ttype
                    elif param == 'superstatus':
                        val = escapeInput(request.session['requestParams'][param])
                        values = val.split('|')
                        query['superstatus__in'] = values
                    elif param == 'reqid':
                        val = escapeInput(request.session['requestParams'][param])
                        if val.find('|') >= 0:
                            values = val.split('|')
                            values = [int(val) for val in values]
                            query['reqid__in'] = values
                        else:
                            query['reqid'] = int(val)
                    elif param == 'eventservice':
                        if request.session['requestParams'][param] == 'eventservice' or \
                                request.session['requestParams'][param] == '1':
                            query['eventservice'] = 1
                        else:
                            query['eventservice'] = 0
                    else:
                        # plain equality for anything not handled by wildcards
                        if (param not in wildSearchFields):
                            query[param] = request.session['requestParams'][param]
        else:
            for field in Jobsactive4._meta.get_fields():
                if param == field.name:
                    if param == 'minramcount':
                        if 'GB' in request.session['requestParams'][param]:
                            leftlimit, rightlimit = (request.session['requestParams'][param]).split('-')
                            rightlimit = rightlimit[:-2]
                            query['%s__range' % param] = (int(leftlimit) * 1000, int(rightlimit) * 1000 - 1)
                        else:
                            query[param] = int(request.session['requestParams'][param])
                    elif param == 'specialhandling':
                        query['specialhandling__contains'] = request.session['requestParams'][param]
                    elif param == 'reqid':
                        val = escapeInput(request.session['requestParams'][param])
                        if val.find('|') >= 0:
                            values = val.split('|')
                            values = [int(val) for val in values]
                            query['reqid__in'] = values
                        else:
                            query['reqid'] = int(val)
                    elif param == 'transformation' or param == 'transpath':
                        query['%s__endswith' % param] = request.session['requestParams'][param]
                    elif param == 'modificationhost' and request.session['requestParams'][param].find('@') < 0:
                        query['%s__contains' % param] = request.session['requestParams'][param]
                    elif param == 'jeditaskid':
                        # ids below 4000000 live in the legacy 'taskid' column
                        if request.session['requestParams']['jeditaskid'] != 'None':
                            if int(request.session['requestParams']['jeditaskid']) < 4000000:
                                query['taskid'] = request.session['requestParams'][param]
                            else:
                                query[param] = request.session['requestParams'][param]
                    elif param == 'taskid':
                        if request.session['requestParams']['taskid'] != 'None': query[param] = \
                            request.session['requestParams'][param]
                    elif param == 'pandaid':
                        # non-numeric pandaid is treated as a jobname lookup
                        try:
                            pid = request.session['requestParams']['pandaid']
                            if pid.find(',') >= 0:
                                pidl = pid.split(',')
                                query['pandaid__in'] = pidl
                            else:
                                query['pandaid'] = int(pid)
                        except:
                            query['jobname'] = request.session['requestParams']['pandaid']
                    elif param == 'jobstatus' and request.session['requestParams'][param] == 'finished' and (('mode' in
                                                                                                             request.session[
                                                                                                                 'requestParams'] and
                                                                                                             request.session[
                                                                                                                 'requestParams'][
                                                                                                                 'mode'] == 'eventservice') or (
                            'jobtype' in request.session['requestParams'] and request.session['requestParams'][
                        'jobtype'] == 'eventservice')):
                        # for event service, 'finished' implicitly includes 'cancelled'
                        query['jobstatus__in'] = ('finished', 'cancelled')
                    elif param == 'jobstatus':
                        val = escapeInput(request.session['requestParams'][param])
                        values = val.split('|')
                        query['jobstatus__in'] = values
                    elif param == 'eventservice':
                        if request.session['requestParams'][param] == 'esmerge' or request.session['requestParams'][
                                param] == '2':
                            query['eventservice'] = 2
                        elif request.session['requestParams'][param] == 'eventservice' or \
                                request.session['requestParams'][param] == '1':
                            query['eventservice'] = 1
                        elif request.session['requestParams'][param] == 'not2':
                            # extraQueryString may not exist yet; NameError means "start fresh"
                            try:
                                extraQueryString += ' AND (eventservice != 2) '
                            except NameError:
                                extraQueryString = '(eventservice != 2)'
                        else:
                            query['eventservice__isnull'] = True
                    else:
                        if (param not in wildSearchFields):
                            query[param] = request.session['requestParams'][param]
    # derive the prodsourcelabel constraint from the requested job type
    if 'jobtype' in request.session['requestParams']:
        jobtype = request.session['requestParams']['jobtype']
    else:
        jobtype = opmode
    if jobtype.startswith('anal'):
        query['prodsourcelabel__in'] = ['panda', 'user', 'prod_test', 'rc_test']
    elif jobtype.startswith('prod'):
        query['prodsourcelabel__in'] = ['managed', 'prod_test', 'rc_test']
    elif jobtype == 'groupproduction':
        query['prodsourcelabel'] = 'managed'
        query['workinggroup__isnull'] = False
    elif jobtype == 'eventservice':
        query['eventservice'] = 1
    elif jobtype == 'esmerge':
        query['eventservice'] = 2
    elif jobtype.find('test') >= 0:
        query['prodsourcelabel__icontains'] = jobtype
    if (wildCardExt == False):
        return query
    # wildcard extension: build a raw-SQL clause for '*'/'|' patterns
    try:
        extraQueryString += ' AND '
    except NameError:
        extraQueryString = ''
    wildSearchFields = (set(wildSearchFields) & set(request.session['requestParams'].keys()))
    wildSearchFields1 = set()
    for currenfField in wildSearchFields:
        if not (currenfField.lower() == 'transformation'):
            if not ((currenfField.lower() == 'cloud') & (
                    any(card.lower() == 'all' for card in request.session['requestParams'][currenfField].split('|')))):
                wildSearchFields1.add(currenfField)
    wildSearchFields = wildSearchFields1
    lenWildSearchFields = len(wildSearchFields)
    currentField = 1
    for currenfField in wildSearchFields:
        extraQueryString += '('
        wildCards = request.session['requestParams'][currenfField].split('|')
        countCards = len(wildCards)
        currentCardCount = 1
        if not ((currenfField.lower() == 'cloud') & (any(card.lower() == 'all' for card in wildCards))):
            for card in wildCards:
                extraQueryString += preprocessWildCardString(card, currenfField)
                if (currentCardCount < countCards): extraQueryString += ' OR '
                currentCardCount += 1
        extraQueryString += ')'
        if (currentField < lenWildSearchFields): extraQueryString += ' AND '
        currentField += 1
    # jobparam patterns are resolved via Jobparamstable into a pandaid list
    if ('jobparam' in request.session['requestParams'].keys()):
        jobParWildCards = request.session['requestParams']['jobparam'].split('|')
        jobParCountCards = len(jobParWildCards)
        jobParCurrentCardCount = 1
        extraJobParCondition = '('
        for card in jobParWildCards:
            extraJobParCondition += preprocessWildCardString(escapeInput(card), 'JOBPARAMETERS')
            if (jobParCurrentCardCount < jobParCountCards): extraJobParCondition += ' OR '
            jobParCurrentCardCount += 1
        extraJobParCondition += ')'
        pandaIDs = []
        jobParamQuery = {'modificationtime__range': [startdate.strftime(defaultDatetimeFormat),
                                                     enddate.strftime(defaultDatetimeFormat)]}
        jobs = Jobparamstable.objects.filter(**jobParamQuery).extra(where=[extraJobParCondition]).values('pandaid')
        for values in jobs:
            pandaIDs.append(values['pandaid'])
        query['pandaid__in'] = pandaIDs
    if extraQueryString.endswith(' AND '):
        extraQueryString = extraQueryString[:-5]
    if (len(extraQueryString) < 2):
        extraQueryString = '1=1'
    return (query, extraQueryString, LAST_N_HOURS_MAX)
def dropRetrielsJobs(jobs, jeditaskid, isReturnDroppedPMerge):
    """
    Drop jobs of a single task that were superseded by a retry.

    Required keys on each job dict: PANDAID, JOBSTATUS, PROCESSINGTYPE, JOBSETID.

    :param jobs: list of job dicts belonging to one task
    :param jeditaskid: task id used to fetch the retry history
    :param isReturnDroppedPMerge: when True, surviving pmerge jobs stay in the
        returned job list instead of being collected into droppedPmerge
    :return: (surviving jobs, droplist of {'pandaid','newpandaid'} sorted by
        descending pandaid, set of set-aside pmerge pandaids)
    """
    droplist = []
    droppedIDs = set()
    droppedPmerge = set()
    retryquery = {}
    retryquery['jeditaskid'] = jeditaskid
    # retry history entries describing which pandaid superseded which
    retries = JediJobRetryHistory.objects.filter(**retryquery).extra(
        where=["OLDPANDAID!=NEWPANDAID AND RELATIONTYPE IN ('', 'retry', 'pmerge', 'merge', "
               "'jobset_retry', 'es_merge', 'originpandaid')"]).order_by('newpandaid').values()
    # print() calls are valid in both Python 2 and 3 (single argument)
    print('got retriels %d %d' % (len(retries), len(jobs)))
    print('doing the drop')
    hashRetries = {}
    for retry in retries:
        hashRetries[retry['oldpandaid']] = retry
    newjobs = []
    for job in jobs:
        dropJob = 0
        pandaid = job['pandaid']
        if not isEventService(job):
            # plain job: dropped when a direct retry, or a merge relation of a
            # failed/cancelled pmerge, points to a newer pandaid
            if pandaid in hashRetries:  # 'in' replaces deprecated dict.has_key()
                retry = hashRetries[pandaid]
                if retry['relationtype'] == '' or retry['relationtype'] == 'retry' or (
                        job['processingtype'] == 'pmerge' and
                        (job['jobstatus'] == 'failed' or job['jobstatus'] == 'cancelled') and
                        retry['relationtype'] == 'merge'):
                    dropJob = retry['newpandaid']
            else:
                # whole jobset was retried
                if (job['jobsetid'] in hashRetries) and (
                        hashRetries[job['jobsetid']]['relationtype'] == 'jobset_retry'):
                    dropJob = 1
        else:
            # event service job
            if (job['pandaid'] in hashRetries and job['jobstatus'] not in ('finished', 'merging')):
                if (hashRetries[job['pandaid']]['relationtype'] == 'retry'):
                    dropJob = 1
            if (dropJob == 0):
                # Bug fix: the original tested relationtype "in ('jobset_retry')",
                # i.e. a substring check against the *string* 'jobset_retry'
                # (missing one-element-tuple comma), which also matched 'retry'.
                if (job['jobsetid'] in hashRetries) and (
                        hashRetries[job['jobsetid']]['relationtype'] in ('jobset_retry',)):
                    dropJob = 1
                if (job['jobstatus'] == 'closed' and (job['jobsubstatus'] in ('es_unused', 'es_inaction'))):
                    dropJob = 1
        if dropJob == 0 and not isReturnDroppedPMerge:
            # pmerge jobs are set aside rather than listed, unless requested
            if not (job['processingtype'] == 'pmerge'):
                newjobs.append(job)
            else:
                droppedPmerge.add(pandaid)
        elif (dropJob == 0):
            newjobs.append(job)
        else:
            if pandaid not in droppedIDs:
                droppedIDs.add(pandaid)
                droplist.append({'pandaid': pandaid, 'newpandaid': dropJob})
    print('%d jobs dropped' % (len(jobs) - len(newjobs)))
    droplist = sorted(droplist, key=lambda x: -x['pandaid'])
    jobs = newjobs
    return jobs, droplist, droppedPmerge
def cleanJobListLite(request, jobl, mode='nodrop', doAddMeta=True):
    """
    Decorate every job dict in jobl with derived display fields:
    'duration'/'durationsec' (start to end, or to now while running),
    'waittime' (creation to start, or to now while pending), and the
    100-wide 'priorityrange'/'jobsetrange' labels. Jobs are modified
    in place and the same list is returned.
    """
    zero = timedelta(seconds=0)
    for j in jobl:
        j['duration'] = ""
        j['durationsec'] = 0
        # duration needs a start time; a missing end time means "still running"
        if 'endtime' in j and 'starttime' in j and j['starttime']:
            begin = j['starttime']
            finish = j['endtime'] if j['endtime'] else timezone.now()
            span = max(finish - begin, zero)
            days = span.days
            j['duration'] = "%s:%s" % (days, timedelta(seconds=span.seconds))
            j['durationsec'] = days * 24 * 3600 + span.seconds
        j['waittime'] = ""
        # wait time runs from creation until start (or now if not started)
        if 'creationtime' in j and 'starttime' in j and j['creationtime']:
            begun = j['starttime'] if j['starttime'] else timezone.now()
            pending = begun - j['creationtime']
            j['waittime'] = "%s:%s" % (pending.days, timedelta(seconds=pending.seconds))
        # bucket priorities and jobset ids into 100-wide "lo:hi" labels
        if 'currentpriority' in j:
            low = int(j['currentpriority']) // 100 * 100
            j['priorityrange'] = "%d:%d" % (low, low + 99)
        if 'jobsetid' in j and j['jobsetid']:
            low = int(j['jobsetid']) // 100 * 100
            j['jobsetrange'] = "%d:%d" % (low, low + 99)
    return jobl
def cleanJobList(request, jobl, mode='nodrop', doAddMeta=True):
    """
    Normalize a raw job list for display: optionally attach metadata, decode
    event-service/jobmetrics fields, derive duration/waittime/range labels,
    remove duplicate pandaids and (in drop mode) record the observed time and
    priority extremes in session/module globals before sorting.

    :param request: Django request carrying 'requestParams' in its session
    :param jobl: list of job dicts
    :param mode: 'nodrop' (default) returns after de-duplication; anything
        else also updates TFIRST/TLAST/PLOW/PHIGH and sorts by modificationtime
    :param doAddMeta: when True jobs are passed through addJobMetadata first
    :return: the cleaned (possibly re-sorted) list of job dicts
    """
    if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': mode = 'drop'
    doAddMetaStill = False
    if 'fields' in request.session['requestParams']:
        fieldsStr = request.session['requestParams']['fields']
        fields = fieldsStr.split("|")
        if 'metastruct' in fields:
            doAddMetaStill = True
    if doAddMeta:
        jobs = addJobMetadata(jobl, doAddMetaStill)
    else:
        jobs = jobl
    for job in jobs:
        if isEventService(job):
            #if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 111:
            # job['taskbuffererrordiag'] = 'Rerun scheduled to pick up unprocessed events'
            # job['piloterrorcode'] = 0
            # job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
            # job['jobstatus'] = 'finished'
            #if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 112:
            # job['taskbuffererrordiag'] = 'All events processed, merge job created'
            # job['piloterrorcode'] = 0
            # job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
            # job['jobstatus'] = 'finished'
            #if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 114:
            # job['taskbuffererrordiag'] = 'No rerun to pick up unprocessed, at max attempts'
            # job['piloterrorcode'] = 0
            # job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
            # job['jobstatus'] = 'finished'
            # if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 115:
            # job['taskbuffererrordiag'] = 'No events remaining, other jobs still processing'
            # job['piloterrorcode'] = 0
            # job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
            # #job['jobstatus'] = 'finished'
            #if 'taskbuffererrorcode' in job and job['taskbuffererrorcode'] == 116:
            # job['taskbuffererrordiag'] = 'No remaining event ranges to allocate'
            # job['piloterrorcode'] = 0
            # job['piloterrordiag'] = 'Job terminated by signal from PanDA server'
            # job['jobstatus'] = 'finished'
            # extract mode/HPCStatus/coreCount from the free-form jobmetrics string
            if 'jobmetrics' in job:
                pat = re.compile('.*mode\=([^\s]+).*HPCStatus\=([A-Za-z0-9]+)')
                mat = pat.match(job['jobmetrics'])
                if mat:
                    job['jobmode'] = mat.group(1)
                    job['substate'] = mat.group(2)
                pat = re.compile('.*coreCount\=([0-9]+)')
                mat = pat.match(job['jobmetrics'])
                if mat:
                    job['corecount'] = mat.group(1)
            if 'jobsubstatus' in job and job['jobstatus'] == 'closed' and job['jobsubstatus'] == 'toreassign':
                job['jobstatus'] += ':' + job['jobsubstatus']
        # decode the numeric eventservice flag into a display label
        if 'eventservice' in job:
            if job['eventservice'] == 1:
                job['eventservice'] = 'eventservice'
            elif job['eventservice'] == 2:
                job['eventservice'] = 'esmerge'
            else:
                job['eventservice'] = 'ordinary'
        # the output file type is encoded positionally in destinationdblock
        if 'destinationdblock' in job:
            ddbfields = job['destinationdblock'].split('.')
            if len(ddbfields) == 6:
                job['outputfiletype'] = ddbfields[4]
            elif len(ddbfields) >= 7:
                job['outputfiletype'] = ddbfields[6]
            else:
                job['outputfiletype'] = '?'
            # print job['destinationdblock'], job['outputfiletype']
        try:
            job['homecloud'] = homeCloud[job['computingsite']]
        except:
            job['homecloud'] = None
        if 'produsername' in job and not job['produsername']:
            if ('produserid' in job) and job['produserid']:
                job['produsername'] = job['produserid']
            else:
                job['produsername'] = 'Unknown'
        if job['transformation']: job['transformation'] = job['transformation'].split('/')[-1]
        if (job['jobstatus'] == 'failed' or job['jobstatus'] == 'cancelled') and 'brokerageerrorcode' in job:
            job['errorinfo'] = errorInfo(job, nchars=70)
        else:
            job['errorinfo'] = ''
        job['jobinfo'] = ''
        if isEventService(job):
            if 'taskbuffererrordiag' in job and len(job['taskbuffererrordiag']) > 0:
                job['jobinfo'] = job['taskbuffererrordiag']
            elif 'specialhandling' in job and job['specialhandling'] == 'esmerge':
                job['jobinfo'] = 'Event service merge job'
            else:
                job['jobinfo'] = 'Event service job'
        # duration: start to end (or to now while the job is still running)
        job['duration'] = ""
        job['durationsec'] = 0
        # if job['jobstatus'] in ['finished','failed','holding']:
        if 'endtime' in job and 'starttime' in job and job['starttime']:
            starttime = job['starttime']
            if job['endtime']:
                endtime = job['endtime']
            else:
                endtime = timezone.now()
            duration = max(endtime - starttime, timedelta(seconds=0))
            ndays = duration.days
            strduration = str(timedelta(seconds=duration.seconds))
            job['duration'] = "%s:%s" % (ndays, strduration)
            job['durationsec'] = ndays * 24 * 3600 + duration.seconds
        # wait time: creation to start (or to now if the job has not started)
        job['waittime'] = ""
        # if job['jobstatus'] in ['running','finished','failed','holding','cancelled','transferring']:
        if 'creationtime' in job and 'starttime' in job and job['creationtime']:
            creationtime = job['creationtime']
            if job['starttime']:
                starttime = job['starttime']
            else:
                starttime = timezone.now()
            wait = starttime - creationtime
            ndays = wait.days
            strwait = str(timedelta(seconds=wait.seconds))
            job['waittime'] = "%s:%s" % (ndays, strwait)
        # 100-wide "lo:hi" bucket labels for priority and jobsetid
        if 'currentpriority' in job:
            plo = int(job['currentpriority']) - int(job['currentpriority']) % 100
            phi = plo + 99
            job['priorityrange'] = "%d:%d" % (plo, phi)
        if 'jobsetid' in job and job['jobsetid']:
            plo = int(job['jobsetid']) - int(job['jobsetid']) % 100
            phi = plo + 99
            job['jobsetrange'] = "%d:%d" % (plo, phi)
        if 'corecount' in job and job['corecount'] is None:
            job['corecount'] = 1
    ## drop duplicate jobs
    droplist = []
    job1 = {}
    newjobs = []
    for job in jobs:
        pandaid = job['pandaid']
        dropJob = 0
        if pandaid in job1:
            ## This is a duplicate. Drop it.
            dropJob = 1
        else:
            job1[pandaid] = 1
        if (dropJob == 0):
            newjobs.append(job)
    jobs = newjobs
    if mode == 'nodrop':
        print 'job list cleaned'
        return jobs
    # drop mode: record observed time window and priority extremes
    # (PLOW/PHIGH are module globals read elsewhere in this file)
    global PLOW, PHIGH
    request.session['TFIRST'] = timezone.now()  # .strftime(defaultDatetimeFormat)
    request.session['TLAST'] = (timezone.now() - timedelta(hours=2400))  # .strftime(defaultDatetimeFormat)
    PLOW = 1000000
    PHIGH = -1000000
    for job in jobs:
        if job['modificationtime'] > request.session['TLAST']: request.session['TLAST'] = job['modificationtime']
        if job['modificationtime'] < request.session['TFIRST']: request.session['TFIRST'] = job['modificationtime']
        if job['currentpriority'] > PHIGH: PHIGH = job['currentpriority']
        if job['currentpriority'] < PLOW: PLOW = job['currentpriority']
    jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
    print 'job list cleaned'
    return jobs
def cleanTaskList(request, tasks):
    """
    Enrich a task list with display fields and per-task input-dataset
    progress, then sort it according to the 'sortby' request parameter.

    The task ids are pushed into a temporary DB table (TMP_IDS1) keyed by a
    random transaction key so the JediDatasets progress query can join on it;
    the rows are deleted again afterwards.

    :param request: Django request carrying 'requestParams' in its session
    :param tasks: list of task dicts (modified in place)
    :return: the sorted task list
    """
    # temp table name is schema-qualified only on the Oracle backend
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
    else:
        tmpTableName = "TMP_IDS1"
    for task in tasks:
        if task['transpath']: task['transpath'] = task['transpath'].split('/')[-1]
        if task['statechangetime'] == None: task['statechangetime'] = task['modificationtime']
    ## Get status of input processing as indicator of task progress
    dsquery = {}
    dsquery['type__in'] = ['input', 'pseudo_input']
    dsquery['masterid__isnull'] = True
    taskl = []
    for t in tasks:
        taskl.append(t['jeditaskid'])
    # dsquery['jeditaskid__in'] = taskl
    # random transaction key isolates this request's rows in the temp table
    random.seed()
    transactionKey = random.randrange(1000000)
    # connection.enter_transaction_management()
    new_cur = connection.cursor()
    for id in taskl:
        new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (
            tmpTableName, id, transactionKey))  # Backend dependable
    # connection.commit()
    dsets = JediDatasets.objects.filter(**dsquery).extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values(
        'jeditaskid', 'nfiles', 'nfilesfinished', 'nfilesfailed')
    # group dataset rows by owning task id
    dsinfo = {}
    if len(dsets) > 0:
        for ds in dsets:
            taskid = ds['jeditaskid']
            if taskid not in dsinfo:
                dsinfo[taskid] = []
            dsinfo[taskid].append(ds)
    # clean up this request's rows from the shared temp table
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # connection.commit()
    # connection.leave_transaction_management()
    for task in tasks:
        if 'totevrem' not in task:
            task['totevrem'] = None
        # decode numeric eventservice flag into a display label
        if 'eventservice' in task:
            if task['eventservice'] == 1:
                task['eventservice'] = 'eventservice'
            else:
                task['eventservice'] = 'ordinary'
        if 'errordialog' in task:
            if len(task['errordialog']) > 100: task['errordialog'] = task['errordialog'][:90] + '...'
        # production tasks in this reqid range get a DEfT request link
        if 'reqid' in task and task['reqid'] < 100000 and task['reqid'] > 100 and task['reqid'] != 300 and (
                ('tasktype' in task) and (not task['tasktype'].startswith('anal'))):
            task['deftreqid'] = task['reqid']
        if 'corecount' in task and task['corecount'] is None:
            task['corecount'] = 1
        # if task['status'] == 'running' and task['jeditaskid'] in dsinfo:
        # aggregate input-file counters across the task's input datasets
        dstotals = {}
        dstotals['nfiles'] = 0
        dstotals['nfilesfinished'] = 0
        dstotals['nfilesfailed'] = 0
        dstotals['pctfinished'] = 0
        dstotals['pctfailed'] = 0
        if (task['jeditaskid'] in dsinfo):
            nfiles = 0
            nfinished = 0
            nfailed = 0
            for ds in dsinfo[task['jeditaskid']]:
                if int(ds['nfiles']) > 0:
                    nfiles += int(ds['nfiles'])
                    nfinished += int(ds['nfilesfinished'])
                    nfailed += int(ds['nfilesfailed'])
            if nfiles > 0:
                dstotals = {}
                dstotals['nfiles'] = nfiles
                dstotals['nfilesfinished'] = nfinished
                dstotals['nfilesfailed'] = nfailed
                dstotals['pctfinished'] = int(100. * nfinished / nfiles)
                dstotals['pctfailed'] = int(100. * nfailed / nfiles)
        task['dsinfo'] = dstotals
    # sort by the requested key; default is descending jeditaskid
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        if sortby == 'time-ascending':
            tasks = sorted(tasks, key=lambda x: x['modificationtime'])
        if sortby == 'time-descending':
            tasks = sorted(tasks, key=lambda x: x['modificationtime'], reverse=True)
        if sortby == 'statetime-descending':
            tasks = sorted(tasks, key=lambda x: x['statechangetime'], reverse=True)
        elif sortby == 'priority':
            tasks = sorted(tasks, key=lambda x: x['taskpriority'], reverse=True)
        elif sortby == 'nfiles':
            tasks = sorted(tasks, key=lambda x: x['dsinfo']['nfiles'], reverse=True)
        elif sortby == 'pctfinished':
            tasks = sorted(tasks, key=lambda x: x['dsinfo']['pctfinished'], reverse=True)
        elif sortby == 'pctfailed':
            tasks = sorted(tasks, key=lambda x: x['dsinfo']['pctfailed'], reverse=True)
        elif sortby == 'taskname':
            tasks = sorted(tasks, key=lambda x: x['taskname'])
        elif sortby == 'jeditaskid' or sortby == 'taskid':
            tasks = sorted(tasks, key=lambda x: -x['jeditaskid'])
        elif sortby == 'cloud':
            tasks = sorted(tasks, key=lambda x: x['cloud'], reverse=True)
    else:
        sortby = "jeditaskid"
        tasks = sorted(tasks, key=lambda x: -x['jeditaskid'])
    return tasks
def jobSummaryDict(request, jobs, fieldlist=None):
    """ Return a dictionary summarizing the field values for the chosen most interesting fields

    Builds per-field value counts over *jobs* (fields taken from *fieldlist*
    or the module-level standard_fields), plus an event-service status
    summary pulled from ATLAS_PANDA.JEDI_EVENTS for any event-service jobs.
    Returns (suml, esjobdict): suml is a list of
    {'field': name, 'list': [{'kname': value, 'kvalue': count}, ...]}
    entries sorted by field name; esjobdict is currently always empty.
    """
    sumd = {}
    if fieldlist:
        flist = fieldlist
    else:
        flist = standard_fields
    for job in jobs:
        for f in flist:
            if f in job and job[f]:
                # Suppress small taskid values unless a per-user listing was requested
                if f == 'taskid' and int(job[f]) < 1000000 and 'produsername' not in request.session[
                    'requestParams']: continue
                if f == 'nucleus' and job[f] is None: continue
                if f == 'specialhandling':
                    # specialhandling is a space-separated token list; count each token
                    if not 'specialhandling' in sumd: sumd['specialhandling'] = {}
                    shl = job['specialhandling'].split()
                    for v in shl:
                        if not v in sumd['specialhandling']: sumd['specialhandling'][v] = 0
                        sumd['specialhandling'][v] += 1
                else:
                    if not f in sumd: sumd[f] = {}
                    if not job[f] in sumd[f]: sumd[f][job[f]] = 0
                    sumd[f][job[f]] += 1
        # These derived fields are summarized whenever present, even if not in flist
        for extra in ('jobmode', 'substate', 'outputfiletype'):
            if extra in job:
                if not extra in sumd: sumd[extra] = {}
                if not job[extra] in sumd[extra]: sumd[extra][job[extra]] = 0
                sumd[extra][job[extra]] += 1
    ## event service
    esjobdict = {}
    esjobs = []
    for job in jobs:
        if isEventService(job):
            esjobs.append(job['pandaid'])
            # esjobdict[job['pandaid']] = {}
            # for s in eventservicestatelist:
            # esjobdict[job['pandaid']][s] = 0
    if len(esjobs) > 0:
        sumd['eventservicestatus'] = {}
        # Push the (possibly huge) pandaid list through a temporary ID table
        # keyed by a random transaction key, instead of an IN clause
        if dbaccess['default']['ENGINE'].find('oracle') >= 0:
            tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
        else:
            tmpTableName = "TMP_IDS1"
        transactionKey = random.randrange(1000000)
        # connection.enter_transaction_management()
        new_cur = connection.cursor()
        executionData = []
        for id in esjobs:
            executionData.append((id, transactionKey))
        query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
        new_cur.executemany(query, executionData)
        # connection.commit()
        new_cur.execute("SELECT STATUS, COUNT(STATUS) AS COUNTSTAT FROM (SELECT /*+ dynamic_sampling(TMP_IDS1 0) cardinality(TMP_IDS1 10) INDEX_RS_ASC(ev JEDI_EVENTS_PANDAID_STATUS_IDX) NO_INDEX_FFS(ev JEDI_EVENTS_PK) NO_INDEX_SS(ev JEDI_EVENTS_PK) */ PANDAID, STATUS FROM ATLAS_PANDA.JEDI_EVENTS ev, %s WHERE TRANSACTIONKEY = %i AND PANDAID = ID) t1 GROUP BY STATUS" % (tmpTableName, transactionKey))
        evtable = dictfetchall(new_cur)
        new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
        # connection.commit()
        # connection.leave_transaction_management()
        for ev in evtable:
            # Translate the numeric event status into its symbolic name
            evstat = eventservicestatelist[ev['STATUS']]
            sumd['eventservicestatus'][evstat] = ev['COUNTSTAT']
        # for ev in evtable:
        # evstat = eventservicestatelist[ev['STATUS']]
        # if evstat not in sumd['eventservicestatus']:
        # sumd['eventservicestatus'][evstat] = 0
        # sumd['eventservicestatus'][evstat] += 1
        # #esjobdict[ev['PANDAID']][evstat] += 1
    ## convert to ordered lists
    suml = []
    for f in sumd:
        itemd = {}
        itemd['field'] = f
        iteml = []
        kys = sumd[f].keys()
        if f == 'minramcount':
            # Bucket RAM counts into 1 GB ranges
            newvalues = {}
            for ky in kys:
                roundedval = int(ky / 1000)
                if roundedval in newvalues:
                    newvalues[roundedval] += sumd[f][ky]
                else:
                    newvalues[roundedval] = sumd[f][ky]
            for ky in newvalues:
                iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})
            iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        else:
            if f in ('priorityrange', 'jobsetrange'):
                # Sort "low:high" range labels numerically by their lower bound
                skys = []
                for k in kys:
                    skys.append({'key': k, 'val': int(k[:k.index(':')])})
                skys = sorted(skys, key=lambda x: x['val'])
                kys = []
                for sk in skys:
                    kys.append(sk['key'])
            elif f in ('attemptnr', 'jeditaskid', 'taskid',):
                kys = sorted(kys, key=lambda x: int(x))
            else:
                kys.sort()
            for ky in kys:
                iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})
            if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
                iteml = sorted(iteml, key=lambda x: x['kvalue'], reverse=True)
            elif f not in ('priorityrange', 'jobsetrange', 'attemptnr', 'jeditaskid', 'taskid',):
                iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['field'])
    return suml, esjobdict
def siteSummaryDict(sites):
    """ Return a dictionary summarizing the field values for the chosen most interesting fields

    Counts standard_sitefields values across *sites* and classifies each site
    into test / production / analysis / multicloud categories. Returns a list
    of {'field', 'list'} entries sorted by field name.
    """
    sumd = {'category': {'test': 0, 'production': 0, 'analysis': 0, 'multicloud': 0}}
    for site in sites:
        for fld in standard_sitefields:
            if fld in site:
                counts = sumd.setdefault(fld, {})
                counts[site[fld]] = counts.get(site[fld], 0) + 1
        prodsite = True
        # ANALY* queues are analysis, 'test' anywhere in the id marks a test site
        if site['siteid'].find('ANALY') >= 0:
            prodsite = False
            sumd['category']['analysis'] += 1
        if site['siteid'].lower().find('test') >= 0:
            prodsite = False
            sumd['category']['test'] += 1
        mcloud = site['multicloud']
        # multicloud counts in addition to (not instead of) the other categories
        if (mcloud is not None) and (mcloud != 'None') and re.match('[A-Z]+', mcloud):
            sumd['category']['multicloud'] += 1
        if prodsite:
            sumd['category']['production'] += 1
    # NOTE(review): raises KeyError if no site carried a 'cloud' field -- preserved as-is
    if VOMODE != 'atlas':
        del sumd['cloud']
    ## convert to ordered lists
    suml = []
    for fld in sumd:
        iteml = [{'kname': ky, 'kvalue': sumd[fld][ky]} for ky in sorted(sumd[fld].keys())]
        suml.append({'field': fld, 'list': iteml})
    return sorted(suml, key=lambda x: x['field'])
def userSummaryDict(jobs):
    """ Return a dictionary summarizing the field values for the chosen most interesting fields

    Aggregates *jobs* per user (lower-cased produsername, 'Unknown' when
    missing): per-state counts, total CPU time, sites/clouds used, queued
    count, latest modification time and highest pandaid. Returns a list of
    {'name', 'latest', 'dict'} entries sorted by most recent pandaid.
    """
    sumd = {}
    for job in jobs:
        if 'produsername' in job and job['produsername'] != None:
            user = job['produsername'].lower()
        else:
            user = 'Unknown'
        if not user in sumd:
            # First job seen for this user: initialize all counters
            sumd[user] = {}
            for state in statelist:
                sumd[user][state] = 0
            sumd[user]['name'] = job['produsername']
            sumd[user]['cputime'] = 0
            sumd[user]['njobs'] = 0
            for state in statelist:
                sumd[user]['n' + state] = 0
            sumd[user]['nsites'] = 0
            sumd[user]['sites'] = {}
            sumd[user]['nclouds'] = 0
            sumd[user]['clouds'] = {}
            sumd[user]['nqueued'] = 0
            sumd[user]['latest'] = timezone.now() - timedelta(hours=2400)
            sumd[user]['pandaid'] = 0
        cloud = job['cloud']
        site = job['computingsite']
        cpu = float(job['cpuconsumptiontime']) / 1.
        state = job['jobstatus']
        if job['modificationtime'] > sumd[user]['latest']: sumd[user]['latest'] = job['modificationtime']
        if job['pandaid'] > sumd[user]['pandaid']: sumd[user]['pandaid'] = job['pandaid']
        sumd[user]['cputime'] += cpu
        sumd[user]['njobs'] += 1
        if 'n%s' % (state) not in sumd[user]:
            sumd[user]['n' + state] = 0
        sumd[user]['n' + state] += 1
        if not site in sumd[user]['sites']: sumd[user]['sites'][site] = 0
        sumd[user]['sites'][site] += 1
        # Bug fix: the membership test used 'site' while the counter is keyed
        # by 'cloud', which could raise KeyError (site key present, cloud key
        # absent) and miscounted per-cloud totals.
        if not cloud in sumd[user]['clouds']: sumd[user]['clouds'][cloud] = 0
        sumd[user]['clouds'][cloud] += 1
    for user in sumd:
        sumd[user]['nsites'] = len(sumd[user]['sites'])
        sumd[user]['nclouds'] = len(sumd[user]['clouds'])
        # "queued" = jobs not yet running or finished
        sumd[user]['nqueued'] = sumd[user]['ndefined'] + sumd[user]['nwaiting'] + sumd[user]['nassigned'] + sumd[user][
            'nactivated']
        sumd[user]['cputime'] = "%d" % float(sumd[user]['cputime'])
    ## convert to list ordered by username
    suml = []
    for u in sorted(sumd.keys()):
        uitem = {}
        uitem['name'] = u
        # NOTE(review): 'latest' is filled with the highest pandaid, not the
        # latest timestamp -- preserved from the original.
        uitem['latest'] = sumd[u]['pandaid']
        uitem['dict'] = sumd[u]
        suml.append(uitem)
    suml = sorted(suml, key=lambda x: -x['latest'])
    return suml
def taskSummaryDict(request, tasks, fieldlist=None):
    """ Return a dictionary summarizing the field values for the chosen most interesting fields

    Counts values of *fieldlist* (or standard_taskfields) across *tasks*,
    deriving pseudo-fields project/stream/tag from 5-component task names.
    For analysis listings ('tasktype' starting with 'analy') the noisy
    reqid/stream/tag fields are skipped. Returns a list of
    {'field', 'list'} entries sorted by field name.
    """
    sumd = {}
    if fieldlist:
        flist = fieldlist
    else:
        flist = standard_taskfields
    for task in tasks:
        for f in flist:
            if 'tasktype' in request.session['requestParams'] and request.session['requestParams'][
                    'tasktype'].startswith('analy'):
                ## Remove the noisy useless parameters in analysis listings
                # Bug fix: the original tested "flist in (...)" (a list against
                # a tuple of strings), which is always False, so the filter
                # never fired.
                if f in ('reqid', 'stream', 'tag'): continue
            if len(task['taskname'].split('.')) == 5:
                # Task names like project.X.stream.X.tags yield pseudo-fields
                if f == 'project':
                    try:
                        if not f in sumd: sumd[f] = {}
                        project = task['taskname'].split('.')[0]
                        if not project in sumd[f]: sumd[f][project] = 0
                        sumd[f][project] += 1
                    except Exception:
                        pass
                if f == 'stream':
                    try:
                        if not f in sumd: sumd[f] = {}
                        stream = task['taskname'].split('.')[2]
                        # Skip purely numeric components (run numbers)
                        if not re.match('[0-9]+', stream):
                            if not stream in sumd[f]: sumd[f][stream] = 0
                            sumd[f][stream] += 1
                    except Exception:
                        pass
                if f == 'tag':
                    try:
                        if not f in sumd: sumd[f] = {}
                        tags = task['taskname'].split('.')[4]
                        if not tags.startswith('job_'):
                            tagl = tags.split('_')
                            tag = tagl[-1]
                            if not tag in sumd[f]: sumd[f][tag] = 0
                            sumd[f][tag] += 1
                        # for tag in tagl:
                        #     if not tag in sumd[f]: sumd[f][tag] = 0
                        #     sumd[f][tag] += 1
                    except Exception:
                        pass
            if f in task and task[f]:
                val = task[f]
                # Normalize the legacy 'anal' task type label
                if val == 'anal': val = 'analy'
                if not f in sumd: sumd[f] = {}
                if not val in sumd[f]: sumd[f][val] = 0
                sumd[f][val] += 1
    ## convert to ordered lists
    suml = []
    for f in sumd:
        itemd = {}
        itemd['field'] = f
        iteml = []
        kys = sorted(sumd[f].keys())
        if f != 'ramcount':
            for ky in kys:
                iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})
            iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        else:
            # Bucket RAM counts into 1 GB ranges
            newvalues = {}
            for ky in kys:
                roundedval = int(ky / 1000)
                if roundedval in newvalues:
                    newvalues[roundedval] += sumd[f][ky]
                else:
                    newvalues[roundedval] = sumd[f][ky]
            for ky in newvalues:
                iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})
            iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['field'])
    return suml
def wgTaskSummary(request, fieldname='workinggroup', view='production', taskdays=3):
    """ Return a dictionary summarizing the field values for the chosen most interesting fields

    Aggregates JEDI task state counts per *fieldname* value over the last
    *taskdays* days, optionally filtered by processingtype / workinggroup /
    project from the request. Returns a list of
    {'field', 'count', 'list'} entries sorted by field value.
    """
    # Restrict to tasks modified within the last `taskdays` days
    windowstart = timezone.now() - timedelta(hours=24 * taskdays)
    query = {
        'modificationtime__range': [windowstart.strftime(defaultDatetimeFormat),
                                    timezone.now().strftime(defaultDatetimeFormat)],
    }
    if fieldname == 'workinggroup':
        query['workinggroup__isnull'] = False
    if view == 'production':
        query['tasktype'] = 'prod'
    elif view == 'analysis':
        query['tasktype'] = 'anal'
    params = request.session['requestParams']
    if 'processingtype' in params:
        query['processingtype'] = params['processingtype']
    if 'workinggroup' in params:
        query['workinggroup'] = params['workinggroup']
    if 'project' in params:
        query['taskname__istartswith'] = params['project']
    summary = JediTasks.objects.filter(**query).values(fieldname, 'status').annotate(Count('status')).order_by(
        fieldname, 'status')
    tottasks = 0
    totstates = dict((state, 0) for state in taskstatelist)
    wgsum = {}
    for rec in summary:
        wg = rec[fieldname]
        status = rec['status']
        count = rec['status__count']
        # Ignore states outside the displayed task-state list
        if status not in taskstatelist:
            continue
        tottasks += count
        totstates[status] += count
        if wg not in wgsum:
            # First record for this group: create zeroed per-state counters
            wgsum[wg] = {
                'name': wg,
                'count': 0,
                'statelist': [],
                'states': dict((state, {'name': state, 'count': 0}) for state in taskstatelist),
            }
        wgsum[wg]['count'] += count
        wgsum[wg]['states'][status]['count'] += count
    ## convert to ordered lists
    suml = []
    for wg in wgsum:
        suml.append({
            'field': wg,
            'count': wgsum[wg]['count'],
            'list': [{'kname': st, 'kvalue': wgsum[wg]['states'][st]['count']} for st in taskstatelist],
        })
    return sorted(suml, key=lambda x: x['field'])
def extensibleURL(request, xurl=''):
    """ Return a URL that is ready for p=v query extension(s) to be appended

    Strips a trailing slash (except on tag/job/task detail URLs, whose slash
    is significant), then appends '&' if the URL already has a query string
    or '?' otherwise.
    """
    if xurl == '':
        xurl = request.get_full_path()
    if xurl.endswith('/'):
        # Bug fix: the original condition "'tag' or '/job/' or '/task/' in xurl"
        # was always truthy ('tag' is a non-empty string), so the trailing slash
        # was never stripped for any URL.
        if not ('tag' in xurl or '/job/' in xurl or '/task/' in xurl):
            xurl = xurl[:-1]
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    # if 'jobtype' in requestParams:
    #     xurl += "jobtype=%s&" % requestParams['jobtype']
    return xurl
def mainPage(request):
    """Render the main landing page, or a short stub for JSON clients.

    Renders core-mainPage.html with view/request parameters (plus Django
    settings and environment dump when the session debug flag is set);
    returns a minimal text response when the client asks for JSON via the
    Accept header or a 'json' request parameter.
    """
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request)
    debuginfo = None
    if request.session['debug']:
        # Dump Django settings and the process environment into the page
        debuginfo = "<h2>Debug info</h2>"
        from django.conf import settings
        for name in dir(settings):
            debuginfo += "%s = %s<br>" % (name, getattr(settings, name))
        debuginfo += "<br>******* Environment<br>"
        for env in os.environ:
            debuginfo += "%s = %s<br>" % (env, os.environ[env])
    # Bug fix: "in ('application/json')" tested substring membership in a
    # string (missing comma makes the parentheses a no-op); use a real
    # one-element tuple for an exact match, as done elsewhere in this file.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'prefix': getPrefix(request),
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'debuginfo': debuginfo,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('core-mainPage.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    elif (('HTTP_ACCEPT' in request.META) and request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json')) or (
            'json' in request.session['requestParams']):
        return HttpResponse('json', content_type='text/html')
    else:
        return HttpResponse('not understood', content_type='text/html')
def helpPage(request):
    """Render the complete help page, or a short stub for JSON clients."""
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request)
    del request.session['TFIRST']
    del request.session['TLAST']
    # Bug fix: "in ('application/json')" tested substring membership in a
    # string (missing comma makes the parentheses a no-op); use a real
    # one-element tuple for an exact match, as done elsewhere in this file.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        data = {
            'prefix': getPrefix(request),
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('completeHelp.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    elif request.META.get('CONTENT_TYPE', 'text/plain') == 'application/json':
        return HttpResponse('json', content_type='text/html')
    else:
        return HttpResponse('not understood', content_type='text/html')
def errorInfo(job, nchars=300, mode='html'):
    """Summarize the error fields of a job record.

    mode='html' returns the accumulated multi-line HTML description;
    any other mode returns a normalized one-line summary truncated to
    *nchars* characters.
    """
    # (code field, diag field, HTML label, one-line summary format)
    components = (
        ('brokerageerrorcode', 'brokerageerrordiag', 'Brokerage error', 'Broker: %s'),
        ('ddmerrorcode', 'ddmerrordiag', 'DDM error', 'DDM: %s'),
        ('exeerrorcode', 'exeerrordiag', 'Executable error', 'Exe: %s'),
        ('jobdispatchererrorcode', 'jobdispatchererrordiag', 'Dispatcher error', 'Dispatcher: %s'),
        ('piloterrorcode', 'piloterrordiag', 'Pilot error', 'Pilot: %s'),
        ('superrorcode', 'superrordiag', 'Sup error', '%s'),
        ('taskbuffererrorcode', 'taskbuffererrordiag', 'Task buffer error', 'Taskbuffer: %s'),
    )
    errtxt = ''
    err1 = ''
    desc, codesDescribed = getErrorDescription(job, provideProcessedCodes=True)
    # Collect every non-zero error component not already covered by `desc`;
    # err1 keeps only the first one found
    for codefield, diagfield, label, shortfmt in components:
        if int(job[codefield]) != 0 and int(job[codefield]) not in codesDescribed:
            errtxt += '%s %s: %s <br>' % (label, job[codefield], job[diagfield])
            if err1 == '':
                err1 = shortfmt % job[diagfield]
    if job['transexitcode'] != '' and job['transexitcode'] is not None and int(job['transexitcode']) > 0 and int(job['transexitcode']) not in codesDescribed:
        errtxt += 'Trf exit code %s.' % job['transexitcode']
        if err1 == '':
            err1 = 'Trf exit code %s' % job['transexitcode']
    if len(desc) > 0:
        errtxt += '%s<br>' % desc
        if err1 == '':
            err1 = getErrorDescription(job, mode='string')
    # NOTE(review): 'ret' is computed but never returned -- mode='html' gets
    # the untruncated errtxt. Preserved as-is; confirm intent before changing.
    if len(errtxt) > nchars:
        ret = errtxt[:nchars] + '...'
    else:
        ret = errtxt[:nchars]
    # Normalize the one-line summary
    if err1.find('lost heartbeat') >= 0:
        err1 = 'lost heartbeat'
    if err1.lower().find('unknown transexitcode') >= 0:
        err1 = 'unknown transexit'
    if err1.find(' at ') >= 0:
        err1 = err1[:err1.find(' at ') - 1]
    if errtxt.find('lost heartbeat') >= 0:
        err1 = 'lost heartbeat'
    err1 = err1.replace('\n', ' ')
    if mode == 'html':
        return errtxt
    else:
        return err1[:nchars]
def jobParamList(request):
    """Return the job parameters for the pandaid(s) in the request, as JSON."""
    reqparams = request.session['requestParams']
    pandaids = []
    if 'pandaid' in reqparams:
        # 'pandaid' is a comma-separated list of ids
        pandaids = [int(pid) for pid in reqparams['pandaid'].split(',')]
    jobparams = Jobparamstable.objects.filter(**{'pandaid__in': pandaids}).values()
    wants_json = (('HTTP_ACCEPT' in request.META) and (
        request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams'])
    if wants_json:
        return HttpResponse(json.dumps(jobparams, cls=DateEncoder), content_type='text/html')
    else:
        return HttpResponse('not supported', content_type='text/html')
def jobSummaryDictProto(request, cutsummary, requestToken):
    """Build a jobs-page summary from the pre-aggregated
    JOBSPAGE_CUMULATIVE_RESULT rows belonging to *requestToken*.

    When *cutsummary* is truthy only the first 5 values per field are kept.
    Returns (sumd, esjobdict, jobsToList, njobs, errsByCount):
    sumd is a list of {'field', 'list'} summaries, esjobdict is always
    empty, jobsToList is the set of PANDAIDs seen, njobs counts jobs via
    COMPUTINGSITE, errsByCount lists decoded error-code entries.
    """
    esjobdict = []
    # Bug fix: the statement previously ended with ";)" -- a stray parenthesis
    # and a trailing semicolon, which Oracle rejects when sent via the driver.
    sqlRequest = "SELECT ATTR, ATTR_VALUE, NUM_OCCUR FROM ATLAS_PANDABIGMON.JOBSPAGE_CUMULATIVE_RESULT WHERE " \
                 "REQUEST_TOKEN=%s AND ATTR_VALUE <> 'END' ORDER BY ATTR, ATTR_VALUE" % str(requestToken)
    cur = connection.cursor()
    cur.execute(sqlRequest)
    rawsummary = cur.fetchall()
    cur.close()
    errsByCount = []
    summaryhash = {}
    # row layout: (ATTR, ATTR_VALUE, NUM_OCCUR)
    for row in rawsummary:
        if row[0] in summaryhash:
            if row[1] in summaryhash[row[0]]:
                summaryhash[row[0]][row[1]] += row[2]
            else:
                summaryhash[row[0]][row[1]] = row[2]
        else:
            item = {}
            item[row[1]] = row[2]
            summaryhash[row[0]] = item
    # second checkpoint
    shkeys = summaryhash.keys()
    sumd = []
    jobsToList = set()
    njobs = 0
    for shkey in shkeys:
        if shkey != 'PANDAID' and shkey != 'ErrorCode' and shkey != 'MINRAMCOUNT':
            # Ordinary attribute: emit (value, count) pairs, optionally cut to 5
            entry = {}
            entry['field'] = shkey
            entrlist = []
            if (cutsummary):
                cutlen = 5
            else:
                cutlen = len(summaryhash[shkey])
            for subshkey in list(summaryhash[shkey])[0:cutlen]:
                subentry = {}
                subentry['kname'] = subshkey
                subentry['kvalue'] = summaryhash[shkey][subshkey]
                if (shkey == 'COMPUTINGSITE'):
                    njobs += summaryhash[shkey][subshkey]
                entrlist.append(subentry)
            entry['list'] = entrlist
            sumd.append(entry)
        elif shkey == 'PANDAID':
            for subshkey in summaryhash[shkey]:
                jobsToList.add(subshkey)
        elif shkey == 'MINRAMCOUNT':
            entry = {}
            entry['field'] = shkey
            entrlist = []
            newvalues = {}
            for subshkey in summaryhash[shkey].keys():
                # Bug fix: ATTR_VALUE is handled as a string by getJobList
                # (int(int(subshkey) / 1000)); dividing it directly would fail,
                # so convert before bucketing into 1 GB ranges.
                roundedval = int(int(subshkey) / 1000)
                if roundedval in newvalues:
                    newvalues[roundedval] += summaryhash[shkey][subshkey]
                else:
                    newvalues[roundedval] = summaryhash[shkey][subshkey]
            for ky in newvalues:
                entrlist.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})
            entrlist = sorted(entrlist, key=lambda x: str(x['kname']).lower())
            entry['list'] = entrlist
            sumd.append(entry)
        elif shkey == 'ErrorCode':
            # Values look like "<component>:<code>"; attach a diagnostic text
            # when the code is known in errorCodes
            for subshkey in summaryhash[shkey]:
                errval = {}
                errval['codename'] = subshkey.split(':')[0]
                errval['codeval'] = subshkey.split(':')[1]
                errval['count'] = summaryhash[shkey][subshkey]
                errval['error'] = subshkey
                error = [it['error'] for it in errorcodelist if it['name'] == errval['codename'].lower()]
                if len(error) > 0 and error[0] in errorCodes and int(errval['codeval']) in errorCodes[error[0]]:
                    errval['diag'] = errorCodes[error[0]][int(errval['codeval'])]
                errsByCount.append(errval)
    return sumd, esjobdict, jobsToList, njobs, errsByCount
def postpone(function):
    """Decorator: run the wrapped function asynchronously in a daemon thread.

    Improvements over the original: the wrapper preserves the wrapped
    function's metadata (functools.wraps) and returns the started Thread so
    callers may join() it; previously it returned None, so this is
    backward-compatible.
    """
    @functools.wraps(function)
    def decorator(*args, **kwargs):
        t = Thread(target=function, args=args, kwargs=kwargs)
        t.daemon = True  # do not block interpreter shutdown
        t.start()
        return t
    return decorator
@postpone
def startDataRetrieve(request, dropmode, query, requestToken, wildCardExtension):
    # Runs asynchronously (see @postpone): assembles and executes the
    # QUERY_JOBSPAGE_CUMULATIVE stored-procedure call that fills
    # JOBSPAGE_CUMULATIVE_RESULT for this requestToken.
    # NOTE(review): the PL/SQL block is built by string concatenation from
    # request-derived values -- confirm inputs are sanitized upstream.
    plsql = "BEGIN ATLAS_PANDABIGMON.QUERY_JOBSPAGE_CUMULATIVE("
    plsql += " REQUEST_TOKEN=>"+str(requestToken)+", "
    requestFields = {}
    a = datetime.strptime(query['modificationtime__range'][0], defaultDatetimeFormat)
    b = datetime.strptime(query['modificationtime__range'][1], defaultDatetimeFormat)
    delta = b - a
    # Selected time window expressed in (fractional) days
    range = delta.days+delta.seconds/86400.0
    #if (range == 180.0):
    # plsql += " RANGE_DAYS=>null, "
    #else:
    # plsql += " RANGE_DAYS=>"+str(range)+", "
    for item in request.GET:
        requestFields[item.lower()] = request.GET[item]
    if (('jeditaskid' in requestFields) and range == 180.0): #This is a temporary patch to avoid absence of pandaids
        plsql += " RANGE_DAYS=>null, "
    else:
        plsql += " RANGE_DAYS=>" + str(range) + ", "
    if not dropmode:
        plsql += " WITH_RETRIALS=>'Y', "
    else:
        plsql += " WITH_RETRIALS=>'N', "
    if ('noenddate' in request.session and request.session['noenddate'] == False):
        plsql += " END_DATE=>'"+str(b.date().strftime('%d-%m-%Y'))+"', "
    if ('pandaid' in requestFields):
        # Pass an explicit pandaid list, dropping anything non-numeric
        plsql += " PANDAID=>("
        pandaIdRequest = requestFields['pandaid'].split(',')
        for pandaID in pandaIdRequest:
            try:
                pandaID = int(pandaID)
                plsql += str(pandaID) + ','
            except:
                pass # it is better to add here wrong data handler
        plsql = plsql[:-1] +'), '
    # Map each standard job field found in the Django-style query (or in the
    # wildcard SQL extension) onto a stored-procedure keyword argument
    for item in standard_fields:
        if ((item + '__in') in query):
            plsql += " " + item.upper() + "=>'" + str(query[item+'__in'][0]) + "', "
        if ((item + '__endswith') in query and item=='transformation'):
            plsql += " " + item.upper() + "=>'" + str(query[item+'__endswith']) + "', "
        elif (item in query):
            plsql += " " + item.upper() + "=>'" + str(query[item]) + "', "
        elif (((item + '__range') in query) and (item == 'minramcount')):
            plsql += " " + item.upper() + "=>'" + str(query[item + '__range']) + "', "
        else:
            # Recover a quoted wildcard value for this field from the raw SQL extension
            pos = wildCardExtension.find(item, 0)
            if pos > 0:
                firstc = wildCardExtension.find("'", pos) + 1
                sec = wildCardExtension.find("'", firstc)
                value = wildCardExtension[firstc: sec]
                plsql += " "+item.upper()+"=>'"+value+"', "
    plsql = plsql[:-2]
    plsql += "); END;;"
    print plsql
    # Here we call stored proc to fill temporary data
    cursor = connection.cursor()
    countCalls = 0
    # Retry up to 3 times; ORA-08103 ("object no longer exists") is retried,
    # any other error aborts the loop
    while (countCalls < 3):
        try:
            cursor.execute(plsql)
            countCalls += 1
        except Exception as ex:
            print ex
            if ex[0].code == 8103:
                pass
            else:
                break
    cursor.close()
    # plsql = """BEGIN ATLAS_PANDABIGMON.QUERY_JOBSPAGE_CUMULATIVE(:REQUEST_TOKEN, :RANGE_DAYS); END;;"""
    # cursor.execute(plsql, {'REQUEST_TOKEN': 54, 'RANGE_DAYS': 1})
def jobListP(request, mode=None, param=None):
    # Jobs-page entry point backed by the cumulative stored-procedure
    # pipeline: obtains a request token from an Oracle sequence, kicks off
    # the asynchronous retrieval (startDataRetrieve) and returns the wrapper
    # page; JSON requests get the collected job list via getJobList instead.
    valid, response = initRequest(request)
    #if 'JOB_LIMIT' in request.session:
    # del request.session['JOB_LIMIT']
    # Hack to void limit caption in the params label
    request.session['requestParams']['limit'] = 10000000
    #is_json = False
    # Here We start Retreiving Summary and return almost empty template
    if ('requesttoken' in request.session):
        print 'Existing'
    # Get request token. This sheme of getting tokens should be more sophisticated (at least not use sequential numbers)
    requestToken = 0
    # Parameter-less (or json-only) requests do not need a token
    if len(request.GET.values()) == 0:
        requestToken = -1
    elif len(request.GET.values()) == 1 and 'json' in request.GET:
        requestToken = -1
    else:
        sqlRequest = "SELECT ATLAS_PANDABIGMON.PANDAMON_REQUEST_TOKEN_SEQ.NEXTVAL as my_req_token FROM dual;"
        cur = connection.cursor()
        cur.execute(sqlRequest)
        requestToken = cur.fetchall()
        cur.close()
        requestToken = requestToken[0][0]
        if (requestToken == 0):
            print "Error in getting reuest token"
            return
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
    # dropmode: whether superseded retries are filtered out (default: drop)
    dropmode = True
    if 'mode' in request.session['requestParams'] and request.session['requestParams'][
        'mode'] == 'drop': dropmode = True
    if 'mode' in request.session['requestParams'] and request.session['requestParams'][
        'mode'] == 'nodrop': dropmode = False
    requestFields = {}
    for item in request.GET:
        requestFields[item.lower()] = request.GET[item]
    if not (requestToken == -1):
        # Fire-and-forget: @postpone runs the retrieval in a daemon thread
        startDataRetrieve(request, dropmode, query, requestToken, wildCardExtension)
    #request.session['viewParams']['selection'] = request.session['viewParams']['selection'][:request.session['viewParams']['selection'].index('<b>limit=</b>')]
    if 'json' not in request.session['requestParams']:
        # HTML path: return the wrapper page; the browser polls for results
        data = {
            'requesttoken': requestToken,
            'tfirst': request.session['TFIRST'],
            'tlast': request.session['TLAST'],
            'viewParams': request.session['viewParams'] if 'viewParams' in request.session else None,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        del request.session['TFIRST']
        del request.session['TLAST']
        response = render_to_response('jobListWrapper.html', data, RequestContext(request))
        endSelfMonitor(request)
        return response
    else:
        # JSON path: block until the data is collected and return it directly
        data = getJobList(request,requestToken)
        response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def getJobList(request,requesttoken=None):
rawsummary={}
if 'requestParams' in request.session and u'display_limit' in request.session['requestParams']:
display_limit = int(request.session['requestParams']['display_limit'])
url_nolimit = removeParam(request.get_full_path(), 'display_limit')
else:
display_limit = 100
url_nolimit = request.get_full_path()
njobsmax = display_limit
cur = connection.cursor()
if 'requesttoken' in request.GET:
sqlRequest = "SELECT * FROM ATLAS_PANDABIGMON.JOBSPAGE_CUMULATIVE_RESULT WHERE REQUEST_TOKEN=%s" % request.GET[
'requesttoken']
cur.execute(sqlRequest)
rawsummary = cur.fetchall()
# if 'requesttoken' not in request.session:
# request.session['requesttoken'] = request.REQUEST[
# 'requesttoken']
else:
sqlRequest = "SELECT * FROM ATLAS_PANDABIGMON.JOBSPAGE_CUMULATIVE_RESULT WHERE REQUEST_TOKEN=%s" % int(requesttoken)
while len(rawsummary) == 0:
cur.execute(sqlRequest)
rawsummary = cur.fetchall()
time.sleep(10)
# if 'requesttoken' not in request.session:
# request.session['requesttoken'] = requesttoken
cur.close()
#if 'requesttoken' not in request.GET:
# return HttpResponse('')
errsByCount = []
summaryhash = {}
doRefresh = True
for row in rawsummary:
if row[2] == 'END':
doRefresh = False
else:
if row[1] in summaryhash:
if row[1] in summaryhash[row[1]]:
summaryhash[row[1]][row[2]] += row[3]
else:
summaryhash[row[1]][row[2]] = row[3]
else:
item = {}
item[row[2]] = row[3]
summaryhash[row[1]] = item
shkeys = summaryhash.keys()
sumd = []
jobsToList = set()
njobs = 0
for shkey in shkeys:
if not shkey in ['PANDAID', 'ErrorCode', 'MINRAMCOUNT']:
# check this condition
entry = {}
entry['field'] = shkey
entrlist = []
for subshkey in summaryhash[shkey].keys():
subentry = {}
subentry['kname'] = subshkey
subentry['kvalue'] = summaryhash[shkey][subshkey]
if (shkey == 'JOBSTATUS'):
njobs += summaryhash[shkey][subshkey]
entrlist.append(subentry)
entry['list'] = entrlist
sumd.append(entry)
elif shkey == 'MINRAMCOUNT':
entry = {}
entry['field'] = shkey
entrlist = []
newvalues = {}
for subshkey in summaryhash[shkey].keys():
roundedval = int( int(subshkey) / 1000)
if roundedval in newvalues:
newvalues[roundedval] += summaryhash[shkey][subshkey]
else:
newvalues[roundedval] = summaryhash[shkey][subshkey]
for ky in newvalues:
entrlist.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})
entrlist = sorted(entrlist, key=lambda x: str(x['kname']).lower())
entry['list'] = entrlist
sumd.append(entry)
elif shkey == 'PANDAID':
for subshkey in summaryhash[shkey]:
jobsToList.add(subshkey)
elif shkey == 'ErrorCode':
for subshkey in summaryhash[shkey]:
errval = {}
errval['codename'] = subshkey.split(':')[0]
errval['codeval'] = subshkey.split(':')[1]
errval['count'] = summaryhash[shkey][subshkey]
errval['error'] = subshkey
error = [it['error'] for it in errorcodelist if it['name'] == errval['codename'].lower()]
if len(error) > 0 and error[0] in errorCodes and int(errval['codeval']) in errorCodes[error[0]]:
errval['diag'] = errorCodes[error[0]][int(errval['codeval'])]
errsByCount.append(errval)
if sumd:
for item in sumd:
if item['field'] == 'JEDITASKID':
item['list'] = sorted(item['list'], key=lambda k: k['kvalue'], reverse=True)
jobs = []
if not doRefresh:
pandaIDVal = [int(val) for val in jobsToList]
pandaIDVal = pandaIDVal[:njobsmax]
newquery = {}
newquery['pandaid__in'] = pandaIDVal
eventservice = False
if 'requestParams' in request.session and 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
eventservice = True
if 'requestParams' in request.session and 'eventservice' in request.session['requestParams'] and (
request.session['requestParams']['eventservice'] == 'eventservice' or
request.session['requestParams'][
'eventservice'] == '1'):
eventservice = True
if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
'requestParams' in request.session and 'json' in request.session['requestParams']):
values = [f.name for f in Jobsactive4._meta.get_fields()]
elif eventservice:
values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock', 'jobmetrics', 'reqid', 'minramcount', 'statechangetime', 'jobsubstatus', 'eventservice'
else:
values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'computingelement', 'proddblock', 'destinationdblock', 'reqid', 'minramcount', 'statechangetime', 'avgvmem', 'maxvmem', 'maxpss', 'maxrss', 'nucleus', 'eventservice'
jobs.extend(Jobsdefined4.objects.filter(**newquery).values(*values))
jobs.extend(Jobsactive4.objects.filter(**newquery).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**newquery).values(*values))
jobs.extend(Jobsarchived4.objects.filter(**newquery).values(*values))
if (len(jobs) < njobsmax):
jobs.extend(Jobsarchived.objects.filter(**newquery).values(*values))
if 'requestParams' in request.session and 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'time-ascending':
jobs = sorted(jobs, key=lambda x: x['modificationtime'])
if sortby == 'time-descending':
jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
if sortby == 'statetime':
jobs = sorted(jobs, key=lambda x: x['statechangetime'], reverse=True)
elif sortby == 'priority':
jobs = sorted(jobs, key=lambda x: x['currentpriority'], reverse=True)
elif sortby == 'attemptnr':
jobs = sorted(jobs, key=lambda x: x['attemptnr'], reverse=True)
elif sortby == 'duration-ascending':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'duration-descending':
jobs = sorted(jobs, key=lambda x: x['durationsec'], reverse=True)
elif sortby == 'duration':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'PandaID':
jobs = sorted(jobs, key=lambda x: x['pandaid'], reverse=True)
else:
sortby = "time-descending"
if len(jobs) > 0 and 'modificationtime' in jobs[0]:
jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
## If the list is for a particular JEDI task, filter out the jobs superseded by retries
taskids = {}
for job in jobs:
if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
droplist = []
droppedIDs = set()
droppedPmerge = set()
jobs = cleanJobListLite(request, jobs)
jobtype = ''
if 'requestParams' in request.session and 'jobtype' in request.session['requestParams']:
jobtype = request.session['requestParams']['jobtype']
elif '/analysis' in request.path:
jobtype = 'analysis'
elif '/production' in request.path:
jobtype = 'production'
if 'requestParams' in request.session and 'sortby' in request.session['requestParams']:
sortby = request.session['requestParams']['sortby']
if sortby == 'time-ascending':
jobs = sorted(jobs, key=lambda x: x['modificationtime'])
if sortby == 'time-descending':
jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
if sortby == 'statetime':
jobs = sorted(jobs, key=lambda x: x['statechangetime'], reverse=True)
elif sortby == 'priority':
jobs = sorted(jobs, key=lambda x: x['currentpriority'], reverse=True)
elif sortby == 'attemptnr':
jobs = sorted(jobs, key=lambda x: x['attemptnr'], reverse=True)
elif sortby == 'duration-ascending':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'duration-descending':
jobs = sorted(jobs, key=lambda x: x['durationsec'], reverse=True)
elif sortby == 'duration':
jobs = sorted(jobs, key=lambda x: x['durationsec'])
elif sortby == 'PandaID':
jobs = sorted(jobs, key=lambda x: x['pandaid'], reverse=True)
else:
sortby = "time-descending"
if len(jobs) > 0 and 'modificationtime' in jobs[0]:
jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
taskname = ''
if 'requestParams' in request.session and 'jeditaskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
if 'requestParams' in request.session and 'taskid' in request.session['requestParams']:
taskname = getTaskName('jeditaskid', request.session['requestParams']['taskid'])
if 'requestParams' in request.session and 'produsername' in request.session['requestParams']:
user = request.session['requestParams']['produsername']
elif 'requestParams' in request.session and 'user' in request.session['requestParams']:
user = request.session['requestParams']['user']
else:
user = None
## set up google flow diagram
flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
# show warning or not
showwarn = 0
if 'JOB_LIMIT' in request.session:
if njobs <= request.session['JOB_LIMIT']:
showwarn = 0
else:
showwarn = 1
jobsToShow = jobs[:njobsmax]
if 'requestParams' in request.session and 'jeditaskid' in request.session['requestParams']:
if len(jobs) > 0:
for job in jobs:
if 'maxvmem' in job:
if type(job['maxvmem']) is int and job['maxvmem'] > 0:
job['maxvmemmb'] = "%0.2f" % (job['maxvmem'] / 1000.)
job['avgvmemmb'] = "%0.2f" % (job['avgvmem'] / 1000.)
if 'maxpss' in job:
if type(job['maxpss']) is int and job['maxpss'] > 0:
job['maxpss'] = "%0.2f" % (job['maxpss'] / 1024.)
# errsByCount, errsBySite, errsByUser, errsByTask, errdSumd, errHist =
if 'HTTP_REFERER' in request.META:
xurl = extensibleURL(request, request.META['HTTP_REFERER'])
else:
xurl = request.META['PATH_INFO'] + "?"+ request.META['QUERY_STRING']
print xurl
nosorturl = removeParam(xurl, 'sortby', mode='extensible')
nosorturl = removeParam(nosorturl, 'display_limit', mode='extensible')
TFIRST = None
TLAST = None
if 'TFIRST' in request.session:
TFIRST = request.session['TFIRST']
del request.session['TFIRST']
if 'TLAST' in request.session:
TLAST = request.session['TLAST']
del request.session['TLAST']
if 'viewParams' in request.session and 'limit' in request.session['viewParams']:
del request.session['viewParams']['limit']
if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
'json' not in request.session['requestParams'])):
nodropPartURL = cleanURLFromDropPart(xurl)
#sumd = None
#errsByCount = None
data = {
'errsByCount': errsByCount,
# 'errdSumd': errdSumd,
'request': request,
'viewParams': request.session['viewParams'] if 'viewParams' in request.session else None,
'requestParams': request.session['requestParams'] if 'requestParams' in request.session else None,
'jobList': jobsToShow[:njobsmax],
'jobtype': jobtype,
'njobs': njobs,
'user': user,
'sumd': sumd,
'xurl': xurl,
# 'droplist': droplist,
# 'ndrops': len(droplist) if len(droplist) > 0 else (- len(droppedPmerge)),
'ndrops': 0,
'tfirst': TFIRST,
'tlast': TLAST,
'plow': PLOW,
'phigh': PHIGH,
'joblimit': request.session['JOB_LIMIT'] if 'JOB_LIMIT' in request.session else None,
'limit': 0,
# 'totalJobs': totalJobs,
# 'showTop': showTop,
'url_nolimit': url_nolimit,
'display_limit': display_limit,
'sortby': sortby,
'nosorturl': nosorturl,
'taskname': taskname,
'flowstruct': flowstruct,
'nodropPartURL': nodropPartURL,
'doRefresh': doRefresh,
'built': datetime.now().strftime("%H:%M:%S"),
}
else:
if (('fields' in request.session['requestParams']) and (len(jobs) > 0)):
fields = request.session['requestParams']['fields'].split(',')
fields = (set(fields) & set(jobs[0].keys()))
for job in jobs:
for field in list(job.keys()):
if field in fields:
pass
else:
del job[field]
data = {
"selectionsummary": sumd,
"jobs": jobs,
"errsByCount": errsByCount,
}
return data
def jobListPDiv(request, mode=None, param=None):
    """
    Serve the job-list page wrapper, backed by a 20-minute page cache.

    On a cache hit the stored JSON context is re-rendered with
    jobListWrapper.html; otherwise the job list context is rebuilt via
    getJobList, stored in the cache, and rendered with jobListContent.html.

    :param request: Django HTTP request
    :param mode: unused; kept for URL-conf compatibility
    :param param: unused; kept for URL-conf compatibility
    :return: HttpResponse with the rendered page
    """
    initRequest(request)
    max_age = request.session['max_age_minutes'] * 60
    cached = getCacheEntry(request, "jobListWrapper")
    if cached is not None:
        context = json.loads(cached)
        context['request'] = request
        page = render_to_response('jobListWrapper.html', context, RequestContext(request))
        patch_response_headers(page, cache_timeout=max_age)
        endSelfMonitor(request)
        return page
    context = getJobList(request)
    context.update(getContextVariables(request))
    # remember the freshly built context for 20 minutes
    setCacheEntry(request, "jobListWrapper", json.dumps(context, cls=DateEncoder), 60 * 20)
    ## self monitor
    endSelfMonitor(request)
    page = render_to_response('jobListContent.html', context, RequestContext(request))
    patch_response_headers(page, cache_timeout=max_age)
    return page
def getCacheEntry(request, viewType):
    """
    Fetch a cached page payload for *viewType*, or None on a miss.

    The cache key combines the JSON/HTML flavour of the request, the Django
    cache key prefix, the view type and an MD5 of the full request URL, so
    JSON and HTML renderings of the same URL are cached separately.

    :param request: Django HTTP request
    :param viewType: logical name of the view whose cache is queried
    :return: cached payload or None
    """
    is_json = False
    # We do this check to always rebuild cache for the page when it called from the crawler
    if (('REMOTE_ADDR' in request.META) and (request.META['REMOTE_ADDR'] in notcachedRemoteAddress)):
        return None
    request._cache_update_cache = False
    # BUG FIX: the original compared against the bare string 'application/json',
    # turning `in` into a substring test; a one-element tuple gives an exact match.
    if ((('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) or (
            'json' in request.GET)):
        is_json = True
    key_prefix = "%s_%s_%s_" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)
    path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))
    cache_key = '%s.%s' % (key_prefix, path.hexdigest())
    return cache.get(cache_key, None)
def setCacheEntry(request, viewType, data, timeout):
    """
    Store *data* in the page cache for *viewType* for *timeout* seconds.

    Builds the same key as getCacheEntry: request flavour (JSON vs HTML),
    cache key prefix, view type and an MD5 of the full request URL.

    :param request: Django HTTP request
    :param viewType: logical name of the view whose cache is written
    :param data: serialized payload to cache
    :param timeout: cache lifetime in seconds
    """
    is_json = False
    request._cache_update_cache = False
    # BUG FIX: one-element tuple for an exact Accept-header match; the bare
    # string form performed a substring test instead (must mirror getCacheEntry).
    if ((('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) or (
            'json' in request.GET)):
        is_json = True
    key_prefix = "%s_%s_%s_" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX, viewType)
    path = hashlib.md5(encoding.force_bytes(encoding.iri_to_uri(request.get_full_path())))
    cache_key = '%s.%s' % (key_prefix, path.hexdigest())
    cache.set(cache_key, data, timeout)
def cache_filter(timeout):
    """
    View decorator caching responses with separate cache streams per
    request flavour (currently JSON vs HTML).

    Unlike Django's cache middleware, the key prefix here encodes request
    conditions (Accept header / ?json), so the same URL can be cached once
    per flavour.

    :param timeout: cache lifetime in seconds
    :return: decorator wrapping a Django view function
    """
    # This function provides splitting cache keys depending on conditions above the parameters specified in the URL
    def decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(request, *args, **kwargs):
            request._cache_update_cache = False
            # here we can apply any conditions to separate cache streams
            # BUG FIX: one-element tuple for an exact Accept-header match
            # (the bare string made `in` a substring test).
            is_json = ((('HTTP_ACCEPT' in request.META) and (
                request.META.get('HTTP_ACCEPT') in ('application/json',))) or ('json' in request.GET))
            key_prefix = "%s_%s_" % (is_json, djangosettings.CACHE_MIDDLEWARE_KEY_PREFIX)
            cache_key = ucache.get_cache_key(request, key_prefix, 'GET', cache)
            response = cache.get(cache_key, None) if cache_key is not None else None
            if response is None:
                # miss (or unknown key): produce the response and remember it
                response = view_func(request, *args, **kwargs)
                cache_key = ucache.learn_cache_key(request, response, timeout, key_prefix, cache)
                cache.set(cache_key, response, timeout)
            return response
        return _wrapped_view
    return decorator
def jobList(request, mode=None, param=None):
    """
    Show the list of PanDA jobs selected by the request parameters.

    Queries the live job tables (and, unless noarchjobs=1, the long-term
    archive) subject to the session job limit, optionally drops jobs
    superseded by JEDI retries, computes summary/error statistics, and
    renders jobList.html / jobListES.html — or returns a JSON payload when
    requested via the Accept header or a 'json' parameter. Rendered page
    contexts are cached for 20 minutes.

    :param request: Django HTTP request; the selection comes from
        request.session['requestParams']
    :param mode: unused; kept for URL-conf compatibility
    :param param: unused; kept for URL-conf compatibility
    :return: HttpResponse (HTML page or JSON)
    """
    valid, response = initRequest(request)
    dkey = digkey(request)
    # Here we try to get data from cache
    data = getCacheEntry(request, "jobList")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if data['eventservice'] == True:
            response = render_to_response('jobListES.html', data, RequestContext(request))
        else:
            response = render_to_response('jobList.html', data, RequestContext(request))
        endSelfMonitor(request)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    if not valid: return response
    if 'dump' in request.session['requestParams'] and request.session['requestParams']['dump'] == 'parameters':
        return jobParamList(request)
    eventservice = False
    if 'jobtype' in request.session['requestParams'] and request.session['requestParams']['jobtype'] == 'eventservice':
        eventservice = True
    if 'eventservice' in request.session['requestParams'] and (
            request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams'][
            'eventservice'] == '1'):
        eventservice = True
    noarchjobs = False
    if ('noarchjobs' in request.session['requestParams'] and request.session['requestParams']['noarchjobs'] == '1'):
        noarchjobs = True
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
    if 'batchid' in request.session['requestParams']:
        query['batchid'] = request.session['requestParams']['batchid']
    jobs = []
    # JSON consumers get every model field; HTML views use a fixed column set
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        values = [f.name for f in Jobsactive4._meta.get_fields()]
    elif eventservice:
        values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock', 'jobmetrics', 'reqid', 'minramcount', 'statechangetime', 'jobsubstatus', 'eventservice' , 'nevents'
    else:
        values = 'jobsubstatus', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'computingelement', 'proddblock', 'destinationdblock', 'reqid', 'minramcount', 'statechangetime', 'avgvmem', 'maxvmem', 'maxpss', 'maxrss', 'nucleus', 'eventservice', 'nevents'
    JOB_LIMITS = request.session['JOB_LIMIT']
    totalJobs = 0
    showTop = 0
    if 'limit' in request.session['requestParams']:
        request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
    if 'transferringnotupdated' in request.session['requestParams']:
        jobs = stateNotUpdated(request, state='transferring', values=values, wildCardExtension=wildCardExtension)
    elif 'statenotupdated' in request.session['requestParams']:
        jobs = stateNotUpdated(request, values=values, wildCardExtension=wildCardExtension)
    else:
        excludedTimeQuery = copy.deepcopy(query)
        if ('modificationtime__range' in excludedTimeQuery and not 'date_to' in request.session['requestParams']):
            del excludedTimeQuery['modificationtime__range']
        jobs.extend(Jobsdefined4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[
                    :request.session['JOB_LIMIT']].values(*values))
        jobs.extend(Jobsactive4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[
                    :request.session['JOB_LIMIT']].values(*values))
        jobs.extend(Jobswaiting4.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension])[
                    :request.session['JOB_LIMIT']].values(*values))
        jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[
                    :request.session['JOB_LIMIT']].values(*values))
        listJobs = [Jobsarchived4, Jobsactive4, Jobswaiting4, Jobsdefined4]
        if not noarchjobs:
            queryFrozenStates = []
            if 'jobstatus' in request.session['requestParams']:
                # list(...) so len() keeps working on Python 3 (filter returns an iterator there)
                queryFrozenStates = list(filter(set(request.session['requestParams']['jobstatus'].split('|')).__contains__,
                                                ['finished', 'failed', 'cancelled', 'closed']))
            ##hard limit is set to 2K
            if ('jobstatus' not in request.session['requestParams'] or len(queryFrozenStates) > 0):
                if ('limit' not in request.session['requestParams'] and 'jeditaskid' not in request.session[
                        'requestParams']):
                    request.session['JOB_LIMIT'] = 20000
                    JOB_LIMITS = 20000
                    showTop = 1
                elif ('limit' not in request.session['requestParams'] and 'jeditaskid' in request.session[
                        'requestParams']):
                    request.session['JOB_LIMIT'] = 200000
                    JOB_LIMITS = 200000
                else:
                    request.session['JOB_LIMIT'] = int(request.session['requestParams']['limit'])
                    JOB_LIMITS = int(request.session['requestParams']['limit'])
                # query the long-term archive only when the time window reaches past yesterday
                if (((datetime.now() - datetime.strptime(query['modificationtime__range'][0],
                                                         "%Y-%m-%d %H:%M:%S")).days > 1) or \
                        ((datetime.now() - datetime.strptime(query['modificationtime__range'][1],
                                                             "%Y-%m-%d %H:%M:%S")).days > 1)):
                    archJobs = Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[
                               :request.session['JOB_LIMIT']].values(*values)
                    listJobs.append(Jobsarchived)
                    totalJobs = len(archJobs)
                    jobs.extend(archJobs)
        print(listJobs)
        # count the full (unlimited) selection in the background; joined further below
        thread = Thread(target=totalCount, args=(listJobs, query, wildCardExtension, dkey))
        thread.start()
    ## If the list is for a particular JEDI task, filter out the jobs superseded by retries
    taskids = {}
    for job in jobs:
        if 'jeditaskid' in job: taskids[job['jeditaskid']] = 1
    dropmode = True
    if 'mode' in request.session['requestParams'] and request.session['requestParams'][
            'mode'] == 'drop': dropmode = True
    if 'mode' in request.session['requestParams'] and request.session['requestParams'][
            'mode'] == 'nodrop': dropmode = False
    isReturnDroppedPMerge = False
    if 'processingtype' in request.session['requestParams'] and \
            request.session['requestParams']['processingtype'] == 'pmerge': isReturnDroppedPMerge = True
    droplist = []
    droppedPmerge = set()
    if dropmode and (len(taskids) == 1):
        # list(...) because dict.keys() is not indexable on Python 3
        jobs, droplist, droppedPmerge = dropRetrielsJobs(jobs, list(taskids)[0], isReturnDroppedPMerge)
    jobs = cleanJobList(request, jobs)
    njobs = len(jobs)
    jobtype = ''
    if 'jobtype' in request.session['requestParams']:
        jobtype = request.session['requestParams']['jobtype']
    elif '/analysis' in request.path:
        jobtype = 'analysis'
    elif '/production' in request.path:
        jobtype = 'production'
    if u'display_limit' in request.session['requestParams']:
        if int(request.session['requestParams']['display_limit']) > njobs:
            display_limit = njobs
        else:
            display_limit = int(request.session['requestParams']['display_limit'])
        url_nolimit = removeParam(request.get_full_path(), 'display_limit')
    else:
        display_limit = 1000
        url_nolimit = request.get_full_path()
    njobsmax = display_limit
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        # None sort keys are mapped to sentinels so mixed data still sorts
        if sortby == 'time-ascending':
            jobs = sorted(jobs, key=lambda x: x['modificationtime'] if not x['modificationtime'] is None else datetime(1900, 1, 1))
        if sortby == 'time-descending':
            jobs = sorted(jobs, key=lambda x: x['modificationtime'] if not x['modificationtime'] is None else datetime(1900, 1, 1), reverse=True)
        if sortby == 'statetime':
            jobs = sorted(jobs, key=lambda x: x['statechangetime'] if not x['statechangetime'] is None else datetime(1900, 1, 1), reverse=True)
        elif sortby == 'priority':
            jobs = sorted(jobs, key=lambda x: x['currentpriority'] if not x['currentpriority'] is None else 0, reverse=True)
        elif sortby == 'attemptnr':
            jobs = sorted(jobs, key=lambda x: x['attemptnr'], reverse=True)
        elif sortby == 'duration-ascending':
            jobs = sorted(jobs, key=lambda x: x['durationsec'])
        elif sortby == 'duration-descending':
            jobs = sorted(jobs, key=lambda x: x['durationsec'], reverse=True)
        elif sortby == 'duration':
            jobs = sorted(jobs, key=lambda x: x['durationsec'])
        elif sortby == 'PandaID':
            jobs = sorted(jobs, key=lambda x: x['pandaid'], reverse=True)
    else:
        sortby = "time-descending"
        if len(jobs) > 0 and 'modificationtime' in jobs[0]:
            jobs = sorted(jobs, key=lambda x: x['modificationtime'], reverse=True)
    taskname = ''
    if 'jeditaskid' in request.session['requestParams']:
        taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
    if 'taskid' in request.session['requestParams']:
        taskname = getTaskName('jeditaskid', request.session['requestParams']['taskid'])
    if 'produsername' in request.session['requestParams']:
        user = request.session['requestParams']['produsername']
    elif 'user' in request.session['requestParams']:
        user = request.session['requestParams']['user']
    else:
        user = None
    ## set up google flow diagram
    flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
    if (('datasets' in request.session['requestParams']) and (
            request.session['requestParams']['datasets'] == 'yes') and (
            ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json')))):
        for job in jobs:
            files = []
            # BUG FIX: 'pandaid' was an undefined name throughout this loop;
            # use the id of the job currently being processed.
            files.extend(JediDatasetContents.objects.filter(pandaid=job['pandaid']).order_by('type').values())
            ninput = 0
            if len(files) > 0:
                for f in files:
                    if f['type'] == 'input': ninput += 1
                    f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
                    dsets = JediDatasets.objects.filter(datasetid=f['datasetid']).values()
                    if len(dsets) > 0:
                        f['datasetname'] = dsets[0]['datasetname']
            if True:
                # if ninput == 0:
                files.extend(Filestable4.objects.filter(pandaid=job['pandaid']).order_by('type').values())
                if len(files) == 0:
                    files.extend(FilestableArch.objects.filter(pandaid=job['pandaid']).order_by('type').values())
                if len(files) > 0:
                    for f in files:
                        if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
                        if 'fileid' not in f: f['fileid'] = f['row_id']
                        if 'datasetname' not in f: f['datasetname'] = f['dataset']
                        if 'modificationtime' in f: f['oldfiletable'] = 1
                        if 'destinationdblock' in f and f['destinationdblock'] is not None:
                            f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
            files = sorted(files, key=lambda x: x['type'])
            nfiles = len(files)
            logfile = {}
            for file in files:
                if file['type'] == 'log':
                    logfile['lfn'] = file['lfn']
                    logfile['guid'] = file['guid']
                    if 'destinationse' in file:
                        logfile['site'] = file['destinationse']
                    else:
                        logfilerec = Filestable4.objects.filter(pandaid=job['pandaid'], lfn=logfile['lfn']).values()
                        if len(logfilerec) == 0:
                            logfilerec = FilestableArch.objects.filter(pandaid=job['pandaid'], lfn=logfile['lfn']).values()
                        if len(logfilerec) > 0:
                            logfile['site'] = logfilerec[0]['destinationse']
                            logfile['guid'] = logfilerec[0]['guid']
                    logfile['scope'] = file['scope']
                file['fsize'] = int(file['fsize'] / 1000000)
            job['datasets'] = files
    # show warning or not
    if njobs <= request.session['JOB_LIMIT']:
        showwarn = 0
    else:
        showwarn = 1
    # Sort in order to see the most important tasks
    sumd, esjobdict = jobSummaryDict(request, jobs)
    if sumd:
        for item in sumd:
            if item['field'] == 'jeditaskid':
                item['list'] = sorted(item['list'], key=lambda k: k['kvalue'], reverse=True)
    if 'jeditaskid' in request.session['requestParams']:
        if len(jobs) > 0:
            for job in jobs:
                if 'maxvmem' in job:
                    if type(job['maxvmem']) is int and job['maxvmem'] > 0:
                        job['maxvmemmb'] = "%0.2f" % (job['maxvmem'] / 1000.)
                        job['avgvmemmb'] = "%0.2f" % (job['avgvmem'] / 1000.)
                if 'maxpss' in job:
                    if type(job['maxpss']) is int and job['maxpss'] > 0:
                        job['maxpss'] = "%0.2f" % (job['maxpss'] / 1024.)
    testjobs = False
    if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams'][
            'prodsourcelabel'].lower().find('test') >= 0:
        testjobs = True
    tasknamedict = taskNameDict(jobs)
    errsByCount, errsBySite, errsByUser, errsByTask, errdSumd, errHist = errorSummaryDict(request, jobs, tasknamedict,
                                                                                          testjobs)
    # Here we getting extended data for site
    jobsToShow = jobs[:njobsmax]
    distinctComputingSites = []
    for job in jobsToShow:
        distinctComputingSites.append(job['computingsite'])
    distinctComputingSites = list(set(distinctComputingSites))
    query = {}
    query['siteid__in'] = distinctComputingSites
    siteres = Schedconfig.objects.filter(**query).exclude(cloud='CMS').extra().values('siteid', 'status',
                                                                                      'comment_field')
    siteHash = {}
    for site in siteres:
        siteHash[site['siteid']] = (site['status'], site['comment_field'])
    for job in jobsToShow:
        if job['computingsite'] in siteHash.keys():
            job['computingsitestatus'] = siteHash[job['computingsite']][0]
            job['computingsitecomment'] = siteHash[job['computingsite']][1]
    try:
        # 'thread' exists only when the generic query branch ran above;
        # the except also covers a missing/expired tcount entry
        thread.join()
        jobsTotalCount = sum(tcount[dkey])
        print(dkey)
        print(tcount[dkey])
        del tcount[dkey]
        print(tcount)
        print(jobsTotalCount)
    except Exception:
        jobsTotalCount = -1
    listPar = []
    # .items() instead of the Python-2-only .iteritems()
    for key, val in request.session['requestParams'].items():
        if (key != 'limit' and key != 'display_limit'):
            listPar.append(key + '=' + str(val))
    if len(listPar) > 0:
        urlParametrs = '&'.join(listPar) + '&'
    else:
        urlParametrs = None
    print(listPar)
    del listPar
    if (math.fabs(njobs - jobsTotalCount) < 1000 or jobsTotalCount == -1):
        jobsTotalCount = None
    else:
        # round the total up to the next 10k; '//' keeps the historical
        # truncate-then-ceil integer arithmetic on Python 3 too
        jobsTotalCount = int(math.ceil((jobsTotalCount + 10000) // 10000) * 10000)
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        print(xurl)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        nosorturl = removeParam(nosorturl, 'display_limit', mode='extensible')
        TFIRST = request.session['TFIRST']
        TLAST = request.session['TLAST']
        del request.session['TFIRST']
        del request.session['TLAST']
        nodropPartURL = cleanURLFromDropPart(xurl)
        data = {
            'prefix': getPrefix(request),
            'errsByCount': errsByCount,
            'errdSumd': errdSumd,
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'jobList': jobsToShow,
            'jobtype': jobtype,
            'njobs': njobs,
            'user': user,
            'sumd': sumd,
            'xurl': xurl,
            'xurlnopref': xurl[5:],
            'droplist': droplist,
            'ndrops': len(droplist) if len(droplist) > 0 else (- len(droppedPmerge)),
            'tfirst': TFIRST,
            'tlast': TLAST,
            'plow': PLOW,
            'phigh': PHIGH,
            'showwarn': showwarn,
            'joblimit': request.session['JOB_LIMIT'],
            'limit': JOB_LIMITS,
            'totalJobs': totalJobs,
            'showTop': showTop,
            'url_nolimit': url_nolimit,
            'display_limit': display_limit,
            'sortby': sortby,
            'nosorturl': nosorturl,
            'taskname': taskname,
            'flowstruct': flowstruct,
            'nodropPartURL': nodropPartURL,
            'eventservice': eventservice,
            'jobsTotalCount': jobsTotalCount,
            'requestString': urlParametrs,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        setCacheEntry(request, "jobList", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        if eventservice:
            response = render_to_response('jobListES.html', data, RequestContext(request))
        else:
            response = render_to_response('jobList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        # optional column projection for JSON consumers
        if (('fields' in request.session['requestParams']) and (len(jobs) > 0)):
            fields = request.session['requestParams']['fields'].split(',')
            fields = (set(fields) & set(jobs[0].keys()))
            for job in jobs:
                for field in list(job.keys()):
                    if field in fields:
                        pass
                    else:
                        del job[field]
        data = {
            "selectionsummary": sumd,
            "jobs": jobs,
            "errsByCount": errsByCount,
        }
        response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def isEventService(job):
    """
    Decide whether a job record is an event-service job.

    A job counts as event service when its 'specialhandling' field is
    present and non-empty and either mentions 'eventservice'/'esmerge'
    or the job's 'eventservice' flag is a positive value other than
    'ordinary'.

    :param job: job row as a dict
    :return: True if the job is event-service related, False otherwise
    """
    special = job.get('specialhandling')
    if not special:
        return False
    if 'eventservice' in special or 'esmerge' in special:
        return True
    es_flag = job['eventservice']
    return es_flag != 'ordinary' and es_flag > 0
def cleanURLFromDropPart(url):
    """
    Strip the 'mode=...' fragment (e.g. mode=drop / mode=nodrop) from a URL.

    Removes everything from the first occurrence of 'mode' (including a
    directly preceding '&') up to and including the next '&'. Returns the
    URL unchanged when it contains no 'mode' substring.

    :param url: URL string
    :return: URL with the mode fragment removed
    """
    start = url.find('mode')
    if start == -1:
        return url
    # swallow the separating ampersand in front of the fragment, if any
    if url[start - 1] == '&':
        start -= 1
    stop = url.find('&', start + 1)
    if stop == -1:
        return url[:start]
    return url[:start] + url[stop + 1:]
def getSequentialRetries(pandaid, jeditaskid, countOfInvocations=None):
    """
    Recursively collect the chain of older attempts that led to *pandaid*.

    Walks the JEDI retry history backwards (rows whose newpandaid is the
    current job), follows 'merge'/'retry' relations through failed jobs,
    and returns the history rows de-duplicated by oldpandaid.

    :param pandaid: PanDA id of the job whose ancestry is requested
    :param jeditaskid: JEDI task the job belongs to
    :param countOfInvocations: shared accumulator list bounding the
        recursion to 100 invocations; created here when omitted.
        BUG FIX: the parameter now has a default — some callers invoke
        this function with only two arguments, which raised TypeError.
    :return: list of retry-history row dicts, unique by 'oldpandaid'
    """
    if countOfInvocations is None:
        countOfInvocations = []
    retryquery = {}
    countOfInvocations.append(1)
    retryquery['jeditaskid'] = jeditaskid
    retryquery['newpandaid'] = pandaid
    newretries = []
    # hard recursion guard: stop expanding after 100 invocations
    if (len(countOfInvocations) < 100):
        retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('oldpandaid').reverse().values()
        newretries.extend(retries)
        for retry in retries:
            if retry['relationtype'] in ['merge', 'retry']:
                jsquery = {}
                jsquery['jeditaskid'] = jeditaskid
                jsquery['pandaid'] = retry['oldpandaid']
                values = ['pandaid', 'jobstatus', 'jeditaskid']
                jsjobs = []
                jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
                jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
                jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
                jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
                jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
                for job in jsjobs:
                    if job['jobstatus'] == 'failed':
                        # mark the matching history rows as real retries
                        for retry in newretries:
                            if (retry['oldpandaid'] == job['pandaid']):
                                retry['relationtype'] = 'retry'
                        newretries.extend(getSequentialRetries(job['pandaid'], job['jeditaskid'], countOfInvocations))
    # de-duplicate, keeping the first occurrence per oldpandaid
    outlist = []
    added_keys = set()
    for row in newretries:
        lookup = row['oldpandaid']
        if lookup not in added_keys:
            outlist.append(row)
            added_keys.add(lookup)
    return outlist
def getSequentialRetries_ES(pandaid, jobsetid, jeditaskid, countOfInvocations, recurse=0):
    """
    Recursively collect older jobset retries for an event-service job.

    Looks up 'jobset_retry' history rows whose newpandaid equals
    *jobsetid*, resolves the failed jobs of each predecessor jobset,
    annotates matching rows with relationtype='retry' and the failed
    job's pandaid, and recurses on those failed jobs.

    :param pandaid: PanDA id of the current job (passed down on recursion)
    :param jobsetid: jobset id whose ancestor retries are requested
    :param jeditaskid: JEDI task id the jobs belong to
    :param countOfInvocations: shared list bounding recursion to 100 calls
    :param recurse: recursion depth counter (informational only)
    :return: history rows that gained a 'jobid', de-duplicated per jobid

    NOTE(review): the recursive call passes the original *jobsetid*
    rather than the predecessor's jobset id, and the inner loop variable
    'retry' shadows the outer one — both look suspicious but are
    preserved as-is; confirm against the retry-history schema.
    """
    retryquery = {}
    retryquery['jeditaskid'] = jeditaskid
    retryquery['newpandaid'] = jobsetid
    retryquery['relationtype'] = 'jobset_retry'
    countOfInvocations.append(1)
    newretries = []
    # hard recursion guard: stop expanding after 100 invocations
    if (len(countOfInvocations) < 100):
        retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('oldpandaid').reverse().values()
        newretries.extend(retries)
        for retry in retries:
            # find failed jobs belonging to the predecessor jobset
            jsquery = {}
            jsquery['jeditaskid'] = jeditaskid
            jsquery['jobstatus'] = 'failed'
            jsquery['jobsetid'] = retry['oldpandaid']
            values = ['pandaid', 'jobstatus', 'jobsetid', 'jeditaskid']
            jsjobs = []
            jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
            jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
            jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
            jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
            jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
            for job in jsjobs:
                if job['jobstatus'] == 'failed':
                    # annotate every collected row matching this jobset
                    for retry in newretries:
                        if (retry['oldpandaid'] == job['jobsetid']):
                            retry['relationtype'] = 'retry'
                            retry['jobid'] = job['pandaid']
                    newretries.extend(getSequentialRetries_ES(job['pandaid'],
                                                              jobsetid, job['jeditaskid'], countOfInvocations,
                                                              recurse + 1))
    # keep only rows with a resolved job, de-duplicated per jobid
    outlist = []
    added_keys = set()
    for row in newretries:
        if 'jobid' in row:
            lookup = row['jobid']
            if lookup not in added_keys:
                outlist.append(row)
                added_keys.add(lookup)
    return outlist
def getSequentialRetries_ESupstream(pandaid, jobsetid, jeditaskid, countOfInvocations, recurse=0):
    """
    Collect downstream (newer) jobset retries of an event-service jobset.

    Looks up 'jobset_retry' history rows whose oldpandaid equals
    *jobsetid*, resolves the concrete PanDA jobs belonging to each
    successor jobset and annotates matching history rows with
    relationtype='retry' and that job's pandaid. Rows without a resolved
    job are dropped; the rest are de-duplicated per jobid.

    :param pandaid: accepted for signature symmetry; not used here
    :param jobsetid: jobset id whose successors are requested
    :param jeditaskid: JEDI task id the jobs belong to
    :param countOfInvocations: shared list bounding expansion to 100 calls
    :param recurse: accepted for signature symmetry; not used here
    :return: annotated history rows, unique per jobid
    """
    countOfInvocations.append(1)
    collected = []
    if len(countOfInvocations) < 100:
        historyFilter = {
            'jeditaskid': jeditaskid,
            'oldpandaid': jobsetid,
            'relationtype': 'jobset_retry',
        }
        rows = JediJobRetryHistory.objects.filter(**historyFilter).order_by('newpandaid').values()
        collected.extend(rows)
        jobFields = ['pandaid', 'jobstatus', 'jobsetid', 'jeditaskid']
        jobTables = (Jobsdefined4, Jobsactive4, Jobswaiting4, Jobsarchived4, Jobsarchived)
        for historyRow in rows:
            # resolve the jobs belonging to the successor jobset
            jobFilter = {'jeditaskid': jeditaskid, 'jobsetid': historyRow['newpandaid']}
            successorJobs = []
            for table in jobTables:
                successorJobs.extend(table.objects.filter(**jobFilter).values(*jobFields))
            for successor in successorJobs:
                for candidate in collected:
                    if candidate['newpandaid'] == successor['jobsetid']:
                        candidate['relationtype'] = 'retry'
                        candidate['jobid'] = successor['pandaid']
    # keep only rows with a resolved job, de-duplicated per jobid
    seen = set()
    result = []
    for row in collected:
        if 'jobid' in row and row['jobid'] not in seen:
            result.append(row)
            seen.add(row['jobid'])
    return result
def descendentjoberrsinfo(request):
    """
    Render error descriptions for all retry ancestors of a given job.

    Requires both ?pandaid= and ?jeditaskid=. The job is looked up across
    the live and archived tables, its retry chain is resolved (event
    service jobs follow jobset retries), and an error text is produced per
    ancestor job via getErrorDescription.

    :param request: Django HTTP request
    :return: HttpResponse (rendered descentJobsErrors.html, or a JSON
        error payload when parameters are missing / the job is unknown)
    """
    valid, response = initRequest(request)
    if not valid: return response
    data = {}
    job_pandaid = job_jeditaskid = -1
    if 'pandaid' in request.session['requestParams']:
        job_pandaid = int(request.session['requestParams']['pandaid'])
    if 'jeditaskid' in request.session['requestParams']:
        job_jeditaskid = int(request.session['requestParams']['jeditaskid'])
    if (job_pandaid == -1) or (job_jeditaskid == -1):
        data = {"error": "no pandaid or jeditaskid supplied"}
        del request.session['TFIRST']
        del request.session['TLAST']
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    query = setupView(request, hours=365 * 24)
    jobs = []
    jobs.extend(Jobsdefined4.objects.filter(**query).values())
    jobs.extend(Jobsactive4.objects.filter(**query).values())
    jobs.extend(Jobswaiting4.objects.filter(**query).values())
    jobs.extend(Jobsarchived4.objects.filter(**query).values())
    if len(jobs) == 0:
        # fall back to the long-term archive only when nothing was found live
        jobs.extend(Jobsarchived.objects.filter(**query).values())
    if len(jobs) == 0:
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {"error": "job not found"}
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    job = jobs[0]
    countOfInvocations = []
    if not isEventService(job):
        retryquery = {}
        retryquery['jeditaskid'] = job['jeditaskid']
        retryquery['oldpandaid'] = job['pandaid']
        retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
        # BUG FIX: countOfInvocations (the recursion guard) was not passed
        # here, which raised TypeError for non event-service jobs.
        pretries = getSequentialRetries(job['pandaid'], job['jeditaskid'], countOfInvocations)
    else:
        retryquery = {}
        retryquery['jeditaskid'] = job['jeditaskid']
        retryquery['oldpandaid'] = job['jobsetid']
        retryquery['relationtype'] = 'jobset_retry'
        retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
        pretries = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
    # gather every ancestor job id from both retry directions
    query = {'jeditaskid': job_jeditaskid}
    jobslist = []
    for retry in pretries:
        jobslist.append(retry['oldpandaid'])
    for retry in retries:
        jobslist.append(retry['oldpandaid'])
    query['pandaid__in'] = jobslist
    jobs = []
    jobs.extend(Jobsdefined4.objects.filter(**query).values())
    jobs.extend(Jobsactive4.objects.filter(**query).values())
    jobs.extend(Jobswaiting4.objects.filter(**query).values())
    jobs.extend(Jobsarchived4.objects.filter(**query).values())
    jobs.extend(Jobsarchived.objects.filter(**query).values())
    jobs = cleanJobList(request, jobs, mode='nodrop')
    errors = {}
    for job in jobs:
        errors[job['pandaid']] = getErrorDescription(job, mode='txt')
    endSelfMonitor(request)
    del request.session['TFIRST']
    del request.session['TLAST']
    response = render_to_response('descentJobsErrors.html', {'errors': errors}, RequestContext(request))
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
def eventsInfo(request, mode=None, param=None):
    """
    Return a JSON summary of event counts per event status for a JEDI task.

    Expects a 'jeditaskid' GET parameter; responds with an empty JSON object
    when it is missing. The result maps each event status to its summed
    event count, plus the echoed 'jeditaskid'.
    """
    if 'jeditaskid' not in request.GET:
        data = {}
        return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
    jeditaskid = request.GET['jeditaskid']
    cur = connection.cursor()
    # Bind the task id as a query parameter instead of interpolating the raw
    # request value into the SQL string (the previous '%'-formatting allowed
    # SQL injection). The trailing ';' is also dropped — it is not part of
    # the statement for the DB driver.
    cur.execute(
        "select sum(decode(c.startevent,NULL,c.nevents,endevent-startevent+1)) nevents,c.status "
        "from atlas_panda.jedi_datasets d,atlas_panda.jedi_dataset_contents c "
        "where d.jeditaskid=c.jeditaskid and d.datasetid=c.datasetid and d.jeditaskid=%s "
        "and d.type in ('input','pseudo_input') and d.masterid is null group by c.status",
        [jeditaskid])
    events = cur.fetchall()
    cur.close()
    data = {}
    # Each row is (nevents, status); key the summary by status.
    for ev in events:
        data[ev[1]] = ev[0]
    data['jeditaskid'] = jeditaskid
    return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
@csrf_exempt
def jobInfo(request, pandaid=None, batchid=None, p2=None, p3=None, p4=None):
    """
    Render the detail page for a single PanDA job, or return it as JSON.

    The job may be identified by pandaid, batchid or jobname (from the URL or
    request parameters), or indirectly via a 'creator' parameter holding the
    LFN of an output file the job produced. Gathers the job row, its files,
    log location, pilot stdout/stderr URLs, retry history, jobset siblings
    and (for event-service jobs) the event table, then renders
    jobInfo.html / jobInfoES.html or emits JSON depending on the request.
    Successful HTML renders are cached for 20 minutes.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Here we try to get cached data; on a cache hit the page is re-rendered
    # from the stored context without touching the database.
    data = getCacheEntry(request, "jobInfo")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if data['eventservice'] == True:
            response = render_to_response('jobInfoES.html', data, RequestContext(request))
        else:
            response = render_to_response('jobInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    eventservice = False
    query = setupView(request, hours=365 * 24)
    jobid = ''
    ## --- Work out which job is being requested ---
    if 'creator' in request.session['requestParams']:
        ## Find the job that created the specified file.
        fquery = {}
        fquery['lfn'] = request.session['requestParams']['creator']
        fquery['type'] = 'output'
        fileq = Filestable4.objects.filter(**fquery)
        fileq = fileq.values('pandaid', 'type')
        if fileq and len(fileq) > 0:
            pandaid = fileq[0]['pandaid']
        else:
            # Not in the live file table; fall back to the archive.
            fileq = FilestableArch.objects.filter(**fquery).values('pandaid', 'type')
            if fileq and len(fileq) > 0:
                pandaid = fileq[0]['pandaid']
    if pandaid:
        jobid = pandaid
        try:
            query['pandaid'] = int(pandaid)
        except:
            # Non-numeric value: treat it as a job name instead.
            query['jobname'] = pandaid
    if batchid:
        jobid = batchid
        query['batchid'] = batchid
    # Explicit request parameters override the URL-derived identifiers above.
    if 'pandaid' in request.session['requestParams']:
        try:
            pandaid = int(request.session['requestParams']['pandaid'])
        except ValueError:
            pandaid = 0
        jobid = pandaid
        query['pandaid'] = pandaid
    elif 'batchid' in request.session['requestParams']:
        batchid = request.session['requestParams']['batchid']
        jobid = "'" + batchid + "'"
        query['batchid'] = batchid
    elif 'jobname' in request.session['requestParams']:
        jobid = request.session['requestParams']['jobname']
        query['jobname'] = jobid
    ## --- Fetch the job row from the live tables, then the deep archive ---
    jobs = []
    if pandaid or batchid:
        startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
        jobs.extend(Jobsdefined4.objects.filter(**query).values())
        jobs.extend(Jobsactive4.objects.filter(**query).values())
        jobs.extend(Jobswaiting4.objects.filter(**query).values())
        jobs.extend(Jobsarchived4.objects.filter(**query).values())
        if len(jobs) == 0:
            jobs.extend(Jobsarchived.objects.filter(**query).values())
    jobs = cleanJobList(request, jobs, mode='nodrop')
    if len(jobs) == 0:
        # Job not found anywhere: render the page with job=None.
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'prefix': getPrefix(request),
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'pandaid': pandaid,
            'job': None,
            'jobid': jobid,
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('jobInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    ## --- Build the name/value column list shown on the page ---
    job = {}
    colnames = []
    columns = []
    try:
        job = jobs[0]
        tquery = {}
        tquery['jeditaskid'] = job['jeditaskid']
        tquery['storagetoken__isnull'] = False
        storagetoken = JediDatasets.objects.filter(**tquery).values('storagetoken')
        if storagetoken:
            # Prefer the dataset storage token as the displayed destination SE.
            job['destinationse'] = storagetoken[0]['storagetoken']
        pandaid = job['pandaid']
        # NOTE: Python 2 — dict.keys() returns a list here, so in-place sort works.
        colnames = job.keys()
        colnames.sort()
        for k in colnames:
            val = job[k]
            if job[k] == None:
                # Null-valued attributes are skipped entirely; the val = ''
                # assignment is dead code because of the continue.
                val = ''
                continue
            pair = {'name': k, 'value': val}
            columns.append(pair)
    except IndexError:
        job = {}
    ## Check for logfile extracts
    logs = Logstable.objects.filter(pandaid=pandaid)
    if logs:
        logextract = logs[0].log1
    else:
        logextract = None
    ## --- Collect the job's files (unless suppressed with ?nofiles) ---
    files = []
    typeFiles = {}
    fileSummary = ''
    inputFilesSize = 0
    if 'nofiles' not in request.session['requestParams']:
        ## Get job files. First look in JEDI datasetcontents
        print "Pulling file info"
        files.extend(Filestable4.objects.filter(pandaid=pandaid).order_by('type').values())
        ninput = 0
        noutput = 0
        npseudo_input = 0
        if len(files) > 0:
            for f in files:
                f['destination'] = ' '
                if f['type'] == 'input':
                    ninput += 1
                    # fsize is in bytes; accumulate the input size in MiB.
                    inputFilesSize += f['fsize'] / 1048576.
                if f['type'] in typeFiles:
                    typeFiles[f['type']] += 1
                else:
                    typeFiles[f['type']] = 1
                if f['type'] == 'output':
                    noutput += 1
                if len(jobs[0]['jobmetrics']) > 0:
                    # jobmetrics is a space-separated key=value string; a
                    # logBucketID in the list below marks an S3 destination.
                    for s in jobs[0]['jobmetrics'].split(' '):
                        if 'logBucketID' in s:
                            logBucketID = int(s.split('=')[1])
                            if logBucketID in [45, 41, 105, 106, 42, 61, 103, 2, 82, 101, 117,
                                               115]:  # Bucket Codes for S3 destination
                                f['destination'] = 'S3'
                # if len(jobs[0]['jobmetrics']) > 0:
                #    jobmetrics = dict(s.split('=') for s in jobs[0]['jobmetrics'].split(' '))
                #    if 'logBucketID' in jobmetrics:
                #        if int(jobmetrics['logBucketID']) in [3, 21, 45, 46, 104, 41, 105, 106, 42, 61, 21, 102, 103, 2, 82, 81, 82, 101]: #Bucket Codes for S3 destination
                #            f['destination'] = 'S3'
                if f['type'] == 'pseudo_input': npseudo_input += 1
                f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
                dsets = JediDatasets.objects.filter(datasetid=f['datasetid']).values()
                if len(dsets) > 0:
                    f['datasetname'] = dsets[0]['datasetname']
                if job['computingsite'] in pandaSites.keys():
                    f['ddmsite'] = pandaSites[job['computingsite']]['site']
                if 'dst' in f['destinationdblocktoken']:
                    # Token of the form 'dst:SITE_TOKEN': extract DDM site and space token.
                    parced = f['destinationdblocktoken'].split("_")
                    f['ddmsite'] = parced[0][4:]
                    f['dsttoken'] = parced[1]
            # Files shipped to the S3 object store are not listed on the page.
            files = [x for x in files if x['destination'] != 'S3']
        if len(typeFiles) > 0:
            # Human-readable per-type summary, e.g. "input: 3, size: 12.00(MB); log: 1".
            inputFilesSize = "%0.2f" % inputFilesSize
            for i in typeFiles:
                fileSummary += str(i) + ': ' + str(typeFiles[i])
                if (i == 'input'): fileSummary += ', size: ' + inputFilesSize + '(MB)'
                fileSummary += '; '
            fileSummary = fileSummary[:-2]
        if len(files) == 0:
            # Nothing in the live table; pull from the archived file table and
            # map its older column names onto the ones the template expects.
            files.extend(FilestableArch.objects.filter(pandaid=pandaid).order_by('type').values())
            if len(files) > 0:
                for f in files:
                    if 'creationdate' not in f: f['creationdate'] = f['modificationtime']
                    if 'fileid' not in f: f['fileid'] = f['row_id']
                    if 'datasetname' not in f: f['datasetname'] = f['dataset']
                    if 'modificationtime' in f: f['oldfiletable'] = 1
                    if 'destinationdblock' in f and f['destinationdblock'] is not None:
                        f['destinationdblock_vis'] = f['destinationdblock'].split('_')[-1]
        files = sorted(files, key=lambda x: x['type'])
    nfiles = len(files)
    ## --- Locate the log file record and pilot stdout/stderr URLs ---
    logfile = {}
    for file in files:
        if file['type'] == 'log':
            logfile['lfn'] = file['lfn']
            logfile['guid'] = file['guid']
            if 'destinationse' in file:
                logfile['site'] = file['destinationse']
            else:
                # The file dict came from the archive and lacks destinationse;
                # look the log record up again in the file tables.
                logfilerec = Filestable4.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
                if len(logfilerec) == 0:
                    logfilerec = FilestableArch.objects.filter(pandaid=pandaid, lfn=logfile['lfn']).values()
                if len(logfilerec) > 0:
                    logfile['site'] = logfilerec[0]['destinationse']
                    logfile['guid'] = logfilerec[0]['guid']
            logfile['scope'] = file['scope']
        file['fsize'] = int(file['fsize'])
    # A pilotid starting with 'http' encodes the stdout URL (before the '|');
    # the stderr/stdlog URLs only differ by extension.
    if 'pilotid' in job and job['pilotid'] is not None and job['pilotid'].startswith('http'):
        stdout = job['pilotid'].split('|')[0]
        stderr = stdout.replace('.out', '.err')
        stdlog = stdout.replace('.out', '.log')
    else:
        stdout = stderr = stdlog = None
    # input,pseudo_input,output,log and alphabetically within those please
    filesSorted = []
    filesSorted.extend(sorted([file for file in files if file['type'] == 'input'], key=lambda x: x['lfn']))
    filesSorted.extend(sorted([file for file in files if file['type'] == 'pseudo_input'], key=lambda x: x['lfn']))
    filesSorted.extend(sorted([file for file in files if file['type'] == 'output'], key=lambda x: x['lfn']))
    filesSorted.extend(sorted([file for file in files if file['type'] == 'log'], key=lambda x: x['lfn']))
    files = filesSorted
    ## Check for object store based log
    oslogpath = None
    if 'computingsite' in job and job['computingsite'] in objectStores:
        ospath = objectStores[job['computingsite']]
        if 'lfn' in logfile:
            if ospath.endswith('/'):
                oslogpath = ospath + logfile['lfn']
            else:
                oslogpath = ospath + '/' + logfile['lfn']
    ## Check for debug info
    if 'specialhandling' in job and not job['specialhandling'] is None and job['specialhandling'].find('debug') >= 0:
        debugmode = True
    else:
        debugmode = False
    debugstdout = None
    if debugmode:
        # Debug stdout is only fetched when explicitly requested with ?showdebug.
        if 'showdebug' in request.session['requestParams']:
            debugstdoutrec = Jobsdebug.objects.filter(pandaid=pandaid).values()
            if len(debugstdoutrec) > 0:
                if 'stdout' in debugstdoutrec[0]: debugstdout = debugstdoutrec[0]['stdout']
    # Transformation URLs are rendered as links (the template must not escape this).
    if 'transformation' in job and job['transformation'] is not None and job['transformation'].startswith('http'):
        job['transformation'] = "<a href='%s'>%s</a>" % (job['transformation'], job['transformation'].split('/')[-1])
    if 'metastruct' in job:
        job['metadata'] = json.dumps(job['metastruct'], sort_keys=True, indent=4, separators=(',', ': '))
    ## Get job parameters
    print "getting job parameters"
    jobparamrec = Jobparamstable.objects.filter(pandaid=pandaid)
    jobparams = None
    if len(jobparamrec) > 0:
        jobparams = jobparamrec[0].jobparameters
    # else:
    #    jobparamrec = JobparamstableArch.objects.filter(pandaid=pandaid)
    #    if len(jobparamrec) > 0:
    #        jobparams = jobparamrec[0].jobparameters
    dsfiles = []
    countOfInvocations = []
    ## If this is a JEDI job, look for job retries
    if 'jeditaskid' in job and job['jeditaskid'] > 0:
        print "looking for retries"
        ## Look for retries of this job
        if not isEventService(job):
            retryquery = {}
            retryquery['jeditaskid'] = job['jeditaskid']
            retryquery['oldpandaid'] = job['pandaid']
            retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
            pretries = getSequentialRetries(job['pandaid'], job['jeditaskid'], countOfInvocations)
        else:
            # Event-service jobs are retried at jobset level (relationtype jobset_retry).
            retryquery = {}
            retryquery['jeditaskid'] = job['jeditaskid']
            retryquery['oldpandaid'] = job['jobsetid']
            retryquery['relationtype'] = 'jobset_retry'
            # retries = JediJobRetryHistory.objects.filter(**retryquery).order_by('newpandaid').reverse().values()
            retries = getSequentialRetries_ESupstream(job['pandaid'], job['jobsetid'], job['jeditaskid'],
                                                      countOfInvocations)
            pretries = getSequentialRetries_ES(job['pandaid'], job['jobsetid'], job['jeditaskid'], countOfInvocations)
    else:
        retries = None
        pretries = None
    # countOfInvocations was passed by reference to the helpers above; only
    # its length is shown on the page.
    countOfInvocations = len(countOfInvocations)
    ## jobset info
    libjob = None
    runjobs = []
    mergejobs = []
    if 'jobset' in request.session['requestParams'] and 'jobsetid' in job and job['jobsetid'] > 0:
        print "jobset info"
        jsquery = {}
        jsquery['jobsetid'] = job['jobsetid']
        jsquery['produsername'] = job['produsername']
        values = ['pandaid', 'prodsourcelabel', 'processingtype', 'transformation']
        jsjobs = []
        jsjobs.extend(Jobsdefined4.objects.filter(**jsquery).values(*values))
        jsjobs.extend(Jobsactive4.objects.filter(**jsquery).values(*values))
        jsjobs.extend(Jobswaiting4.objects.filter(**jsquery).values(*values))
        jsjobs.extend(Jobsarchived4.objects.filter(**jsquery).values(*values))
        jsjobs.extend(Jobsarchived.objects.filter(**jsquery).values(*values))
        if len(jsjobs) > 0:
            # Classify jobset siblings by their transformation / processing type.
            for j in jsjobs:
                id = j['pandaid']
                if j['transformation'].find('runAthena') >= 0:
                    runjobs.append(id)
                elif j['transformation'].find('buildJob') >= 0:
                    libjob = id
                if j['processingtype'] == 'usermerge':
                    mergejobs.append(id)
    runesjobs = []
    mergeesjobs = []
    if isEventService(job) and 'jobsetid' in job and job['jobsetid'] > 0:
        print "jobset info"
        esjsquery = {}
        esjsquery['jobsetid'] = job['jobsetid']
        esjsquery['produsername'] = job['produsername']
        values = ['pandaid', 'eventservice']
        esjsjobs = []
        esjsjobs.extend(Jobsdefined4.objects.filter(**esjsquery).values(*values))
        esjsjobs.extend(Jobsactive4.objects.filter(**esjsquery).values(*values))
        esjsjobs.extend(Jobswaiting4.objects.filter(**esjsquery).values(*values))
        esjsjobs.extend(Jobsarchived4.objects.filter(**esjsquery).values(*values))
        esjsjobs.extend(Jobsarchived.objects.filter(**esjsquery).values(*values))
        if len(esjsjobs) > 0:
            for j in esjsjobs:
                # eventservice flag as used here: 1 = ES run job, 2 = ES merge job.
                if j['eventservice'] == 1:
                    runesjobs.append(j['pandaid'])
                if j['eventservice'] == 2:
                    mergeesjobs.append(j['pandaid'])
    ## --- Event-service event table and per-status counts ---
    esjobstr = ''
    if isEventService(job):
        ## for ES jobs, prepare the event table
        esjobdict = {}
        for s in eventservicestatelist:
            esjobdict[s] = 0
        evtable = JediEvents.objects.filter(pandaid=job['pandaid']).order_by('-def_min_eventid').values('fileid',
                                                                                                        'datasetid',
                                                                                                        'def_min_eventid',
                                                                                                        'def_max_eventid',
                                                                                                        'processed_upto_eventid',
                                                                                                        'status',
                                                                                                        'job_processid',
                                                                                                        'attemptnr')
        # NOTE(review): fileids/datasetids are never populated (the loop that
        # filled them is commented out below), so flist/dslist stay empty and
        # the dsfiles query matches nothing — presumably disabled on purpose.
        fileids = {}
        datasetids = {}
        # for evrange in evtable:
        #    fileids[int(evrange['fileid'])] = {}
        #    datasetids[int(evrange['datasetid'])] = {}
        flist = []
        for f in fileids:
            flist.append(f)
        dslist = []
        for ds in datasetids:
            dslist.append(ds)
        # datasets = JediDatasets.objects.filter(datasetid__in=dslist).values()
        dsfiles = JediDatasetContents.objects.filter(fileid__in=flist).values()
        # for ds in datasets:
        #    datasetids[int(ds['datasetid'])]['dict'] = ds
        # for f in dsfiles:
        #    fileids[int(f['fileid'])]['dict'] = f
        for evrange in evtable:
            # evrange['fileid'] = fileids[int(evrange['fileid'])]['dict']['lfn']
            # evrange['datasetid'] = datasetids[evrange['datasetid']]['dict']['datasetname']
            # Map the numeric status code to its symbolic name and count it.
            evrange['status'] = eventservicestatelist[evrange['status']]
            esjobdict[evrange['status']] += 1
            # NOTE(review): presumably attemptnr counts down from 10, so this
            # converts it into the number of attempts used — confirm.
            evrange['attemptnr'] = 10 - evrange['attemptnr']
        esjobstr = ''
        for s in esjobdict:
            if esjobdict[s] > 0:
                esjobstr += " %s(%s) " % (s, esjobdict[s])
    else:
        evtable = []
    ## For CORE, pick up parameters from jobparams
    if VOMODE == 'core' or ('vo' in job and job['vo'] == 'core'):
        coreData = {}
        if jobparams:
            coreParams = re.match(
                '.*PIPELINE_TASK\=([a-zA-Z0-9]+).*PIPELINE_PROCESSINSTANCE\=([0-9]+).*PIPELINE_STREAM\=([0-9\.]+)',
                jobparams)
            if coreParams:
                coreData['pipelinetask'] = coreParams.group(1)
                coreData['processinstance'] = coreParams.group(2)
                coreData['pipelinestream'] = coreParams.group(3)
    else:
        coreData = None
    if 'jobstatus' in job and (job['jobstatus'] == 'failed' or job['jobstatus'] == 'holding'):
        errorinfo = getErrorDescription(job)
        if len(errorinfo) > 0:
            job['errorinfo'] = errorinfo
    ## --- Emit HTML or JSON ---
    # NOTE(review): ('application/json') is a plain string (no comma), so the
    # "in" below is a substring test, not tuple membership — probably meant
    # the 1-tuple ('application/json',); confirm before changing.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'prefix': getPrefix(request),
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'pandaid': pandaid,
            'job': job,
            'columns': columns,
            'files': files,
            'dsfiles': dsfiles,
            'nfiles': nfiles,
            'logfile': logfile,
            'oslogpath': oslogpath,
            'stdout': stdout,
            'stderr': stderr,
            'stdlog': stdlog,
            'jobparams': jobparams,
            'jobid': jobid,
            'coreData': coreData,
            'logextract': logextract,
            'retries': retries,
            'pretries': pretries,
            'countOfInvocations': countOfInvocations,
            'eventservice': isEventService(job),
            'evtable': evtable[:100],
            'debugmode': debugmode,
            'debugstdout': debugstdout,
            'libjob': libjob,
            'runjobs': runjobs,
            'mergejobs': mergejobs,
            'runesjobs': runesjobs,
            'mergeesjobs': mergeesjobs,
            'esjobstr': esjobstr,
            'fileSummary': fileSummary,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        setCacheEntry(request, "jobInfo", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        if isEventService(job):
            response = render_to_response('jobInfoES.html', data, RequestContext(request))
        else:
            response = render_to_response('jobInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    elif (
            ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {'files': files,
                'job': job,
                'dsfiles': dsfiles,
                }
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        return HttpResponse('not understood', content_type='text/html')
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as ISO 8601 strings."""

    def default(self, o):
        if isinstance(o, datetime):
            return o.isoformat()
        # Previously this fell off the end and returned None for anything
        # else, which made json.dumps silently emit "null" for unserializable
        # objects. Delegate to the base class so a TypeError is raised instead.
        return super(DateTimeEncoder, self).default(o)
def userList(request):
    """
    Render the list of users with recent activity.

    Two modes: 'database' (default for ATLAS) reads aggregate per-user rows
    from the Users table covering the last 90 days; 'dynamic' assembles a
    summary from the recent job tables. Responds with HTML or, for JSON
    requests, with the dynamic user summary list.
    """
    valid, response = initRequest(request)
    if not valid: return response
    nhours = 90 * 24
    setupView(request, hours=nhours, limit=-99)
    if VOMODE == 'atlas':
        view = 'database'
    else:
        view = 'dynamic'
    if 'view' in request.session['requestParams']:
        view = request.session['requestParams']['view']
    sumd = []
    jobsumd = []
    userdb = []
    userdbl = []
    userstats = {}
    if view == 'database':
        startdate = timezone.now() - timedelta(hours=nhours)
        startdate = startdate.strftime(defaultDatetimeFormat)
        enddate = timezone.now().strftime(defaultDatetimeFormat)
        query = {'latestjob__range': [startdate, enddate]}
        # viewParams['selection'] = ", last %d days" % (float(nhours)/24.)
        ## Use the users table
        sortby = request.session['requestParams'].get('sortby', 'name')
        # Map the requested sort key to its DB column (all sorted descending);
        # anything unrecognised falls back to sorting by user name ascending.
        descending = {'njobs': 'njobsa', 'date': 'latestjob', 'cpua1': 'cpua1',
                      'cpua7': 'cpua7', 'cpup1': 'cpup1', 'cpup7': 'cpup7'}
        if sortby in descending:
            userdb = Users.objects.filter(**query).order_by(descending[sortby]).reverse()
        else:
            userdb = Users.objects.filter(**query).order_by('name')
        anajobs = 0
        n1000 = 0
        n10k = 0
        nrecent3 = 0
        nrecent7 = 0
        nrecent30 = 0
        nrecent90 = 0
        ## Move to a list of dicts and adjust CPU unit (seconds -> hours)
        for u in userdb:
            udict = {}
            udict['name'] = u.name
            udict['njobsa'] = u.njobsa
            if u.cpua1: udict['cpua1'] = "%0.1f" % (int(u.cpua1) / 3600.)
            if u.cpua7: udict['cpua7'] = "%0.1f" % (int(u.cpua7) / 3600.)
            if u.cpup1: udict['cpup1'] = "%0.1f" % (int(u.cpup1) / 3600.)
            if u.cpup7: udict['cpup7'] = "%0.1f" % (int(u.cpup7) / 3600.)
            udict['latestjob'] = u.latestjob
            userdbl.append(udict)
            # Aggregate activity statistics across all users.
            if u.njobsa > 0: anajobs += u.njobsa
            if u.njobsa >= 1000: n1000 += 1
            if u.njobsa >= 10000: n10k += 1
            if u.latestjob != None:
                latest = timezone.now() - u.latestjob
                if latest.days < 4: nrecent3 += 1
                if latest.days < 8: nrecent7 += 1
                if latest.days < 31: nrecent30 += 1
                if latest.days < 91: nrecent90 += 1
        userstats['anajobs'] = anajobs
        userstats['n1000'] = n1000
        userstats['n10k'] = n10k
        userstats['nrecent3'] = nrecent3
        userstats['nrecent7'] = nrecent7
        userstats['nrecent30'] = nrecent30
        userstats['nrecent90'] = nrecent90
    else:
        # Dynamic view: summarize users from the recent job tables directly.
        if VOMODE == 'atlas':
            nhours = 12
        else:
            nhours = 7 * 24
        query = setupView(request, hours=nhours, limit=5000)
        ## dynamically assemble user summary info
        values = 'eventservice', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'processingtype', 'workinggroup', 'currentpriority'
        jobs = QuerySetChain( \
            Jobsdefined4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(
                *values),
            Jobsactive4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(
                *values),
            Jobswaiting4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(
                *values),
            Jobsarchived4.objects.filter(**query).order_by('-modificationtime')[:request.session['JOB_LIMIT']].values(
                *values),
        )
        jobs = cleanJobList(request, jobs)
        sumd = userSummaryDict(jobs)
        sumparams = ['jobstatus', 'prodsourcelabel', 'specialhandling', 'transformation', 'processingtype',
                     'workinggroup', 'priorityrange', 'jobsetrange']
        if VOMODE == 'atlas':
            sumparams.append('atlasrelease')
        else:
            sumparams.append('vo')
        jobsumd = jobSummaryDict(request, jobs, sumparams)[0]
    # The 1-tuple is intentional: a bare ('application/json') string would
    # turn the membership test below into a substring check.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        TFIRST = request.session['TFIRST']
        TLAST = request.session['TLAST']
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': extensibleURL(request),
            'url': request.path,
            'sumd': sumd,
            'jobsumd': jobsumd,
            'userdb': userdbl,
            'userstats': userstats,
            'tfirst': TFIRST,
            'tlast': TLAST,
            'plow': PLOW,
            'phigh': PHIGH,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('userList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    elif (
            ('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        del request.session['TFIRST']
        del request.session['TLAST']
        resp = sumd
        return HttpResponse(json.dumps(resp), content_type='text/html')
def userInfo(request, user=''):
    """
    Render the summary page for a single user.

    Shows the user's recent JEDI tasks, their jobs grouped by jobset, and
    aggregate statistics from the Users table. The user may come from the
    URL, the 'user'/'produsername' request parameters, or the SSO session
    (ADFS_LOGIN). Responds with HTML or, for JSON requests, with the
    per-user job summary dictionary.
    """
    valid, response = initRequest(request)
    if not valid: return response
    fullname = ''
    login = ''
    if user == '':
        if 'user' in request.session['requestParams']: user = request.session['requestParams']['user']
        if 'produsername' in request.session['requestParams']: user = request.session['requestParams']['produsername']
    if user == '':
        # Fall back to the SSO identity of the requester.
        if 'ADFS_LOGIN' in request.session:
            login = user = request.session['ADFS_LOGIN']
            fullname = request.session['ADFS_FULLNAME']
    if 'days' in request.session['requestParams']:
        days = int(request.session['requestParams']['days'])
    else:
        days = 7
    # Sanitize all request parameters before they feed into queries below.
    requestParams = {}
    for param in request.session['requestParams']:
        requestParams[escapeInput(param.strip())] = escapeInput(request.session['requestParams'][param.strip()].strip())
    request.session['requestParams'] = requestParams
    ## Tasks owned by the user
    startdate = timezone.now() - timedelta(hours=days * 24)
    startdate = startdate.strftime(defaultDatetimeFormat)
    enddate = timezone.now().strftime(defaultDatetimeFormat)
    query = {'modificationtime__range': [startdate, enddate]}
    #
    # TODO: if login we should set exact match
    #
    query['username__icontains'] = user.strip()
    tasks = JediTasks.objects.filter(**query).values()
    tasks = sorted(tasks, key=lambda x: -x['jeditaskid'])
    tasks = cleanTaskList(request, tasks)
    ntasks = len(tasks)
    tasksumd = taskSummaryDict(request, tasks)
    tasks = getTaskScoutingInfo(tasks, ntasks)
    ## Jobs
    limit = 5000
    query = setupView(request, hours=72, limit=limit)
    # query['produsername__icontains'] = user.strip()
    #
    # TODO: if login we should set exact match
    #
    query['produsername__startswith'] = user.strip()
    jobs = []
    values = 'eventservice', 'produsername', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'pandaid', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'currentpriority', 'creationtime', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'homepackage', 'inputfileproject', 'inputfiletype', 'attemptnr', 'jobname', 'proddblock', 'destinationdblock',
    jobs.extend(Jobsdefined4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
    jobs.extend(Jobsactive4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
    jobs.extend(Jobswaiting4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
    jobs.extend(Jobsarchived4.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
    jobsetids = None
    if len(jobs) == 0 or (len(jobs) < limit and LAST_N_HOURS_MAX > 72):
        jobs.extend(Jobsarchived.objects.filter(**query)[:request.session['JOB_LIMIT']].values(*values))
    # if len(jobs) < limit and ntasks == 0:
    #    ## try at least to find some old jobsets
    #    startdate = timezone.now() - timedelta(hours=30*24)
    #    startdate = startdate.strftime(defaultDatetimeFormat)
    #    enddate = timezone.now().strftime(defaultDatetimeFormat)
    #    query = { 'modificationtime__range' : [startdate, enddate] }
    #    query['produsername'] = user
    #    jobsetids = Jobsarchived.objects.filter(**query).values('jobsetid').distinct()
    jobs = cleanJobList(request, jobs)
    # Per-user aggregate statistics from the Users table.
    if fullname != '':
        query = {'name': fullname}
    else:
        query = {'name__icontains': user.strip()}
    userdb = Users.objects.filter(**query).values()
    if len(userdb) > 0:
        userstats = userdb[0]
        user = userstats['name']
        for field in ['cpua1', 'cpua7', 'cpup1', 'cpup7']:
            try:
                userstats[field] = "%0.1f" % (float(userstats[field]) / 3600.)
            except:
                userstats[field] = '-'
    else:
        userstats = None
    ## Divide up jobs by jobset and summarize
    jobsets = {}
    for job in jobs:
        if 'jobsetid' not in job or job['jobsetid'] == None: continue
        if job['jobsetid'] not in jobsets:
            jobsets[job['jobsetid']] = {}
            jobsets[job['jobsetid']]['jobsetid'] = job['jobsetid']
            jobsets[job['jobsetid']]['jobs'] = []
        jobsets[job['jobsetid']]['jobs'].append(job)
    for jobset in jobsets:
        jobsets[jobset]['sum'] = jobStateSummary(jobsets[jobset]['jobs'])
        jobsets[jobset]['njobs'] = len(jobsets[jobset]['jobs'])
        # Time window and priority range covered by this jobset's jobs.
        tfirst = timezone.now()
        tlast = timezone.now() - timedelta(hours=2400)
        plow = 1000000
        phigh = -1000000
        for job in jobsets[jobset]['jobs']:
            if job['modificationtime'] > tlast: tlast = job['modificationtime']
            if job['modificationtime'] < tfirst: tfirst = job['modificationtime']
            if job['currentpriority'] > phigh: phigh = job['currentpriority']
            if job['currentpriority'] < plow: plow = job['currentpriority']
        jobsets[jobset]['tfirst'] = tfirst
        jobsets[jobset]['tlast'] = tlast
        jobsets[jobset]['plow'] = plow
        jobsets[jobset]['phigh'] = phigh
    jobsetl = []
    # sorted() works on both Python 2 and 3 (dict.keys() is not a list in py3).
    jsk = sorted(jobsets.keys(), reverse=True)
    for jobset in jsk:
        jobsetl.append(jobsets[jobset])
    njobsmax = len(jobs)
    if 'display_limit' in request.session['requestParams'] and int(
            request.session['requestParams']['display_limit']) < len(jobs):
        display_limit = int(request.session['requestParams']['display_limit'])
        njobsmax = display_limit
        url_nolimit = removeParam(request.get_full_path(), 'display_limit')
    else:
        display_limit = 3000
        njobsmax = display_limit
        url_nolimit = request.get_full_path()
    # Build the per-user summary before branching so both the HTML and the
    # JSON responses can use it (previously it was computed only in the HTML
    # branch, making JSON requests fail with a NameError on 'sumd').
    sumd = userSummaryDict(jobs)
    # The 1-tuple is intentional: a bare ('application/json') string would
    # turn the membership test below into a substring check.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        flist = ['jobstatus', 'prodsourcelabel', 'processingtype', 'specialhandling', 'transformation', 'jobsetid',
                 'jeditaskid', 'computingsite', 'cloud', 'workinggroup', 'homepackage', 'inputfileproject',
                 'inputfiletype', 'attemptnr', 'priorityrange', 'jobsetrange']
        if VOMODE != 'atlas':
            flist.append('vo')
        else:
            flist.append('atlasrelease')
        jobsumd = jobSummaryDict(request, jobs, flist)
        njobsetmax = 200
        xurl = extensibleURL(request)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        TFIRST = request.session['TFIRST']
        TLAST = request.session['TLAST']
        del request.session['TFIRST']
        del request.session['TLAST']
        # NOTE(review): 'plow'/'phigh' use the module-level PLOW/PHIGH, not
        # the per-jobset values computed above — confirm intended.
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'user': user,
            'sumd': sumd,
            'jobsumd': jobsumd,
            'jobList': jobs[:njobsmax],
            'njobs': len(jobs),
            'query': query,
            'userstats': userstats,
            'tfirst': TFIRST,
            'tlast': TLAST,
            'plow': PLOW,
            'phigh': PHIGH,
            'jobsets': jobsetl[:njobsetmax - 1],
            'njobsetmax': njobsetmax,
            'njobsets': len(jobsetl),
            'url_nolimit': url_nolimit,
            'display_limit': display_limit,
            'tasks': tasks,
            'ntasks': ntasks,
            'tasksumd': tasksumd,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('userInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        resp = sumd
        return HttpResponse(json.dumps(resp), content_type='text/html')
def siteList(request):
    """
    Render the list of PanDA sites (schedconfig queues).

    Supports filtering by category (production / analysis / test /
    multicloud), by any schedconfig column given as a request parameter, and
    by '|'-separated catchall wildcards. The rendered context is cached for
    20 minutes. Responds with HTML, or with the raw site list for JSON
    requests.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "siteList")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('siteList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    for param in request.session['requestParams']:
        request.session['requestParams'][param] = escapeInput(request.session['requestParams'][param])
    setupView(request, opmode='notime')
    query = {}
    ### Add any extensions to the query determined from the URL
    if VOMODE == 'core': query['siteid__contains'] = 'CORE'
    prod = False
    extraParCondition = '1=1'
    for param in request.session['requestParams']:
        if param == 'category' and request.session['requestParams'][param] == 'multicloud':
            query['multicloud__isnull'] = False
        if param == 'category' and request.session['requestParams'][param] == 'analysis':
            query['siteid__contains'] = 'ANALY'
        if param == 'category' and request.session['requestParams'][param] == 'test':
            query['siteid__icontains'] = 'test'
        if param == 'category' and request.session['requestParams'][param] == 'production':
            prod = True
        if param == 'catchall':
            # Build an OR'ed SQL condition out of the |-separated wildcards.
            wildCards = request.session['requestParams'][param].split('|')
            countCards = len(wildCards)
            currentCardCount = 1
            extraParCondition = '('
            for card in wildCards:
                extraParCondition += preprocessWildCardString(escapeInput(card), 'catchall')
                if (currentCardCount < countCards): extraParCondition += ' OR '
                currentCardCount += 1
            extraParCondition += ')'
        # Any parameter that names a schedconfig column becomes an exact filter.
        for field in Schedconfig._meta.get_fields():
            if param == field.name and not (param == 'catchall'):
                query[param] = escapeInput(request.session['requestParams'][param])
    siteres = Schedconfig.objects.filter(**query).exclude(cloud='CMS').extra(where=[extraParCondition]).values()
    mcpres = Schedconfig.objects.filter(status='online').exclude(cloud='CMS').exclude(siteid__icontains='test').values(
        'siteid', 'multicloud', 'cloud').order_by('siteid')
    sites = []
    for site in siteres:
        # In multicloud view, keep only sites with a real multicloud value.
        if 'category' in request.session['requestParams'] and request.session['requestParams'][
            'category'] == 'multicloud':
            if (site['multicloud'] == 'None') or (not re.match('[A-Z]+', site['multicloud'])): continue
        sites.append(site)
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxmemory':
        sites = sorted(sites, key=lambda x: -x['maxmemory'])
    elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'maxtime':
        sites = sorted(sites, key=lambda x: -x['maxtime'])
    elif 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'gocname':
        sites = sorted(sites, key=lambda x: x['gocname'])
    else:
        sites = sorted(sites, key=lambda x: x['siteid'])
    if prod:
        # Production category: drop analysis and test queues.
        sites = [site for site in sites
                 if site['siteid'].find('ANALY') < 0 and site['siteid'].lower().find('test') < 0]
    for site in sites:
        # Convert maxtime to hours and space to TB-ish units for display.
        if site['maxtime'] and (site['maxtime'] > 0): site['maxtime'] = "%.1f" % (float(site['maxtime']) / 3600.)
        site['space'] = "%d" % (site['space'] / 1000.)
    # Initialize here so the 'cloud' request parameter cannot hit a NameError
    # below when VOMODE != 'atlas' (mcpsites used to be created only inside
    # the atlas-only branch).
    mcpsites = {}
    if VOMODE == 'atlas' and (
            len(request.session['requestParams']) == 0 or 'cloud' in request.session['requestParams']):
        clouds = Cloudconfig.objects.filter().exclude(name='CMS').exclude(name='OSG').values()
        clouds = sorted(clouds, key=lambda x: x['name'])
        for cloud in clouds:
            cloud['display'] = True
            if 'cloud' in request.session['requestParams'] and request.session['requestParams']['cloud'] != cloud[
                'name']: cloud['display'] = False
            mcpsites[cloud['name']] = []
            for site in sites:
                if site['siteid'] == cloud['tier1']:
                    cloud['space'] = site['space']
                    cloud['tspace'] = site['tspace']
            # Collect the cloud's home sites plus multicloud ("mcp") members.
            for site in mcpres:
                mcpclouds = site['multicloud'].split(',')
                if cloud['name'] in mcpclouds or cloud['name'] == site['cloud']:
                    sited = {}
                    sited['name'] = site['siteid']
                    sited['cloud'] = site['cloud']
                    if site['cloud'] == cloud['name']:
                        sited['type'] = 'home'
                    else:
                        sited['type'] = 'mcp'
                    mcpsites[cloud['name']].append(sited)
            cloud['mcpsites'] = ''
            for s in mcpsites[cloud['name']]:
                if s['type'] == 'home':
                    cloud['mcpsites'] += "<b>%s</b> " % s['name']
                else:
                    cloud['mcpsites'] += "%s " % s['name']
    else:
        clouds = None
    xurl = extensibleURL(request)
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    # The 1-tuple is intentional: a bare ('application/json') string would
    # turn the membership test below into a substring check.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
            'json' not in request.session['requestParams'])):
        sumd = siteSummaryDict(sites)
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'sites': sites,
            'clouds': clouds,
            'sumd': sumd,
            'xurl': xurl,
            'nosorturl': nosorturl,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        # Use .get() so an unknown cloud name yields an empty list rather
        # than a KeyError.
        if 'cloud' in request.session['requestParams']: data['mcpsites'] = mcpsites.get(
            request.session['requestParams']['cloud'], [])
        # data.update(getContextVariables(request))
        ##self monitor
        setCacheEntry(request, "siteList", json.dumps(data, cls=DateEncoder), 60 * 20)
        endSelfMonitor(request)
        response = render_to_response('siteList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        resp = sites
        return HttpResponse(json.dumps(resp), content_type='text/html')
def siteInfo(request, site=''):
    """Render the detail page (or JSON record) for one PanDA site.

    The site may be given as a URL argument or as a ?site= request parameter.
    The HTML view shows the Schedconfig queue attributes, associated queues,
    and recent incidents mentioning the queue; the JSON view returns the
    matching queue records.
    """
    valid, response = initRequest(request)
    if not valid: return response
    if site == '' and 'site' in request.session['requestParams']: site = request.session['requestParams']['site']
    setupView(request)
    # Case-insensitive match on the site id; first record is the primary queue.
    query = {'siteid__iexact': site}
    sites = Schedconfig.objects.filter(**query)
    colnames = []
    try:
        siterec = sites[0]
        colnames = siterec.get_all_fields()
    except IndexError:
        siterec = None
    # HPC queues get a longer job-history window on the page.
    HPC = False
    njobhours = 12
    try:
        if siterec.catchall.find('HPC') >= 0:
            HPC = True
            njobhours = 48
    except AttributeError:
        # siterec is None or has no catchall configured
        pass
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        # Build the name/value attribute list shown in the site summary table.
        attrs = []
        if siterec:
            attrs.append({'name': 'GOC name', 'value': siterec.gocname})
            if HPC: attrs.append(
                {'name': 'HPC', 'value': 'This is a High Performance Computing (HPC) supercomputer queue'})
            if siterec.catchall and siterec.catchall.find('log_to_objectstore') >= 0:
                attrs.append({'name': 'Object store logs', 'value': 'Logging to object store is enabled'})
            if siterec.objectstore and len(siterec.objectstore) > 0:
                # objectstore format: "location|type^path|type^path|..."
                fields = siterec.objectstore.split('|')
                nfields = len(fields)
                for nf in range(0, len(fields)):
                    if nf == 0:
                        attrs.append({'name': 'Object store location', 'value': fields[0]})
                    else:
                        fields2 = fields[nf].split('^')
                        if len(fields2) > 1:
                            ostype = fields2[0]
                            ospath = fields2[1]
                            attrs.append({'name': 'Object store %s path' % ostype, 'value': ospath})
            if siterec.nickname != site:
                attrs.append({'name': 'Queue (nickname)', 'value': siterec.nickname})
            if len(sites) > 1:
                attrs.append({'name': 'Total queues for this site', 'value': len(sites)})
            attrs.append({'name': 'Status', 'value': siterec.status})
            if siterec.comment_field and len(siterec.comment_field) > 0:
                attrs.append({'name': 'Comment', 'value': siterec.comment_field})
            attrs.append({'name': 'Cloud', 'value': siterec.cloud})
            if siterec.multicloud and len(siterec.multicloud) > 0:
                attrs.append({'name': 'Multicloud', 'value': siterec.multicloud})
            attrs.append({'name': 'Tier', 'value': siterec.tier})
            attrs.append({'name': 'DDM endpoint', 'value': siterec.ddm})
            # rss limits are stored in MB, shown in GB
            attrs.append({'name': 'Max rss', 'value': "%.1f GB" % (float(siterec.maxrss) / 1000.)})
            attrs.append({'name': 'Min rss', 'value': "%.1f GB" % (float(siterec.minrss) / 1000.)})
            if siterec.maxtime > 0:
                # maxtime is in seconds, shown in hours
                attrs.append({'name': 'Maximum time', 'value': "%.1f hours" % (float(siterec.maxtime) / 3600.)})
            attrs.append({'name': 'Space', 'value': "%d TB as of %s" % (
                (float(siterec.space) / 1000.), siterec.tspace.strftime('%m-%d %H:%M'))})
            attrs.append({'name': 'Last modified', 'value': "%s" % (siterec.lastmod.strftime('%Y-%m-%d %H:%M'))})
            # Incidents mentioning this queue (by nickname or siteid) over
            # the last 30 days.
            iquery = {}
            startdate = timezone.now() - timedelta(hours=24 * 30)
            startdate = startdate.strftime(defaultDatetimeFormat)
            enddate = timezone.now().strftime(defaultDatetimeFormat)
            iquery['at_time__range'] = [startdate, enddate]
            cloudQuery = Q(description__contains='queue=%s' % siterec.nickname) | Q(
                description__contains='queue=%s' % siterec.siteid)
            incidents = Incidents.objects.filter(**iquery).filter(cloudQuery).order_by('at_time').reverse().values()
        else:
            incidents = []
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'site': siterec,
            'queues': sites,
            'colnames': colnames,
            'attrs': attrs,
            'incidents': incidents,
            'name': site,
            'njobhours': njobhours,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('siteInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        # Bug fix: this branch previously iterated an undefined ``jobList``
        # (copied from a job-listing view), raising a NameError for every
        # JSON/API request. Return the matching queue records instead.
        resp = []
        for q in sites:
            resp.append({'siteid': q.siteid, 'nickname': q.nickname, 'cloud': q.cloud,
                         'status': q.status, 'tier': q.tier})
        return HttpResponse(json.dumps(resp), content_type='text/html')
def updateCacheWithListOfMismatchedCloudSites(mismatchedSites):
    """Merge *mismatchedSites* ([site, cloud] pairs) into the long-lived
    cached list of sites whose cloud assignment disagrees with the mapping.
    The merged list is kept sorted and de-duplicated."""
    cached = cache.get('mismatched-cloud-sites-list')
    if not cached:
        # Nothing cached yet: store the new list as-is (1 year TTL).
        cache.set('mismatched-cloud-sites-list', mismatchedSites, 31536000)
        return
    cached.extend(mismatchedSites)
    cached.sort()
    # groupby on the sorted list collapses adjacent duplicates (the pairs are
    # lists, hence unhashable, so a set cannot be used here).
    deduplicated = [entry for entry, _ in itertools.groupby(cached)]
    cache.set('mismatched-cloud-sites-list', deduplicated, 31536000)
def getListOfFailedBeforeSiteAssignedJobs(query, mismatchedSites, notime=True):
    """Collect pandaids of jobs sitting on cloud-mismatched sites.

    :param query: base Django filter dict (may contain modificationtime__range)
    :param mismatchedSites: list of [computingsite, cloud] pairs
    :param notime: drop the time constraint for the non-archived tables
    :return: '' when nothing matches, otherwise '&pandaid=<id>,<id>,...'
    """
    # Bug fix: with an empty site list the original code called
    # .filter('') which raises; bail out early instead.
    if not mismatchedSites:
        return ''
    querynotime = copy.deepcopy(query)
    if notime:
        # pop() instead of del: tolerate a query without a time range
        querynotime.pop('modificationtime__range', None)
    # OR together one (site AND cloud) condition per mismatched pair.
    siteCondition = None
    for site in mismatchedSites:
        siteQuery = Q(computingsite=site[0]) & Q(cloud=site[1])
        siteCondition = siteQuery if siteCondition is None else (siteCondition | siteQuery)
    jobs = []
    # Live tables are queried without the time window; archived with it.
    jobs.extend(Jobsactive4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
    jobs.extend(Jobsdefined4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
    jobs.extend(Jobswaiting4.objects.filter(siteCondition).filter(**querynotime).values('pandaid'))
    jobs.extend(Jobsarchived4.objects.filter(siteCondition).filter(**query).values('pandaid'))
    if not jobs:
        return ''
    return '&pandaid=' + ','.join(str(job['pandaid']) for job in jobs)
def siteSummary(query, notime=True):
    """Job counts per (cloud, computingsite, jobstatus) across the four job
    tables. The archived table always keeps the time window; the live tables
    drop it when *notime* is set."""
    querynotime = copy.deepcopy(query)
    if notime:
        # behaviour-equivalent to: if key in dict: del dict[key]
        querynotime.pop('modificationtime__range', None)
    grouping = ('cloud', 'computingsite', 'jobstatus')
    summary = []
    for model, constraint in ((Jobsactive4, querynotime),
                              (Jobsdefined4, querynotime),
                              (Jobswaiting4, querynotime),
                              (Jobsarchived4, query)):
        summary.extend(model.objects.filter(**constraint).values(*grouping).annotate(
            Count('jobstatus')).order_by(*grouping))
    return summary
def taskSummaryData(request, query):
    """Job counts per task id ('taskid' for old-style tasks, 'jeditaskid' for
    JEDI tasks) and job status, across the four job tables, each capped at the
    session JOB_LIMIT.

    Bug fix: the original aliased ``querynotime = query`` and then deleted the
    time range, silently stripping it from the caller's dict and from the
    Jobsarchived4 query below. A deepcopy (as in siteSummary) preserves it.
    """
    querynotime = copy.deepcopy(query)
    querynotime.pop('modificationtime__range', None)
    limit = request.session['JOB_LIMIT']
    summary = []
    for taskField in ('taskid', 'jeditaskid'):
        # Live tables are queried without the time window; archived with it.
        for model, constraint in ((Jobsactive4, querynotime),
                                  (Jobsdefined4, querynotime),
                                  (Jobswaiting4, querynotime),
                                  (Jobsarchived4, query)):
            summary.extend(model.objects.filter(**constraint).values(taskField, 'jobstatus').annotate(
                Count('jobstatus')).order_by(taskField, 'jobstatus')[:limit])
    return summary
def voSummary(query):
    """Job counts per (vo, jobstatus) across the four job tables.

    Bug fix: the original aliased ``querynotime = query`` before deleting the
    time range, mutating the caller's dict and removing the window from the
    Jobsarchived4 query too; deepcopy keeps the intended behaviour (matching
    siteSummary). The unguarded ``del`` is also replaced with pop().
    """
    querynotime = copy.deepcopy(query)
    querynotime.pop('modificationtime__range', None)
    summary = []
    summary.extend(Jobsactive4.objects.filter(**querynotime).values('vo', 'jobstatus').annotate(Count('jobstatus')))
    summary.extend(Jobsdefined4.objects.filter(**querynotime).values('vo', 'jobstatus').annotate(Count('jobstatus')))
    summary.extend(Jobswaiting4.objects.filter(**querynotime).values('vo', 'jobstatus').annotate(Count('jobstatus')))
    summary.extend(Jobsarchived4.objects.filter(**query).values('vo', 'jobstatus').annotate(Count('jobstatus')))
    return summary
def wgSummary(query):
    """Job counts per (workinggroup, jobstatus) across the four job tables.

    Bug fix: the original aliased ``querynotime = query`` before deleting the
    time range, mutating the caller's dict and stripping the window from the
    Jobsarchived4 query; deepcopy keeps it (consistent with siteSummary).
    """
    querynotime = copy.deepcopy(query)
    querynotime.pop('modificationtime__range', None)
    summary = []
    # Original table order preserved: defined, active, waiting, archived.
    for model, constraint in ((Jobsdefined4, querynotime),
                              (Jobsactive4, querynotime),
                              (Jobswaiting4, querynotime),
                              (Jobsarchived4, query)):
        summary.extend(model.objects.filter(**constraint).values('workinggroup', 'jobstatus').annotate(
            Count('jobstatus')))
    return summary
def wnSummary(query):
    """Job counts per (modificationhost, jobstatus) for active and recently
    archived jobs matching *query*."""
    # The time range is deliberately kept for both tables: dropping it would
    # make the counts inconsistent with the advertised 12-hour job lists.
    summary = []
    for model in (Jobsactive4, Jobsarchived4):
        summary.extend(model.objects.filter(**query).values('modificationhost', 'jobstatus').annotate(
            Count('jobstatus')).order_by('modificationhost', 'jobstatus'))
    return summary
def wnInfo(request, site, wnname='all'):
    """Give worker node level breakdown of site activity. Spot hot nodes, error prone nodes.

    Renders wnInfo.html (or returns JSON when requested) with per-worker-node
    job state counts for *site*, synthetic 'All' and 'Average' rows, outlier
    flags for suspicious nodes, and failed/finished plot data.
    """
    # Look-back window in hours, overridable with the ?hours= GET parameter.
    if 'hours' in request.GET:
        hours = int(request.GET['hours'])
    else:
        hours = 12
    valid, response = initRequest(request)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "wnInfo")
    if data is not None:
        # Cache hit: re-render the stored context directly (HTML path).
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('wnInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    errthreshold = 15  # percent-failed threshold passed through to the template
    # Build the job query: one specific worker node, or all nodes of a site.
    if wnname != 'all':
        query = setupView(request, hours=hours, limit=999999)
        query['modificationhost__endswith'] = wnname
    else:
        query = setupView(request, hours=hours, limit=999999)
        query['computingsite'] = site
    wnsummarydata = wnSummary(query)
    totstates = {}
    totjobs = 0
    wns = {}
    wnPlotFailed = {}
    wnPlotFinished = {}
    for state in sitestatelist:
        totstates[state] = 0
    # Aggregate per node. modificationhost is usually 'slot@hostname': the
    # site-wide view groups by hostname, the single-node view keeps the slot.
    for rec in wnsummarydata:
        jobstatus = rec['jobstatus']
        count = rec['jobstatus__count']
        wnfull = rec['modificationhost']
        wnsplit = wnfull.split('@')
        if len(wnsplit) == 2:
            if wnname == 'all':
                wn = wnsplit[1]
            else:
                wn = wnfull
            slot = wnsplit[0]
        else:
            wn = wnfull
            slot = '1'
        # Skip aipanda* hosts (presumably PanDA server machines, not worker
        # nodes -- TODO confirm)
        if wn.startswith('aipanda'): continue
        if jobstatus == 'failed':
            if not wn in wnPlotFailed: wnPlotFailed[wn] = 0
            wnPlotFailed[wn] += count
        elif jobstatus == 'finished':
            if not wn in wnPlotFinished: wnPlotFinished[wn] = 0
            wnPlotFinished[wn] += count
        totjobs += count
        if jobstatus not in totstates:
            totstates[jobstatus] = 0
        totstates[jobstatus] += count
        if wn not in wns:
            # First sighting of this node: initialise its record.
            wns[wn] = {}
            wns[wn]['name'] = wn
            wns[wn]['count'] = 0
            wns[wn]['states'] = {}
            wns[wn]['slotd'] = {}
            wns[wn]['statelist'] = []
            for state in sitestatelist:
                wns[wn]['states'][state] = {}
                wns[wn]['states'][state]['name'] = state
                wns[wn]['states'][state]['count'] = 0
            # NOTE(review): this slot initialisation only runs the first time
            # a node is seen; a new slot on an already-known node would raise
            # KeyError on the increment below -- confirm intended indentation.
            if slot not in wns[wn]['slotd']: wns[wn]['slotd'][slot] = 0
        wns[wn]['slotd'][slot] += 1
        wns[wn]['count'] += count
        if jobstatus not in wns[wn]['states']:
            wns[wn]['states'][jobstatus] = {}
            wns[wn]['states'][jobstatus]['count'] = 0
        wns[wn]['states'][jobstatus]['count'] += count
    ## Convert dict to summary list
    wnkeys = wns.keys()
    wnkeys.sort()
    wntot = len(wnkeys)
    fullsummary = []
    allstated = {}
    allstated['finished'] = allstated['failed'] = 0
    # Synthetic 'All' row: totals over every node.
    allwns = {}
    allwns['name'] = 'All'
    allwns['count'] = totjobs
    allwns['states'] = totstates
    allwns['statelist'] = []
    for state in sitestatelist:
        allstate = {}
        allstate['name'] = state
        allstate['count'] = totstates[state]
        allstated[state] = totstates[state]
        allwns['statelist'].append(allstate)
    if int(allstated['finished']) + int(allstated['failed']) > 0:
        allwns['pctfail'] = int(100. * float(allstated['failed']) / (allstated['finished'] + allstated['failed']))
    else:
        allwns['pctfail'] = 0
    if wnname == 'all': fullsummary.append(allwns)
    # Synthetic 'Average' row: per-node averages of the totals.
    avgwns = {}
    avgwns['name'] = 'Average'
    if wntot > 0:
        # NOTE(review): integer division under Python 2 -- the average is
        # truncated before "%0.2f" formatting; confirm whether a float
        # average was intended.
        avgwns['count'] = "%0.2f" % (totjobs / wntot)
    else:
        avgwns['count'] = ''
    avgwns['states'] = totstates
    avgwns['statelist'] = []
    avgstates = {}
    for state in sitestatelist:
        if wntot > 0:
            avgstates[state] = totstates[state] / wntot
        else:
            avgstates[state] = ''
        allstate = {}
        allstate['name'] = state
        if wntot > 0:
            allstate['count'] = "%0.2f" % (int(totstates[state]) / wntot)
            allstated[state] = "%0.2f" % (int(totstates[state]) / wntot)
        else:
            allstate['count'] = ''
            allstated[state] = ''
        avgwns['statelist'].append(allstate)
    avgwns['pctfail'] = allwns['pctfail']
    if wnname == 'all': fullsummary.append(avgwns)
    # Per-node rows, with outlier flags relative to the per-node averages.
    for wn in wnkeys:
        outlier = ''
        wns[wn]['slotcount'] = len(wns[wn]['slotd'])
        wns[wn]['pctfail'] = 0
        for state in sitestatelist:
            wns[wn]['statelist'].append(wns[wn]['states'][state])
        if wns[wn]['states']['finished']['count'] + wns[wn]['states']['failed']['count'] > 0:
            wns[wn]['pctfail'] = int(100. * float(wns[wn]['states']['failed']['count']) / (
                    wns[wn]['states']['finished']['count'] + wns[wn]['states']['failed']['count']))
        # Heuristics: finished below 1/5 of the average, or failed above 3x
        # the average (and at least 5), flags the node.
        if float(wns[wn]['states']['finished']['count']) < float(avgstates['finished']) / 5.:
            outlier += " LowFinished "
        if float(wns[wn]['states']['failed']['count']) > max(float(avgstates['failed']) * 3., 5.):
            outlier += " HighFailed "
        wns[wn]['outlier'] = outlier
        fullsummary.append(wns[wn])
    # Optional sorting by a job state column or by failure percentage.
    if 'sortby' in request.session['requestParams']:
        if request.session['requestParams']['sortby'] in sitestatelist:
            fullsummary = sorted(fullsummary, key=lambda x: x['states'][request.session['requestParams']['sortby']],
                                 reverse=True)
        elif request.session['requestParams']['sortby'] == 'pctfail':
            fullsummary = sorted(fullsummary, key=lambda x: x['pctfail'], reverse=True)
    # Sorted [name, count] pairs feeding the failed/finished bar plots.
    kys = wnPlotFailed.keys()
    kys.sort()
    wnPlotFailedL = []
    for k in kys:
        wnPlotFailedL.append([k, wnPlotFailed[k]])
    kys = wnPlotFinished.keys()
    kys.sort()
    wnPlotFinishedL = []
    for k in kys:
        wnPlotFinishedL.append([k, wnPlotFinished[k]])
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        # HTML response: render, then cache the context for 20 minutes.
        xurl = extensibleURL(request)
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'xurl': xurl,
            'site': site,
            'wnname': wnname,
            'user': None,
            'summary': fullsummary,
            'wnPlotFailed': wnPlotFailedL,
            'wnPlotFinished': wnPlotFinishedL,
            'hours': hours,
            'errthreshold': errthreshold,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('wnInfo.html', data, RequestContext(request))
        setCacheEntry(request, "wnInfo", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # JSON response: same context minus template-only fields.
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'site': site,
            'wnname': wnname,
            'user': None,
            'summary': fullsummary,
            'wnPlotFailed': wnPlotFailedL,
            'wnPlotFinished': wnPlotFinishedL,
            'hours': hours,
            'errthreshold': errthreshold,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
def dashSummary(request, hours, limit=999999, view='all', cloudview='region', notime=True):
    """Build the nested cloud -> site job-state summary for the dashboards.

    Returns a list of per-cloud dicts (preceded by a synthetic 'All' row),
    each carrying pilot counts, 'no job' counts, per-state counts and a
    per-site 'summary' list, optionally sorted per the 'sortby' parameter.
    """
    pilots = getPilotCounts(view)
    query = setupView(request, hours=hours, limit=limit, opmode=view)
    # Cloud status info only shown for a plain ATLAS view with no filters.
    if VOMODE == 'atlas' and len(request.session['requestParams']) == 0:
        cloudinfol = Cloudconfig.objects.filter().exclude(name='CMS').exclude(name='OSG').values('name', 'status')
    else:
        cloudinfol = []
    cloudinfo = {}
    for c in cloudinfol:
        cloudinfo[c['name']] = c['status']
    siteinfol = Schedconfig.objects.filter().exclude(cloud='CMS').values('siteid', 'status')
    siteinfo = {}
    for s in siteinfol:
        siteinfo[s['siteid']] = s['status']
    sitesummarydata = siteSummary(query, notime)
    # Per-site sums of 'nojobabs' over the 3-hour Sitedata rows.
    nojobabs = Sitedata.objects.filter(hours=3).values('site').annotate(dcount=Sum('nojobabs'))
    nojobabshash = {}
    for item in nojobabs:
        nojobabshash[item['site']] = item['dcount']
    mismatchedSites = []
    clouds = {}
    totstates = {}
    totjobs = 0
    for state in sitestatelist:
        totstates[state] = 0
    for rec in sitesummarydata:
        # Resolve the cloud from the home-cloud mapping ('region' view) or
        # from the job record itself; unknown sites are recorded as mismatches.
        if cloudview == 'region':
            if rec['computingsite'] in homeCloud:
                cloud = homeCloud[rec['computingsite']]
            else:
                print "ERROR cloud not known", rec
                mismatchedSites.append([rec['computingsite'], rec['cloud']])
                cloud = ''
        else:
            cloud = rec['cloud']
        site = rec['computingsite']
        # ANALY* sites belong to the analysis view only (except test views).
        if view.find('test') < 0:
            if view != 'analysis' and site.startswith('ANALY'): continue
            if view == 'analysis' and not site.startswith('ANALY'): continue
        jobstatus = rec['jobstatus']
        count = rec['jobstatus__count']
        if jobstatus not in sitestatelist: continue
        totjobs += count
        totstates[jobstatus] += count
        if cloud not in clouds:
            # First sighting of this cloud: initialise its record.
            print "Cloud:" + cloud
            clouds[cloud] = {}
            clouds[cloud]['name'] = cloud
            if cloud in cloudinfo: clouds[cloud]['status'] = cloudinfo[cloud]
            clouds[cloud]['count'] = 0
            clouds[cloud]['pilots'] = 0
            clouds[cloud]['nojobabs'] = 0
            clouds[cloud]['sites'] = {}
            clouds[cloud]['states'] = {}
            clouds[cloud]['statelist'] = []
            for state in sitestatelist:
                clouds[cloud]['states'][state] = {}
                clouds[cloud]['states'][state]['name'] = state
                clouds[cloud]['states'][state]['count'] = 0
        clouds[cloud]['count'] += count
        clouds[cloud]['states'][jobstatus]['count'] += count
        if site not in clouds[cloud]['sites']:
            # First sighting of this site within the cloud.
            clouds[cloud]['sites'][site] = {}
            clouds[cloud]['sites'][site]['name'] = site
            if site in siteinfo: clouds[cloud]['sites'][site]['status'] = siteinfo[site]
            clouds[cloud]['sites'][site]['count'] = 0
            if site in pilots:
                clouds[cloud]['sites'][site]['pilots'] = pilots[site]['count']
                clouds[cloud]['pilots'] += pilots[site]['count']
            else:
                clouds[cloud]['sites'][site]['pilots'] = 0
            if site in nojobabshash:
                clouds[cloud]['sites'][site]['nojobabs'] = nojobabshash[site]
                clouds[cloud]['nojobabs'] += nojobabshash[site]
            else:
                clouds[cloud]['sites'][site]['nojobabs'] = 0
            clouds[cloud]['sites'][site]['states'] = {}
            for state in sitestatelist:
                clouds[cloud]['sites'][site]['states'][state] = {}
                clouds[cloud]['sites'][site]['states'][state]['name'] = state
                clouds[cloud]['sites'][site]['states'][state]['count'] = 0
        clouds[cloud]['sites'][site]['count'] += count
        clouds[cloud]['sites'][site]['states'][jobstatus]['count'] += count
    updateCacheWithListOfMismatchedCloudSites(mismatchedSites)
    ## Go through the sites, add any that are missing (because they have no jobs in the interval)
    if cloudview != 'cloud':
        for site in pandaSites:
            if view.find('test') < 0:
                if view != 'analysis' and site.startswith('ANALY'): continue
                if view == 'analysis' and not site.startswith('ANALY'): continue
            cloud = pandaSites[site]['cloud']
            if cloud not in clouds:
                ## Bail. Adding sites is one thing; adding clouds is another
                continue
            if site not in clouds[cloud]['sites']:
                # Zero-filled record for a site with no jobs in the window.
                clouds[cloud]['sites'][site] = {}
                clouds[cloud]['sites'][site]['name'] = site
                if site in siteinfo: clouds[cloud]['sites'][site]['status'] = siteinfo[site]
                clouds[cloud]['sites'][site]['count'] = 0
                clouds[cloud]['sites'][site]['pctfail'] = 0
                if site in nojobabshash:
                    clouds[cloud]['sites'][site]['nojobabs'] = nojobabshash[site]
                    clouds[cloud]['nojobabs'] += nojobabshash[site]
                else:
                    clouds[cloud]['sites'][site]['nojobabs'] = 0
                if site in pilots:
                    clouds[cloud]['sites'][site]['pilots'] = pilots[site]['count']
                    clouds[cloud]['pilots'] += pilots[site]['count']
                else:
                    clouds[cloud]['sites'][site]['pilots'] = 0
                clouds[cloud]['sites'][site]['states'] = {}
                for state in sitestatelist:
                    clouds[cloud]['sites'][site]['states'][state] = {}
                    clouds[cloud]['sites'][site]['states'][state]['name'] = state
                    clouds[cloud]['sites'][site]['states'][state]['count'] = 0
    ## Convert dict to summary list
    cloudkeys = clouds.keys()
    cloudkeys.sort()
    fullsummary = []
    allstated = {}
    allstated['finished'] = allstated['failed'] = 0
    # Synthetic 'All' row: totals across every cloud.
    allclouds = {}
    allclouds['name'] = 'All'
    allclouds['count'] = totjobs
    allclouds['pilots'] = 0
    allclouds['nojobabs'] = 0
    allclouds['sites'] = {}
    allclouds['states'] = totstates
    allclouds['statelist'] = []
    for state in sitestatelist:
        allstate = {}
        allstate['name'] = state
        allstate['count'] = totstates[state]
        allstated[state] = totstates[state]
        allclouds['statelist'].append(allstate)
    if int(allstated['finished']) + int(allstated['failed']) > 0:
        allclouds['pctfail'] = int(100. * float(allstated['failed']) / (allstated['finished'] + allstated['failed']))
    else:
        allclouds['pctfail'] = 0
    for cloud in cloudkeys:
        allclouds['pilots'] += clouds[cloud]['pilots']
    fullsummary.append(allclouds)
    for cloud in cloudkeys:
        for state in sitestatelist:
            clouds[cloud]['statelist'].append(clouds[cloud]['states'][state])
        sites = clouds[cloud]['sites']
        sitekeys = sites.keys()
        sitekeys.sort()
        cloudsummary = []
        for site in sitekeys:
            sitesummary = []
            for state in sitestatelist:
                sitesummary.append(sites[site]['states'][state])
            sites[site]['summary'] = sitesummary
            if sites[site]['states']['finished']['count'] + sites[site]['states']['failed']['count'] > 0:
                sites[site]['pctfail'] = int(100. * float(sites[site]['states']['failed']['count']) / (
                    sites[site]['states']['finished']['count'] + sites[site]['states']['failed']['count']))
            else:
                sites[site]['pctfail'] = 0
            cloudsummary.append(sites[site])
        clouds[cloud]['summary'] = cloudsummary
        # NOTE(review): unlike sites, a cloud with no finished/failed jobs
        # never gets a 'pctfail' key -- the pctfail sort below could KeyError.
        if clouds[cloud]['states']['finished']['count'] + clouds[cloud]['states']['failed']['count'] > 0:
            clouds[cloud]['pctfail'] = int(100. * float(clouds[cloud]['states']['failed']['count']) / (
                clouds[cloud]['states']['finished']['count'] + clouds[cloud]['states']['failed']['count']))
        fullsummary.append(clouds[cloud])
    if 'sortby' in request.session['requestParams']:
        # NOTE(review): this checks the global 'statelist' whereas the similar
        # views use 'sitestatelist' -- confirm which is intended.
        if request.session['requestParams']['sortby'] in statelist:
            fullsummary = sorted(fullsummary, key=lambda x: x['states'][request.session['requestParams']['sortby']],
                                 reverse=True)
            cloudsummary = sorted(cloudsummary, key=lambda x: x['states'][request.session['requestParams']['sortby']],
                                  reverse=True)
            for cloud in clouds:
                clouds[cloud]['summary'] = sorted(clouds[cloud]['summary'],
                                                  key=lambda x: x['states'][request.session['requestParams']['sortby']][
                                                      'count'], reverse=True)
        elif request.session['requestParams']['sortby'] == 'pctfail':
            fullsummary = sorted(fullsummary, key=lambda x: x['pctfail'], reverse=True)
            cloudsummary = sorted(cloudsummary, key=lambda x: x['pctfail'], reverse=True)
            for cloud in clouds:
                clouds[cloud]['summary'] = sorted(clouds[cloud]['summary'], key=lambda x: x['pctfail'], reverse=True)
    return fullsummary
def dashTaskSummary(request, hours, limit=999999, view='all'):
    """Build the per-task job-state summary list for the dashboard.

    Aggregates the rows from taskSummaryData() by task id (JEDI jeditaskid
    takes precedence over old-style taskid), resolves task names, and returns
    a list of per-task dicts, optionally sorted per the 'sortby' parameter.
    """
    query = setupView(request, hours=hours, limit=limit, opmode=view)
    tasksummarydata = taskSummaryData(request, query)
    tasks = {}
    totstates = {}
    totjobs = 0
    for state in sitestatelist:
        totstates[state] = 0
    # First pass: collect the task ids so the names can be resolved in bulk.
    taskids = []
    for rec in tasksummarydata:
        if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
            taskids.append({'jeditaskid': rec['jeditaskid']})
        elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0:
            taskids.append({'taskid': rec['taskid']})
    tasknamedict = taskNameDict(taskids)
    # Second pass: accumulate per-task, per-state counts.
    for rec in tasksummarydata:
        if 'jeditaskid' in rec and rec['jeditaskid'] and rec['jeditaskid'] > 0:
            taskid = rec['jeditaskid']
            tasktype = 'JEDI'
        elif 'taskid' in rec and rec['taskid'] and rec['taskid'] > 0:
            taskid = rec['taskid']
            tasktype = 'old'
        else:
            continue
        jobstatus = rec['jobstatus']
        count = rec['jobstatus__count']
        if jobstatus not in sitestatelist: continue
        totjobs += count
        totstates[jobstatus] += count
        if taskid not in tasks:
            # First sighting of this task: initialise its record, falling
            # back to the numeric id when no name is known.
            tasks[taskid] = {}
            tasks[taskid]['taskid'] = taskid
            if taskid in tasknamedict:
                tasks[taskid]['name'] = tasknamedict[taskid]
            else:
                tasks[taskid]['name'] = str(taskid)
            tasks[taskid]['count'] = 0
            tasks[taskid]['states'] = {}
            tasks[taskid]['statelist'] = []
            for state in sitestatelist:
                tasks[taskid]['states'][state] = {}
                tasks[taskid]['states'][state]['name'] = state
                tasks[taskid]['states'][state]['count'] = 0
        tasks[taskid]['count'] += count
        tasks[taskid]['states'][jobstatus]['count'] += count
    if view == 'analysis':
        ## Show only tasks starting with 'user.'
        kys = tasks.keys()
        for t in kys:
            if not str(tasks[t]['name'].encode('ascii', 'ignore')).startswith('user.'): del tasks[t]
    ## Convert dict to summary list
    taskkeys = tasks.keys()
    taskkeys.sort()
    fullsummary = []
    for taskid in taskkeys:
        for state in sitestatelist:
            tasks[taskid]['statelist'].append(tasks[taskid]['states'][state])
        # NOTE(review): 'pctfail' is only set when there are finished/failed
        # jobs; the pctfail sort below could KeyError on tasks without it.
        if tasks[taskid]['states']['finished']['count'] + tasks[taskid]['states']['failed']['count'] > 0:
            tasks[taskid]['pctfail'] = int(100. * float(tasks[taskid]['states']['failed']['count']) / (
                    tasks[taskid]['states']['finished']['count'] + tasks[taskid]['states']['failed']['count']))
        fullsummary.append(tasks[taskid])
    if 'sortby' in request.session['requestParams']:
        if request.session['requestParams']['sortby'] in sitestatelist:
            fullsummary = sorted(fullsummary, key=lambda x: x['states'][request.session['requestParams']['sortby']],
                                 reverse=True)
        elif request.session['requestParams']['sortby'] == 'pctfail':
            fullsummary = sorted(fullsummary, key=lambda x: x['pctfail'], reverse=True)
    return fullsummary
def preProcess(request):
    """Placeholder entry point for the error-summary pre-processing pipeline.

    Planned steps:
      0. Decide tables structure and parameters aggregates approach
      1. Get list of jobs modified later than the previously saved last
         modified job
      2. For each of them calculate the output variables of the error
         summary, factorized over the request parameters that change the flow
      3. Save the new variables in a dedicated table as jobid ~ variable
      4. When a new query comes, select the corresponding ids from the job
         tables
      5. Select the variables from the transient table
      6. Merge them and display the output
    """
    # Pre-processing is not wired up yet; the commented calls below sketch
    # the intended flow.
    # data = {}
    # dashTaskSummary_preprocess(request)
    # response = render_to_response('preprocessLog.html', data, RequestContext(request))
    # patch_response_headers(response, cache_timeout=-1)
    return None
# class prepDashTaskSummary:
def dashTaskSummary_preprocess(request):
    """Collect per-task error-summary aggregates (pre-processing draft).

    Queries the four job tables grouped by task id -- both the old-style
    ``taskid`` and the JEDI ``jeditaskid`` -- together with the attributes
    needed by the error summary, annotated with per-attribute counts and
    capped at the session JOB_LIMIT per query.

    The summarization stage is not implemented yet, so the collected rows are
    currently unused and the function always returns -1. (A dead commented-out
    draft of that stage, duplicating dashTaskSummary(), was removed.)
    """
    # query = setupView(request,hours=hours,limit=limit,opmode=view)
    query = {'modificationtime__range': [timezone.now() - timedelta(hours=LAST_N_HOURS_MAX), timezone.now()]}
    # Bug fix: the original aliased ``querynotime = query`` and deleted the
    # time range, emptying ``query`` as well so the archived-table query below
    # lost its time window. deepcopy preserves it.
    querynotime = copy.deepcopy(query)
    del querynotime['modificationtime__range']
    valueFields = ('jobstatus', 'computingsite', 'produsername', 'transexitcode',
                   'piloterrorcode', 'processingtype', 'prodsourcelabel')
    countAnnotations = [Count(f) for f in valueFields]
    limit = request.session['JOB_LIMIT']
    tasksummarydata = []
    for taskField in ('taskid', 'jeditaskid'):
        # Live tables are queried without the time window; archived with it.
        for model, constraint in ((Jobsactive4, querynotime),
                                  (Jobsdefined4, querynotime),
                                  (Jobswaiting4, querynotime),
                                  (Jobsarchived4, query)):
            tasksummarydata.extend(
                model.objects.filter(**constraint).values(taskField, *valueFields).annotate(
                    *countAnnotations).order_by(taskField, 'jobstatus')[:limit])
    return -1
# https://github.com/PanDAWMS/panda-jedi/blob/master/pandajedi/jedicore/JediCoreUtils.py
def getEffectiveFileSize(fsize, startEvent, endEvent, nEvents):
    """
    Return the effective size of an input file in MB.

    When only an event range [startEvent, endEvent] out of the file's
    nEvents is processed, the size is scaled down proportionally.
    Pseudo inputs (fsize of None or 0) and zero effective sizes fall back
    to a dummy size of 1 MB so downstream weights never collapse to zero.

    :param fsize: file size in bytes, or None/0 for pseudo input
    :param startEvent: first event of the processed range, or None
    :param endEvent: last event of the processed range, or None
    :param nEvents: total number of events in the file, or None
    :return: effective size in MB as a float
    """
    inMB = 1024 * 1024
    if fsize in [None, 0]:
        # use dummy size for pseudo input
        effectiveFsize = inMB
    elif nEvents is not None and startEvent is not None and endEvent is not None:
        # take the processed event range into account;
        # int() replaces the Python-2-only long() (Py2 auto-promotes to long)
        effectiveFsize = int(float(fsize) * float(endEvent - startEvent + 1) / float(nEvents))
    else:
        effectiveFsize = fsize
    # use dummy size if input is too small
    if effectiveFsize == 0:
        effectiveFsize = inMB
    # convert bytes to MB
    effectiveFsize = float(effectiveFsize) / inMB
    return effectiveFsize
def calculateRWwithPrio_JEDI(query):
    """
    Compute per-cloud "remaining work" (RW) and remaining-job counts for
    JEDI task progress entries matching *query*.

    RW for one entry is nrem * effective file size (MB) * walltime; entries
    without an fsize are skipped entirely.  Per-cloud RW totals are divided
    by 24*3600 (seconds per day) and truncated to int.  An 'All' pseudo-cloud
    aggregates every cloud.

    :param query: Django filter kwargs for GetRWWithPrioJedi3DAYS
    :return: tuple (rwMap, nRemJobsMap), dicts keyed by cloud name (+ 'All')
    """
    retRWMap = {}
    retNREMJMap = {}
    values = ['jeditaskid', 'datasetid', 'modificationtime', 'cloud', 'nrem', 'walltime', 'fsize', 'startevent',
              'endevent', 'nevents']
    progressEntries = []
    progressEntries.extend(GetRWWithPrioJedi3DAYS.objects.filter(**query).values(*values))
    allCloudsRW = 0
    allCloudsNREMJ = 0
    if len(progressEntries) > 0:
        for progrEntry in progressEntries:
            # only entries with a known file size contribute (matches original logic:
            # the nrem counters are also accumulated inside this guard)
            if progrEntry['fsize'] is not None:
                effectiveFsize = getEffectiveFileSize(progrEntry['fsize'], progrEntry['startevent'],
                                                      progrEntry['endevent'], progrEntry['nevents'])
                tmpRW = progrEntry['nrem'] * effectiveFsize * progrEntry['walltime']
                if progrEntry['cloud'] not in retRWMap:
                    retRWMap[progrEntry['cloud']] = 0
                retRWMap[progrEntry['cloud']] += tmpRW
                allCloudsRW += tmpRW
                if progrEntry['cloud'] not in retNREMJMap:
                    retNREMJMap[progrEntry['cloud']] = 0
                retNREMJMap[progrEntry['cloud']] += progrEntry['nrem']
                allCloudsNREMJ += progrEntry['nrem']
    retRWMap['All'] = allCloudsRW
    retNREMJMap['All'] = allCloudsNREMJ
    # items() instead of Python-2-only iteritems(): identical iteration in Py2,
    # and keeps the code importable on Py3 (only values are mutated, not keys)
    for cloudName, rwValue in retRWMap.items():
        retRWMap[cloudName] = int(rwValue / 24 / 3600)
    return retRWMap, retNREMJMap
def dashWorldAnalysis(request):
    """URL shortcut: render the world-jobs dashboard for analysis tasks."""
    return worldjobs(request, view='analysis')
def dashWorldProduction(request):
    """URL shortcut: render the world-jobs dashboard for production tasks."""
    return worldjobs(request, view='production')
def worldjobs(request, view='production'):
    """
    Render the WORLD-cloud jobs dashboard: job counts per state, grouped by
    nucleus and, within each nucleus, by satellite computing site.

    Counts come from the JobsWorldViewTaskType model filtered on tasktype
    ('prod' or 'anal' depending on *view*).  Rendered pages are cached for
    20 minutes.  For JSON requests an empty dict is returned (JSON output
    is not implemented for this view).
    """
    valid, response = initRequest(request)
    # Serve a pre-rendered page from cache when available.
    data = getCacheEntry(request, "worldjobs")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('worldjobs.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    query = {}
    values = ['nucleus', 'computingsite', 'jobstatus', 'countjobsinstate']
    worldTasksSummary = []
    if view=='production':
        query['tasktype'] = 'prod'
    else:
        query['tasktype'] = 'anal'
    worldTasksSummary.extend(JobsWorldViewTaskType.objects.filter(**query).values(*values))
    nucleus = {}
    # NOTE(review): this aliases the module-level statelist (no copy is made);
    # the commented-out del statements below would have mutated the global.
    statelist1 = statelist
    # del statelist1[statelist1.index('jclosed')]
    # del statelist1[statelist1.index('pending')]
    if len(worldTasksSummary) > 0:
        # Build nucleus -> computingsite -> jobstatus -> count, zero-filling
        # every known state the first time a site is seen.
        for jobs in worldTasksSummary:
            if jobs['nucleus'] in nucleus:
                if jobs['computingsite'] in nucleus[jobs['nucleus']]:
                    nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
                else:
                    nucleus[jobs['nucleus']][jobs['computingsite']] = {}
                    for state in statelist1:
                        nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
                    nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
            else:
                nucleus[jobs['nucleus']] = {}
                nucleus[jobs['nucleus']][jobs['computingsite']] = {}
                for state in statelist1:
                    nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
                nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
    # Aggregate the per-site counts into one per-state total per nucleus.
    nucleusSummary = {}
    for nucleusInfo in nucleus:
        nucleusSummary[nucleusInfo] = {}
        for site in nucleus[nucleusInfo]:
            for state in nucleus[nucleusInfo][site]:
                if state in nucleusSummary[nucleusInfo]:
                    nucleusSummary[nucleusInfo][state] += nucleus[nucleusInfo][site][state]
                else:
                    nucleusSummary[nucleusInfo][state] = nucleus[nucleusInfo][site][state]
    # HTML branch (no JSON requested): render, cache and return the page.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        # del request.session['TFIRST']
        # del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'nucleuses': nucleus,
            'nucleussummary': nucleusSummary,
            'statelist': statelist1,
            'xurl': xurl,
            'nosorturl': nosorturl,
            'user': None,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "worldjobs", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('worldjobs.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # del request.session['TFIRST']
        # del request.session['TLAST']
        # JSON output is not implemented: an empty dict is returned.
        data = {
        }
        return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
def worldhs06s(request):
    """
    Render the WORLD-cloud HS06s consumption summary.

    Two Oracle table functions (GETHS06SSUMMARY / GETHS06STOTSUMMARY) are
    queried with an optional filter on reqid / jeditaskid taken from the
    request parameters; results are grouped per nucleus and per satellite
    computing site.  When no filter is given (roundflag), totals are
    converted from HS06-seconds to HS06-days.  Rendered pages are cached
    for 20 minutes; JSON requests get an empty dict (not implemented).
    """
    valid, response = initRequest(request)
    # Here we try to get cached data
    data = getCacheEntry(request, "worldhs06s")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('worldHS06s.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    roundflag = False
    # Build the SQL filter from whitelisted, int-cast request parameters.
    # int() validates the values (they are interpolated into a raw SQL string,
    # so unvalidated input was an SQL-injection vector) and joining with
    # ' AND ' fixes the former dict-iteration-order bug where 'jeditaskid'
    # processed before 'reqid' produced invalid SQL without a separator.
    conditions = []
    if 'reqid' in request.session['requestParams']:
        conditions.append('t.reqid=' + str(int(request.session['requestParams']['reqid'])))
    if 'jeditaskid' in request.session['requestParams']:
        conditions.append('t.jeditaskid=' + str(int(request.session['requestParams']['jeditaskid'])))
    condition = ' AND '.join(conditions)
    if len(condition) < 1:
        condition = '(1=1)'
        roundflag = True
    # Per-site and per-nucleus HS06s totals come from stored table functions.
    cur = connection.cursor()
    cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETHS06SSUMMARY('%s'))" % condition)
    hspersite = cur.fetchall()
    cur.close()
    newcur = connection.cursor()
    newcur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETHS06STOTSUMMARY('%s'))" % condition)
    hspernucleus = newcur.fetchall()
    newcur.close()
    keys = ['nucleus', 'computingsite', 'usedhs06spersite', 'failedhs06spersite']
    totkeys = ['nucleus', 'ntaskspernucleus', 'toths06spernucleus']
    worldHS06sSummary = [dict(zip(keys, row)) for row in hspersite]
    worldHS06sTotSummary = [dict(zip(totkeys, row)) for row in hspernucleus]
    worldHS06sSummaryByNucleus = {}
    nucleus = {}
    totnucleus = {}
    for nucl in worldHS06sTotSummary:
        totnucleus[nucl['nucleus']] = {}
        totnucleus[nucl['nucleus']]['ntaskspernucleus'] = nucl['ntaskspernucleus']
        if roundflag:
            # convert HS06-seconds to kHS06-days for the unfiltered overview
            totnucleus[nucl['nucleus']]['toths06spernucleus'] = round(nucl['toths06spernucleus'] / 1000. / 3600 / 24,
                                                                      2) if nucl[
                'toths06spernucleus'] is not None else 0
        else:
            totnucleus[nucl['nucleus']]['toths06spernucleus'] = nucl['toths06spernucleus'] if nucl[
                'toths06spernucleus'] is not None else 0
    # Group per-site rows by nucleus and derive the failed percentage.
    for site in worldHS06sSummary:
        if site['nucleus'] not in nucleus:
            nucleus[site['nucleus']] = []
        dictsite = {}
        dictsite['computingsite'] = site['computingsite']
        dictsite['usedhs06spersite'] = site['usedhs06spersite'] if site['usedhs06spersite'] else 0
        dictsite['failedhs06spersite'] = site['failedhs06spersite'] if site['failedhs06spersite'] else 0
        dictsite['failedhs06spersitepct'] = 100 * dictsite['failedhs06spersite'] / dictsite['usedhs06spersite'] if (
            site['usedhs06spersite'] and site['usedhs06spersite'] > 0) else 0
        nucleus[site['nucleus']].append(dictsite)
    # Aggregate the per-site numbers into one summary record per nucleus.
    for nuc in nucleus:
        worldHS06sSummaryByNucleus[nuc] = {}
        worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] = sum(
            [site['usedhs06spersite'] for site in nucleus[nuc]])
        worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus'] = sum(
            [site['failedhs06spersite'] for site in nucleus[nuc]])
        if roundflag:
            worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] = round(
                worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] / 1000. / 3600 / 24, 2)
            worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus'] = round(
                worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus'] / 1000. / 3600 / 24, 2)
        worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleuspct'] = int(
            100 * worldHS06sSummaryByNucleus[nuc]['failedhs06spernucleus'] / worldHS06sSummaryByNucleus[nuc][
                'usedhs06spernucleus']) if worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] and \
                                           worldHS06sSummaryByNucleus[nuc]['usedhs06spernucleus'] > 0 else 0
        if nuc in totnucleus:
            worldHS06sSummaryByNucleus[nuc]['ntaskspernucleus'] = totnucleus[nuc]['ntaskspernucleus']
            worldHS06sSummaryByNucleus[nuc]['toths06spernucleus'] = totnucleus[nuc]['toths06spernucleus']
    # Optional in-memory sorting of the per-site lists.
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        reverseflag = False
        if request.session['requestParams']['sortby'] == 'used-desc':
            sortcol = 'usedhs06spersite'
            reverseflag = True
        elif request.session['requestParams']['sortby'] == 'used-asc':
            sortcol = 'usedhs06spersite'
        elif request.session['requestParams']['sortby'] == 'failed-desc':
            sortcol = 'failedhs06spersite'
            reverseflag = True
        elif request.session['requestParams']['sortby'] == 'failed-asc':
            sortcol = 'failedhs06spersite'
        elif request.session['requestParams']['sortby'] == 'failedpct-desc':
            sortcol = 'failedhs06spersitepct'
            reverseflag = True
        elif request.session['requestParams']['sortby'] == 'failedpct-asc':
            sortcol = 'failedhs06spersitepct'
        elif request.session['requestParams']['sortby'] == 'satellite-desc':
            sortcol = 'computingsite'
            reverseflag = True
        else:
            sortcol = 'computingsite'
        for nuc in nucleus:
            nucleus[nuc] = sorted(nucleus[nuc], key=lambda x: x[sortcol], reverse=reverseflag)
    else:
        sortby = 'satellite-asc'
    # HTML branch: render, cache and return the page.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'xurl': xurl,
            'nosorturl': nosorturl,
            'user': None,
            'hssitesum': nucleus,
            'hsnucleussum': worldHS06sSummaryByNucleus,
            'roundflag': roundflag,
            'sortby': sortby,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        setCacheEntry(request, "worldhs06s", json.dumps(data, cls=DateEncoder), 60 * 20)
        endSelfMonitor(request)
        response = render_to_response('worldHS06s.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # JSON output is not implemented: an empty dict is returned.
        data = {
        }
        return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
def dashboard(request, view='production'):
    """
    Main production/analysis dashboard view.

    Depending on the request parameters it either delegates to dashTasks
    (mode=task), renders the WORLD-cloud jobs view (cloudview world/cloud,
    logic duplicated from worldjobs), or builds the site/cloud job summary
    page with per-cloud remaining-work (RW) estimates.  Rendered pages are
    cached for 20 minutes.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Serve a pre-rendered page from cache; the template name is cached too.
    data = getCacheEntry(request, "dashboard")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        template = data['template']
        response = render_to_response(template, data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    taskdays = 3
    # Oracle backend implies the ATLAS deployment; controls time window and VO summary.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        VOMODE = 'atlas'
    else:
        VOMODE = ''
    if VOMODE != 'atlas':
        hours = 24 * taskdays
    else:
        hours = 12
    hoursSinceUpdate = 36
    if view == 'production':
        # Count transferring jobs not updated for hoursSinceUpdate hours.
        noldtransjobs, transclouds, transrclouds = stateNotUpdated(request, state='transferring',
                                                                   hoursSinceUpdate=hoursSinceUpdate, count=True)
    else:
        hours = 3
        noldtransjobs = 0
        transclouds = []
        transrclouds = []
    errthreshold = 10
    query = setupView(request, hours=hours, limit=999999, opmode=view)
    # Task-oriented mode is handled by a dedicated view.
    if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'task':
        return dashTasks(request, hours, view)
    if VOMODE != 'atlas':
        # Non-ATLAS deployments: build a per-VO job-state summary.
        vosummarydata = voSummary(query)
        vos = {}
        for rec in vosummarydata:
            vo = rec['vo']
            # if vo == None: vo = 'Unassigned'
            if vo == None: continue
            jobstatus = rec['jobstatus']
            count = rec['jobstatus__count']
            if vo not in vos:
                vos[vo] = {}
                vos[vo]['name'] = vo
                vos[vo]['count'] = 0
                vos[vo]['states'] = {}
                vos[vo]['statelist'] = []
                for state in sitestatelist:
                    vos[vo]['states'][state] = {}
                    vos[vo]['states'][state]['name'] = state
                    vos[vo]['states'][state]['count'] = 0
            vos[vo]['count'] += count
            vos[vo]['states'][jobstatus]['count'] += count
        ## Convert dict to summary list
        vokeys = vos.keys()
        vokeys.sort()
        vosummary = []
        for vo in vokeys:
            for state in sitestatelist:
                vos[vo]['statelist'].append(vos[vo]['states'][state])
            if int(vos[vo]['states']['finished']['count']) + int(vos[vo]['states']['failed']['count']) > 0:
                # percentage of failed among finished+failed jobs
                vos[vo]['pctfail'] = int(100. * float(vos[vo]['states']['failed']['count']) / (
                    vos[vo]['states']['finished']['count'] + vos[vo]['states']['failed']['count']))
            vosummary.append(vos[vo])
        if 'sortby' in request.session['requestParams']:
            if request.session['requestParams']['sortby'] in statelist:
                vosummary = sorted(vosummary, key=lambda x: x['states'][request.session['requestParams']['sortby']],
                                   reverse=True)
            elif request.session['requestParams']['sortby'] == 'pctfail':
                vosummary = sorted(vosummary, key=lambda x: x['pctfail'], reverse=True)
    else:
        if view == 'production':
            errthreshold = 5
        else:
            errthreshold = 15
        vosummary = []
    # Resolve the cloud-view flavor; analysis is always region-based.
    cloudview = 'region'
    if 'cloudview' in request.session['requestParams']:
        cloudview = request.session['requestParams']['cloudview']
    if view == 'analysis':
        cloudview = 'region'
    elif view != 'production':
        cloudview = 'N/A'
    if view == 'production' and (cloudview == 'world' or cloudview == 'cloud'): #cloud view is the old way of jobs distributing;
        # just to avoid redirecting
        # NOTE(review): this branch duplicates the worldjobs() view logic
        # rather than redirecting to it.
        query = {}
        values = ['nucleus', 'computingsite', 'jobstatus', 'countjobsinstate']
        worldJobsSummary = []
        if view == 'production':
            query['tasktype'] = 'prod'
        else:
            query['tasktype'] = 'anal'
        worldJobsSummary.extend(JobsWorldViewTaskType.objects.filter(**query).values(*values))
        nucleus = {}
        # NOTE(review): aliases the module-level statelist (no copy).
        statelist1 = statelist
        # del statelist1[statelist1.index('jclosed')]
        # del statelist1[statelist1.index('pending')]
        if len(worldJobsSummary) > 0:
            # Build nucleus -> computingsite -> jobstatus -> count, zero-filling
            # every known state the first time a site is seen.
            for jobs in worldJobsSummary:
                if jobs['nucleus'] in nucleus:
                    if jobs['computingsite'] in nucleus[jobs['nucleus']]:
                        nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
                    else:
                        nucleus[jobs['nucleus']][jobs['computingsite']] = {}
                        for state in statelist1:
                            nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
                        nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
                else:
                    nucleus[jobs['nucleus']] = {}
                    nucleus[jobs['nucleus']][jobs['computingsite']] = {}
                    for state in statelist1:
                        nucleus[jobs['nucleus']][jobs['computingsite']][state] = 0
                    nucleus[jobs['nucleus']][jobs['computingsite']][jobs['jobstatus']] = jobs['countjobsinstate']
        # Aggregate the per-site counts into per-state totals per nucleus.
        nucleusSummary = {}
        for nucleusInfo in nucleus:
            nucleusSummary[nucleusInfo] = {}
            for site in nucleus[nucleusInfo]:
                for state in nucleus[nucleusInfo][site]:
                    if state in nucleusSummary[nucleusInfo]:
                        nucleusSummary[nucleusInfo][state] += nucleus[nucleusInfo][site][state]
                    else:
                        nucleusSummary[nucleusInfo][state] = nucleus[nucleusInfo][site][state]
        if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
                'json' not in request.session['requestParams'])):
            xurl = extensibleURL(request)
            nosorturl = removeParam(xurl, 'sortby', mode='extensible')
            if 'TFIRST' in request.session: del request.session['TFIRST']
            if 'TLAST' in request.session: del request.session['TLAST']
            data = {
                'request': request,
                'viewParams': request.session['viewParams'],
                'requestParams': request.session['requestParams'],
                'url': request.path,
                'nucleuses': nucleus,
                'nucleussummary': nucleusSummary,
                'statelist': statelist1,
                'xurl': xurl,
                'nosorturl': nosorturl,
                'user': None,
                'template': 'worldjobs.html',
                'built': datetime.now().strftime("%H:%M:%S"),
            }
            ##self monitor
            endSelfMonitor(request)
            response = render_to_response('worldjobs.html', data, RequestContext(request))
            setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 20)
            patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
            return response
        else:
            # del request.session['TFIRST']
            # del request.session['TLAST']
            # JSON output is not implemented for the world view.
            data = {
            }
            return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
    else:
        # Regular site/cloud dashboard: job summary plus per-cloud task summary.
        fullsummary = dashSummary(request, hours=hours, view=view, cloudview=cloudview)
        cloudTaskSummary = wgTaskSummary(request, fieldname='cloud', view=view, taskdays=taskdays)
        jobsLeft = {}
        rw = {}
        if dbaccess['default']['ENGINE'].find('oracle') >= 0:
            # Remaining-work estimates are Oracle-only (materialized view).
            rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
            for cloud in fullsummary:
                if cloud['name'] in nRemJobs.keys():
                    jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
                if cloud['name'] in rwData.keys():
                    rw[cloud['name']] = rwData[cloud['name']]
        request.session['max_age_minutes'] = 6
        if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
                'json' not in request.session['requestParams'])):
            xurl = extensibleURL(request)
            nosorturl = removeParam(xurl, 'sortby', mode='extensible')
            # TFIRST/TLAST are set by setupView above — TODO confirm they are
            # always present; a missing key would raise KeyError here.
            del request.session['TFIRST']
            del request.session['TLAST']
            data = {
                'request': request,
                'viewParams': request.session['viewParams'],
                'requestParams': request.session['requestParams'],
                'url': request.path,
                'xurl': xurl,
                'nosorturl': nosorturl,
                'user': None,
                'summary': fullsummary,
                'vosummary': vosummary,
                'view': view,
                'mode': 'site',
                'cloudview': cloudview,
                'hours': hours,
                'errthreshold': errthreshold,
                'cloudTaskSummary': cloudTaskSummary,
                'taskstates': taskstatedict,
                'taskdays': taskdays,
                'noldtransjobs': noldtransjobs,
                'transclouds': transclouds,
                'transrclouds': transrclouds,
                'hoursSinceUpdate': hoursSinceUpdate,
                'jobsLeft': jobsLeft,
                'rw': rw,
                'template': 'dashboard.html',
                'built': datetime.now().strftime("%H:%M:%S"),
            }
            ##self monitor
            endSelfMonitor(request)
            response = render_to_response('dashboard.html', data, RequestContext(request))
            setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 20)
            patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
            return response
        else:
            del request.session['TFIRST']
            del request.session['TLAST']
            data = {
                'summary': fullsummary,
                'vosummary': vosummary,
                'view': view,
                'mode': 'site',
                'cloudview': cloudview,
                'hours': hours,
                'errthreshold': errthreshold,
                'cloudTaskSummary': cloudTaskSummary,
                'taskstates': taskstatedict,
                'taskdays': taskdays,
                'noldtransjobs': noldtransjobs,
                'transclouds': transclouds,
                'transrclouds': transrclouds,
                'hoursSinceUpdate': hoursSinceUpdate,
                'jobsLeft': jobsLeft,
                'rw': rw,
                'built': datetime.now().strftime("%H:%M:%S"),
            }
            return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
def dashAnalysis(request):
    """URL shortcut: render the dashboard for analysis jobs."""
    return dashboard(request, view='analysis')
def dashProduction(request):
    """URL shortcut: render the dashboard for production jobs."""
    return dashboard(request, view='production')
def dashTasks(request, hours, view='production'):
    """
    Task-oriented dashboard (dashboard with mode=task): per-cloud task
    summary over the last 'days' (default 7) together with per-cloud
    remaining-work (RW) and remaining-job estimates.

    :param request: Django request (params: days, display_limit, cloudview, sortby)
    :param hours: initial time window in hours; overridden by the 'days' parameter
    :param view: 'production' or 'analysis'
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Error-percentage highlight threshold differs per activity type.
    if view == 'production':
        errthreshold = 5
    else:
        errthreshold = 15
    if 'days' in request.session['requestParams']:
        taskdays = int(request.session['requestParams']['days'])
    else:
        taskdays = 7
    hours = taskdays * 24
    query = setupView(request, hours=hours, limit=999999, opmode=view, querytype='task')
    cloudTaskSummary = wgTaskSummary(request, fieldname='cloud', view=view, taskdays=taskdays)
    # taskJobSummary = dashTaskSummary(request, hours, view) not particularly informative
    taskJobSummary = []
    if 'display_limit' in request.session['requestParams']:
        try:
            display_limit = int(request.session['requestParams']['display_limit'])
        except Exception:
            # non-numeric display_limit falls back to the default
            display_limit = 300
    else:
        display_limit = 300
    # Resolve the cloud-view flavor; analysis is always region-based.
    cloudview = 'cloud'
    if 'cloudview' in request.session['requestParams']:
        cloudview = request.session['requestParams']['cloudview']
    if view == 'analysis':
        cloudview = 'region'
    elif view != 'production':
        cloudview = 'N/A'
    fullsummary = dashSummary(request, hours=hours, view=view, cloudview=cloudview)
    jobsLeft = {}
    rw = {}
    rwData, nRemJobs = calculateRWwithPrio_JEDI(query)
    # Attach RW / remaining-job numbers to clouds present in the summary
    # (an unused 'leftCount' accumulator was removed; 'in dict' replaces
    # the redundant 'in dict.keys()').
    for cloud in fullsummary:
        if cloud['name'] in nRemJobs:
            jobsLeft[cloud['name']] = nRemJobs[cloud['name']]
        if cloud['name'] in rwData:
            rw[cloud['name']] = rwData[cloud['name']]
    # HTML branch: render, cache and return the page.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        # TFIRST/TLAST are set by setupView above — TODO confirm they are
        # always present; a missing key would raise KeyError here.
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'xurl': xurl,
            'nosorturl': nosorturl,
            'user': None,
            'view': view,
            'mode': 'task',
            'hours': hours,
            'errthreshold': errthreshold,
            'cloudTaskSummary': cloudTaskSummary,
            'taskstates': taskstatedict,
            'taskdays': taskdays,
            'taskJobSummary': taskJobSummary[:display_limit],
            'display_limit': display_limit,
            'jobsLeft': jobsLeft,
            'rw': rw,
            'template': 'dashboard.html',
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "dashboard", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('dashboard.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        # JSON branch returns remaining jobs plus weighted remaining events per cloud.
        remainingEvents = RemainedEventsPerCloud3dayswind.objects.values('cloud', 'nrem')
        remainingEventsSet = {}
        for remev in remainingEvents:
            remainingEventsSet[remev['cloud']] = remev['nrem']
        data = {
            'jobsLeft': jobsLeft,
            'remainingWeightedEvents': remainingEventsSet,
        }
        return HttpResponse(json.dumps(data), content_type='text/html')
def taskESExtendedInfo(request):
    """
    Return a plain-text summary of event-service event states for one task,
    e.g. " ready(10)  finished(5) ", for the jeditaskid given in the GET
    parameters.
    """
    if 'jeditaskid' in request.GET:
        jeditaskid = int(request.GET['jeditaskid'])
    else:
        return HttpResponse("Not jeditaskid supplied", content_type='text/html')
    # Count JEDI events per numeric status code for this task.
    eventsdict=[]
    equery = {'jeditaskid': jeditaskid}
    eventsdict.extend(
        JediEvents.objects.filter(**equery).values('status').annotate(count=Count('status')).order_by('status'))
    # Translate numeric status codes into human-readable state names.
    for state in eventsdict: state['statusname'] = eventservicestatelist[state['status']]
    estaskstr = ''
    for s in eventsdict:
        estaskstr += " %s(%s) " % (s['statusname'], s['count'])
    return HttpResponse(estaskstr, content_type='text/html')
@csrf_exempt
def taskList(request):
    """
    Render the JEDI task list (taskList.html / taskListES.html for event
    service), or a JSON dump of the selected tasks.

    A background thread computes the total task count in parallel with the
    main query; results are cached for 20 minutes.  NOTE(review): this is
    Python-2 code (print statements, iteritems) — it will not run under
    Python 3 as-is.
    """
    valid, response = initRequest(request)
    dkey = digkey(request)
    # Here we try to get cached data
    data = getCacheEntry(request, "taskList")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        # the cached payload remembers whether the ES template was used
        if data['eventservice'] == True:
            response = render_to_response('taskListES.html', data, RequestContext(request))
        else:
            response = render_to_response('taskList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    if 'limit' in request.session['requestParams']:
        limit = int(request.session['requestParams']['limit'])
    else:
        limit = 1000
    # pctfailed sorting needs (nearly) the full task set to be meaningful
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'pctfailed':
        limit = 50000
    if not valid: return response
    # Analysis task lists cover 3 days, production 7 days by default.
    if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith(
            'anal'):
        hours = 3 * 24
    else:
        hours = 7 * 24
    eventservice = False
    if 'eventservice' in request.session['requestParams'] and (
            request.session['requestParams']['eventservice'] == 'eventservice' or request.session['requestParams'][
            'eventservice'] == '1'):
        eventservice = True
        hours = 7 * 24
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
    listTasks = []
    if 'statenotupdated' in request.session['requestParams']:
        tasks = taskNotUpdated(request, query, wildCardExtension)
    else:
        tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values()
        listTasks.append(JediTasksOrdered)
        # total count is computed in parallel and joined below
        thread = Thread(target=totalCount, args=(listTasks, query, wildCardExtension, dkey))
        thread.start()
    tasks = cleanTaskList(request, tasks)
    ntasks = len(tasks)
    nmax = ntasks
    # if 'display_limit' in request.session['requestParams']:
    #     and int(request.session['requestParams']['display_limit']) < nmax:
    #     display_limit = int(request.session['requestParams']['display_limit'])
    #     nmax = display_limit
    #     url_nolimit = removeParam(request.get_full_path(), 'display_limit')
    # else:
    #     display_limit = 300
    #     nmax = display_limit
    #     url_nolimit = request.get_full_path()
    if 'display_limit' not in request.session['requestParams']:
        display_limit = 100
        nmax = display_limit
        url_nolimit = request.get_full_path() + "&display_limit=" + str(nmax)
    else:
        display_limit = int(request.session['requestParams']['display_limit'])
        nmax = display_limit
        url_nolimit = request.get_full_path() + "&display_limit=" + str(nmax)
    # from django.db import connection
    # print 'SQL query:', connection.queries
    tasks = getTaskScoutingInfo(tasks, nmax)
    ## For event service, pull the jobs and event ranges
    # NOTE(review): doESCalc is hard-wired False, so this whole branch is
    # currently dead code kept for reference.
    doESCalc = False
    if eventservice and doESCalc:
        taskl = []
        for task in tasks:
            taskl.append(task['jeditaskid'])
        jquery = {}
        jquery['jeditaskid__in'] = taskl
        jobs = []
        jobs.extend(Jobsactive4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
        jobs.extend(Jobsarchived4.objects.filter(**jquery).values('pandaid', 'jeditaskid'))
        taskdict = {}
        for job in jobs:
            taskdict[job['pandaid']] = job['jeditaskid']
        estaskdict = {}
        esjobs = []
        for job in jobs:
            esjobs.append(job['pandaid'])
        random.seed()
        if dbaccess['default']['ENGINE'].find('oracle') >= 0:
            tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
        else:
            tmpTableName = "TMP_IDS1"
        # Stage the pandaids into a temp table keyed by a random transaction key,
        # then join JEDI_EVENTS against it (avoids a huge IN clause).
        transactionKey = random.randrange(1000000)
        # connection.enter_transaction_management()
        new_cur = connection.cursor()
        executionData = []
        for id in esjobs:
            executionData.append((id, transactionKey))
        query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
        new_cur.executemany(query, executionData)
        # connection.commit()
        new_cur.execute(
            """
            SELECT /*+ dynamic_sampling(TMP_IDS1 0) cardinality(TMP_IDS1 10) INDEX_RS_ASC(ev JEDI_EVENTS_PANDAID_STATUS_IDX) NO_INDEX_FFS(ev JEDI_EVENTS_PK) NO_INDEX_SS(ev JEDI_EVENTS_PK) */ PANDAID,STATUS FROM ATLAS_PANDA.JEDI_EVENTS ev, %s WHERE TRANSACTIONKEY=%i AND PANDAID = ID
            """ % (tmpTableName, transactionKey)
        )
        evtable = dictfetchall(new_cur)
        # esquery = {}
        # esquery['pandaid__in'] = esjobs
        # evtable = JediEvents.objects.filter(**esquery).values('pandaid','status')
        new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
        # connection.commit()
        # connection.leave_transaction_management()
        # Aggregate event counts per task and per event-service state.
        for ev in evtable:
            taskid = taskdict[ev['PANDAID']]
            if taskid not in estaskdict:
                estaskdict[taskid] = {}
                for s in eventservicestatelist:
                    estaskdict[taskid][s] = 0
            evstat = eventservicestatelist[ev['STATUS']]
            estaskdict[taskid][evstat] += 1
        for task in tasks:
            taskid = task['jeditaskid']
            if taskid in estaskdict:
                estaskstr = ''
                for s in estaskdict[taskid]:
                    if estaskdict[taskid][s] > 0:
                        estaskstr += " %s(%s) " % (s, estaskdict[taskid][s])
                task['estaskstr'] = estaskstr
    ## set up google flow diagram
    flowstruct = buildGoogleFlowDiagram(request, tasks=tasks)
    xurl = extensibleURL(request)
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    # Join the background count thread; -1 signals the count is unavailable.
    # NOTE(review): the print statements below are leftover Py2 debug output.
    try:
        thread.join()
        tasksTotalCount = sum(tcount[dkey])
        print dkey
        print tcount[dkey]
        del tcount[dkey]
        print tcount
        print tasksTotalCount
    except:
        tasksTotalCount = -1
    # Rebuild the query string without limit/display_limit for template links.
    listPar = []
    for key, val in request.session['requestParams'].iteritems():
        if (key != 'limit' and key != 'display_limit'):
            listPar.append(key + '=' + str(val))
    if len(listPar) > 0:
        urlParametrs = '&'.join(listPar) + '&'
    else:
        urlParametrs = None
    print listPar
    del listPar
    # Only show the total when it differs noticeably; round up to 10k steps.
    if (math.fabs(ntasks - tasksTotalCount) < 1000 or tasksTotalCount == -1):
        tasksTotalCount = None
    else:
        tasksTotalCount = int(math.ceil((tasksTotalCount + 10000) / 10000) * 10000)
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        ## Add info to the json dump if the request is for a single task
        if len(tasks) == 1:
            id = tasks[0]['jeditaskid']
            dsquery = {'jeditaskid': id, 'type__in': ['input', 'output']}
            dsets = JediDatasets.objects.filter(**dsquery).values()
            dslist = []
            for ds in dsets:
                dslist.append(ds)
            tasks[0]['datasets'] = dslist
        dump = json.dumps(tasks, cls=DateEncoder)
        del request.session['TFIRST']
        del request.session['TLAST']
        return HttpResponse(dump, content_type='text/html')
    else:
        sumd = taskSummaryDict(request, tasks)
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'tasks': tasks[:nmax],
            'ntasks': ntasks,
            'sumd': sumd,
            'xurl': xurl,
            'nosorturl': nosorturl,
            'url_nolimit': url_nolimit,
            'display_limit': nmax,
            'flowstruct': flowstruct,
            'eventservice': eventservice,
            'requestString': urlParametrs,
            'tasksTotalCount': tasksTotalCount,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        setCacheEntry(request, "taskList", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        if eventservice:
            response = render_to_response('taskListES.html', data, RequestContext(request))
        else:
            response = render_to_response('taskList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
@never_cache
def killtasks(request):
    """
    Proxy a task action to ProdSys (DEFT) on behalf of the logged-in user.

    Request parameters: 'task' (jeditaskid) and 'action' (0 = finish,
    1 = abort).  Requires ADFS authentication info in the session; the
    ProdSys host/token come from the PRODSYS settings dict.  Returns a
    plain-text JSON object with a 'detail' message.
    """
    valid, response = initRequest(request)
    if not valid: return response
    taskid = -1
    action = -1
    if 'task' in request.session['requestParams']:
        taskid = int(request.session['requestParams']['task'])
    if 'action' in request.session['requestParams']:
        action = int(request.session['requestParams']['action'])
    prodsysHost = None
    prodsysToken = None
    prodsysUrl = None
    username = None
    fullname = None
    # NOTE(review): if PRODSYS lacks these keys, prodsysToken stays None and
    # the 'Token ' + prodsysToken concatenation below raises TypeError —
    # presumably the settings always provide them; verify.
    if 'prodsysHost' in PRODSYS:
        prodsysHost = PRODSYS['prodsysHost']
    if 'prodsysToken' in PRODSYS:
        prodsysToken = PRODSYS['prodsysToken']
    # Map the numeric action onto a ProdSys endpoint.
    if action == 0:
        prodsysUrl = '/prodtask/task_action_ext/finish/'
    elif action == 1:
        prodsysUrl = '/prodtask/task_action_ext/abort/'
    else:
        resp = {"detail": "Action is not recognized"}
        dump = json.dumps(resp, cls=DateEncoder)
        response = HttpResponse(dump, content_type='text/plain')
        return response
    if 'ADFS_FULLNAME' in request.session and 'ADFS_LOGIN' in request.session:
        username = request.session['ADFS_LOGIN']
        fullname = request.session['ADFS_FULLNAME']
    else:
        resp = {"detail": "User not authenticated. Please login to bigpanda mon"}
        dump = json.dumps(resp, cls=DateEncoder)
        response = HttpResponse(dump, content_type='text/plain')
        return response
    # 'finish' (action 0) additionally passes parameters=[1] to ProdSys.
    if action == 1:
        postdata = {"username": username, "task": taskid, "userfullname":fullname}
    else:
        postdata = {"username": username, "task": taskid, "parameters":[1], "userfullname":fullname}
    headers = {'Accept': 'application/json', 'Authorization': 'Token '+prodsysToken}
    conn = urllib3.HTTPSConnectionPool(prodsysHost, timeout=20)
    resp = None
    # if request.session['IS_TESTER']:
    resp = conn.urlopen('POST', prodsysUrl, body=json.dumps(postdata, cls=DateEncoder), headers=headers, retries=1, assert_same_host=False)
    # else:
    #     resp = {"detail": "You are not allowed to test. Sorry"}
    #     dump = json.dumps(resp, cls=DateEncoder)
    #     response = HttpResponse(dump, mimetype='text/plain')
    #     return response
    if resp and len(resp.data) > 0:
        try:
            resp = json.loads(resp.data)
            if resp['result'] == "FAILED":
                resp['detail'] = 'Result:' + resp['result'] + ' with reason:' + resp['exception']
            elif resp['result'] == "OK":
                # fixed typo in the user-facing message ("peformed")
                resp['detail'] = 'Action performed successfully, details: ' + resp['details']
        except Exception:
            # fixed typos in the user-facing message ("responce"/"parced")
            resp = {"detail":"prodsys response could not be parsed"}
    else:
        resp = {"detail": "Error with sending request to prodsys"}
    dump = json.dumps(resp, cls=DateEncoder)
    response = HttpResponse(dump, content_type='text/plain')
    return response
def getTaskScoutingInfo(tasks, nmax):
    """
    Enrich the first `nmax` tasks in `tasks` (list of dicts, each with at least
    'jeditaskid' and 'status') with scouting-related fields:

      * 'totev' / 'totevrem'            -- total / remaining events (from GetEventsForTask)
      * 'failedscouting'                -- set True for failed/broken tasks whose datasets
                                           have more failed than to-be-used files
      * 'scoutinghascritfailures'       -- set True for 'scouting' tasks with failed files
      * 'scoutinghasnoncritfailures'    -- set True for 'scouting' tasks with job retries
                                           but no critical failures

    Uses a shared temp table (TMP_IDS1) keyed by a random TRANSACTIONKEY to pass
    id lists into raw SQL subselects; each batch is deleted after use.
    Returns the full `tasks` list (only the first nmax entries are enriched).
    """
    taskslToBeDisplayed = tasks[:nmax]
    tasksIdToBeDisplayed = [task['jeditaskid'] for task in taskslToBeDisplayed]
    tquery = {}
    # Temp table lives in a dedicated schema on Oracle; bare name elsewhere (MySQL etc.).
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
    else:
        tmpTableName = "TMP_IDS1"
    # Random key isolates this request's rows in the shared temp table.
    transactionKey = random.randrange(1000000)
    new_cur = connection.cursor()
    executionData = []
    for id in tasksIdToBeDisplayed:
        executionData.append((id, transactionKey))
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    # tquery is still empty here, so this filters by the temp-table subselect only.
    tasksEventInfo = GetEventsForTask.objects.filter(**tquery).extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid', 'totevrem', 'totev')
    # Materialize the queryset now: raw cursor and ORM queries are intermixed, and the
    # next new_cur.execute would invalidate the lazy tasksEventInfo queryset.
    tasksEventInfoList = []
    for tasksEventInfoItem in tasksEventInfo:
        listItem = {}
        listItem["jeditaskid"] = tasksEventInfoItem["jeditaskid"]
        listItem["totevrem"] = tasksEventInfoItem["totevrem"]
        listItem["totev"] = tasksEventInfoItem["totev"]
        tasksEventInfoList.append(listItem)
    tasksEventInfoList.reverse()
    # Datasets where more files failed than were to be used -> candidate "failed scouting".
    failedInScouting = JediDatasets.objects.filter(**tquery).extra(where=["NFILESFAILED > NFILESTOBEUSED AND JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey) ]).values('jeditaskid')
    taskStatuses = dict((task['jeditaskid'], task['status']) for task in tasks)
    failedInScouting = [item['jeditaskid'] for item in failedInScouting if
                        (taskStatuses[item['jeditaskid']] in ('failed', 'broken'))]
    # scoutingHasCritFailures
    tquery['nfilesfailed__gt'] = 0
    scoutingHasCritFailures = JediDatasets.objects.filter(**tquery).extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
    scoutingHasCritFailures = [item['jeditaskid'] for item in scoutingHasCritFailures if
                               (taskStatuses[item['jeditaskid']] in ('scouting'))]
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # Second round-trip: load the crit-failure ids into the temp table to find tasks
    # with zero failed files among them (candidates for non-critical failures).
    transactionKey = random.randrange(1000000)
    executionData = []
    for id in scoutingHasCritFailures:
        executionData.append((id, transactionKey))
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    tquery = {}
    tquery['nfilesfailed'] = 0
    scoutingHasNonCritFailures = JediDatasets.objects.filter(**tquery).extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
    scoutingHasNonCritFailures = [item['jeditaskid'] for item in scoutingHasNonCritFailures if (
        taskStatuses[item['jeditaskid']] == 'scouting' and item['jeditaskid'] not in scoutingHasCritFailures)]
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # Third round-trip: among those candidates, keep tasks that actually had job retries.
    transactionKey = random.randrange(1000000)
    executionData = []
    for id in scoutingHasNonCritFailures:
        executionData.append((id, transactionKey))
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    tquery = {}
    tquery['relationtype'] = 'retry'
    scoutingHasNonCritFailures = JediJobRetryHistory.objects.filter(**tquery).extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values('jeditaskid')
    scoutingHasNonCritFailures = [item['jeditaskid'] for item in scoutingHasNonCritFailures]
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # Attach the gathered info to the displayed tasks (mutates the dicts in place).
    for task in taskslToBeDisplayed:
        correspondendEventInfo = []
        if tasksEventInfoList and len(tasksEventInfoList) > 0:
            correspondendEventInfo = [item for item in tasksEventInfoList if item["jeditaskid"]==task['jeditaskid']] #filter(lambda n: n.get('jeditaskid') == task['jeditaskid'], tasksEventInfo)
            if len(correspondendEventInfo) > 0:
                task['totevrem'] = int(correspondendEventInfo[0]['totevrem'])
                task['totev'] = correspondendEventInfo[0]['totev']
            else:
                task['totevrem'] = 0
                task['totev'] = 0
        if (task['jeditaskid'] in failedInScouting):
            task['failedscouting'] = True
        if (task['jeditaskid'] in scoutingHasCritFailures):
            task['scoutinghascritfailures'] = True
        if (task['jeditaskid'] in scoutingHasNonCritFailures):
            task['scoutinghasnoncritfailures'] = True
    return tasks
def getErrorSummaryForEvents(request):
valid, response = initRequest(request)
if not valid: return response
data = {}
eventsErrors = []
print 'getting error summary for events'
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = int(request.session['requestParams']['jeditaskid'])
else:
data = {"error": "no jeditaskid supplied"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
if 'mode' in request.session['requestParams']:
mode = request.session['requestParams']['mode']
else:
mode = 'drop'
if 'tk' in request.session['requestParams'] and request.session['requestParams']['tk'] > 0:
transactionKey = int(request.session['requestParams']['tk'])
else:
data = {"error": "no failed events found"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
equery = {}
equery['jeditaskid']=jeditaskid
equery['error_code__isnull'] = False
if mode == 'drop':
eventsErrors = []
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
else:
tmpTableName = "TMP_IDS1DEBUG"
new_cur = connection.cursor()
new_cur.execute(
"""select e.error_code, sum(e.neventsinjob) as nevents, sum(nerrorsinjob) as nerrors , count(e.pandaid) as njobs,
LISTAGG(case when e.aff <= 10 then e.pandaid end,',' ) WITHIN group (order by error_code, e.aff) as pandaidlist
from (select pandaid, error_code,
sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) as neventsinjob,
count(*) as nerrorsinjob,
row_number() over (partition by error_code ORDER BY sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) desc) as aff
from ATLAS_PANDA.Jedi_events
where jeditaskid=%i and ERROR_CODE is not null
group by error_code, pandaid) e ,
(select ID, TRANSACTIONKEY from %s ) j
where j.TRANSACTIONKEY=%i and e.pandaid = j.ID
group by e.error_code
""" % (jeditaskid, tmpTableName, transactionKey)
)
eventsErrorsUP = dictfetchall(new_cur)
elif mode == 'nodrop':
# eventsErrors = JediEvents.objects.filter(**equery).values('error_code').annotate(njobs=Count('pandaid',distinct=True),nevents=Sum('def_max_eventid', field='def_max_eventid-def_min_eventid+1'))
new_cur = connection.cursor()
new_cur.execute(
"""select error_code, sum(neventsinjob) as nevents, sum(nerrorsinjob) as nerrors , count(pandaid) as njobs,
LISTAGG(case when aff <= 10 then pandaid end,',' ) WITHIN group (order by error_code, aff) as pandaidlist
from (select pandaid, error_code,
sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) as neventsinjob,
count(*) as nerrorsinjob,
row_number() over (partition by error_code ORDER BY sum(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) desc) as aff
from ATLAS_PANDA.Jedi_events
where jeditaskid=%i and ERROR_CODE is not null
group by error_code, pandaid)
group by error_code
""" % (jeditaskid)
)
eventsErrorsUP = dictfetchall(new_cur)
else:
data = {"error": "wrong mode specified"}
return HttpResponse(json.dumps(data, cls=DateTimeEncoder), mimetype='text/html')
for error in eventsErrorsUP:
line = dict()
for key, value in error.items():
line[key.lower()] = value
eventsErrors.append(line)
for eventserror in eventsErrors:
try:
eventserror['error_code']=int(eventserror['error_code'])
if eventserror['error_code'] in errorCodes['piloterrorcode'].keys():
eventserror['error_description'] = errorCodes['piloterrorcode'][eventserror['error_code']]
else:
eventserror['error_description'] = ''
except:
eventserror['error_description'] = ''
if len(eventserror['pandaidlist']) > 0:
eventserror['pandaidlist'] = eventserror['pandaidlist'].split(',')
data = {'errors' : eventsErrors}
response = render_to_response('eventsErrorSummary.html', data, RequestContext(request))
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
return response
def getSummaryForTaskList(request):
    """
    Render a summary (total / remaining events) for a list of tasks selected
    via the standard setupView filters.

    Request parameters:
      * limit          -- max number of tasks considered (default 5000)
      * tasktype       -- 'anal*' narrows the time window to 3 days, else 7
      * eventservice   -- '1'/'eventservice' forces the 7-day window
      * statenotupdated-- switches task selection to taskNotUpdated()

    Improvement over the original: task ids are loaded into the temp table with a
    single parameterized executemany (consistent with getTaskScoutingInfo) instead
    of one string-interpolated INSERT per row.
    """
    valid, response = initRequest(request)
    if not valid: return response
    data = {}
    if 'limit' in request.session['requestParams']:
        limit = int(request.session['requestParams']['limit'])
    else:
        limit = 5000
    if not valid: return response
    if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith(
            'anal'):
        hours = 3 * 24
    else:
        hours = 7 * 24
    eventservice = False
    if 'eventservice' in request.session['requestParams'] and (
                    request.session['requestParams']['eventservice'] == 'eventservice' or
                    request.session['requestParams']['eventservice'] == '1'): eventservice = True
    if eventservice: hours = 7 * 24
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task',
                                                           wildCardExt=True)
    if 'statenotupdated' in request.session['requestParams']:
        tasks = taskNotUpdated(request, query, wildCardExtension)
    else:
        tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values('jeditaskid',
                                                                                                  'status',
                                                                                                  'creationdate',
                                                                                                  'modificationtime')
    taskl = []
    for t in tasks:
        taskl.append(t['jeditaskid'])
    # Temp table lives in a dedicated schema on Oracle; bare name elsewhere.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
    else:
        tmpTableName = "TMP_IDS1"
    taskEvents = []
    random.seed()
    # Random key isolates this request's rows in the shared temp table.
    transactionKey = random.randrange(1000000)
    new_cur = connection.cursor()
    # Single parameterized batch insert instead of one execute per id.
    executionData = [(id, transactionKey) for id in taskl]
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    taske = GetEventsForTask.objects.extra(
        where=["JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)]).values()
    for task in taske:
        taskEvents.append(task)
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # Aggregate totals over the selected tasks.
    nevents = {'neventstot': 0, 'neventsrem': 0}
    for task in taskEvents:
        if 'totev' in task and task['totev'] is not None:
            nevents['neventstot'] += task['totev']
        if 'totevrem' in task and task['totevrem'] is not None:
            nevents['neventsrem'] += task['totevrem']
    endSelfMonitor(request)
    del request.session['TFIRST']
    del request.session['TLAST']
    response = render_to_response('taskListSummary.html', {'nevents': nevents}, RequestContext(request))
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
#def runningProdTasks(request):
# return redirect('runningMCProdTasks')
def runningMCProdTasks(request):
    """
    Render the "running MC production tasks" page (runningMCProdTasks.html),
    backed by a 20-minute server-side cache.

    Filters (requestParams): processingtype, username, campaign (substring),
    reqid, corecount, status. Computes per-task derived fields (age, simtype,
    event counts, percentage done), summary totals, and supports sortby=...
    via the usual <field>-asc/-desc URL parameter.

    Fixes over the original:
      * task names without a '.' no longer raise IndexError when deriving
        'datasetname' (same guard as runningProdTasks uses for 'inputdataset');
      * processing types outside the four known keys no longer raise KeyError
        when accumulating the AFII/FS event sums (they are simply not counted).
    Returns JSON instead of HTML when the client asks for it.
    """
    valid, response = initRequest(request)
    # Serve from cache when a fresh entry exists.
    data = getCacheEntry(request, "runningMCProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningMCProdTasks.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    tquery = {}
    if 'processingtype' in request.session['requestParams']:
        tquery['processingtype'] = request.session['requestParams']['processingtype']
    if 'username' in request.session['requestParams']:
        tquery['username'] = request.session['requestParams']['username']
    if 'campaign' in request.session['requestParams']:
        tquery['campaign__contains'] = request.session['requestParams']['campaign']
    if 'reqid' in request.session['requestParams']:
        tquery['reqid'] = request.session['requestParams']['reqid']
    if 'corecount' in request.session['requestParams']:
        tquery['corecount'] = request.session['requestParams']['corecount']
    if 'status' in request.session['requestParams']:
        tquery['status'] = request.session['requestParams']['status']
    tasks = RunningMCProductionTasks.objects.filter(**tquery).values()
    ntasks = len(tasks)
    slots = 0
    ages = []
    neventsAFIItasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}
    neventsFStasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}
    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] > 0 else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = (datetime.now() - task['creationdate']).days
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        # Guard: a task name without a '.' separator must not raise IndexError.
        namepieces = task['taskname'].split('.')
        task['datasetname'] = namepieces[1] if len(namepieces) > 1 else ''
        # Simulation type is derived from the rightmost tag of the task name:
        # an 'a' in it means AtlFast-II ('AFII'), otherwise full simulation ('FS').
        rtag = task['taskname'].split("_")[-1]
        if "." in rtag:
            rtag = rtag.split(".")[-1]
        if 'a' in rtag:
            task['simtype'] = 'AFII'
            # Guard: only accumulate the four known processing types (KeyError otherwise).
            if task['processingtype'] in neventsAFIItasksSum:
                neventsAFIItasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0
        else:
            task['simtype'] = 'FS'
            if task['processingtype'] in neventsFStasksSum:
                neventsFStasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0
    plotageshistogram = 1
    if sum(ages) == 0: plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status', 'processingtype', 'simtype'])
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        if sortby == 'campaign-asc':
            tasks = sorted(tasks, key=lambda x: x['campaign'])
        elif sortby == 'campaign-desc':
            tasks = sorted(tasks, key=lambda x: x['campaign'], reverse=True)
        elif sortby == 'reqid-asc':
            tasks = sorted(tasks, key=lambda x: x['reqid'])
        elif sortby == 'reqid-desc':
            tasks = sorted(tasks, key=lambda x: x['reqid'], reverse=True)
        elif sortby == 'jeditaskid-asc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'])
        elif sortby == 'jeditaskid-desc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'], reverse=True)
        elif sortby == 'rjobs-asc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'])
        elif sortby == 'rjobs-desc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'], reverse=True)
        elif sortby == 'status-asc':
            tasks = sorted(tasks, key=lambda x: x['status'])
        elif sortby == 'status-desc':
            tasks = sorted(tasks, key=lambda x: x['status'], reverse=True)
        elif sortby == 'processingtype-asc':
            tasks = sorted(tasks, key=lambda x: x['processingtype'])
        elif sortby == 'processingtype-desc':
            tasks = sorted(tasks, key=lambda x: x['processingtype'], reverse=True)
        elif sortby == 'nevents-asc':
            tasks = sorted(tasks, key=lambda x: x['totev'])
        elif sortby == 'nevents-desc':
            tasks = sorted(tasks, key=lambda x: x['totev'], reverse=True)
        elif sortby == 'neventsused-asc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'])
        elif sortby == 'neventsused-desc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'], reverse=True)
        elif sortby == 'neventstobeused-asc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'])
        elif sortby == 'neventstobeused-desc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'], reverse=True)
        elif sortby == 'percentage-asc':
            tasks = sorted(tasks, key=lambda x: x['percentage'])
        elif sortby == 'percentage-desc':
            tasks = sorted(tasks, key=lambda x: x['percentage'], reverse=True)
        elif sortby == 'nfilesfailed-asc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'])
        elif sortby == 'nfilesfailed-desc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'], reverse=True)
        elif sortby == 'priority-asc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'])
        elif sortby == 'priority-desc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'], reverse=True)
        elif sortby == 'simtype-asc':
            tasks = sorted(tasks, key=lambda x: x['simtype'])
        elif sortby == 'simtype-desc':
            tasks = sorted(tasks, key=lambda x: x['simtype'], reverse=True)
        elif sortby == 'age-asc':
            tasks = sorted(tasks, key=lambda x: x['age'])
        elif sortby == 'age-desc':
            tasks = sorted(tasks, key=lambda x: x['age'], reverse=True)
        elif sortby == 'corecount-asc':
            tasks = sorted(tasks, key=lambda x: x['corecount'])
        elif sortby == 'corecount-desc':
            tasks = sorted(tasks, key=lambda x: x['corecount'], reverse=True)
        elif sortby == 'username-asc':
            tasks = sorted(tasks, key=lambda x: x['username'])
        elif sortby == 'username-desc':
            tasks = sorted(tasks, key=lambda x: x['username'], reverse=True)
        elif sortby == 'datasetname-asc':
            tasks = sorted(tasks, key=lambda x: x['datasetname'])
        elif sortby == 'datasetname-desc':
            tasks = sorted(tasks, key=lambda x: x['datasetname'], reverse=True)
    else:
        sortby = 'age-asc'
        tasks = sorted(tasks, key=lambda x: x['age'])
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        dump = json.dumps(tasks, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'neventsAFIItasksSum': neventsAFIItasksSum,
            'neventsFStasksSum': neventsFStasksSum,
            'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "runningMCProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('runningMCProdTasks.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def runningProdTasks(request):
    """
    Render the "running production tasks" page (runningProdTasks.html),
    backed by a 20-minute server-side cache.

    Filters (requestParams): campaign (substring), corecount, status, reqid,
    inputdataset (taskname substring), campaignstart (prefix), workinggroup
    (comma-separated, '!' prefix negates), processingtype (comma-separated).
    Computes per-task derived fields (age, cutcampaign, inputdataset, tid,
    outputtypes, ptag), summary totals, and supports sortby=<field>-asc/-desc.
    Returns JSON when the client asks for it.
    """
    valid, response = initRequest(request)
    # Serve from cache when a fresh entry exists.
    data = getCacheEntry(request, "runningProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningProdTasks.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    tquery = {}
    if 'campaign' in request.session['requestParams']:
        tquery['campaign__contains'] = request.session['requestParams']['campaign']
    if 'corecount' in request.session['requestParams']:
        tquery['corecount'] = request.session['requestParams']['corecount']
    if 'status' in request.session['requestParams']:
        tquery['status'] = request.session['requestParams']['status']
    if 'reqid' in request.session['requestParams']:
        tquery['reqid'] = request.session['requestParams']['reqid']
    if 'inputdataset' in request.session['requestParams']:
        tquery['taskname__contains'] = request.session['requestParams']['inputdataset']
    if 'campaignstart' in request.session['requestParams']:
        tquery['campaign__startswith'] = request.session['requestParams']['campaignstart']
    productionType = ''
    extraQueryString = ''
    # Build a raw SQL condition for the workinggroup filter; values go through
    # escapeInput before being embedded in the string.
    if 'workinggroup' in request.session['requestParams']:
        workinggroupQuery = request.session['requestParams']['workinggroup']
        for card in workinggroupQuery.split(','):
            if card[0] == '!':
                extraQueryString += ' NOT workinggroup=\''+escapeInput(card[1:])+'\' AND'
            else:
                extraQueryString += ' workinggroup=\''+escapeInput(card[0:])+'\' OR '
        productionType = 'DPD' if workinggroupQuery == 'GP_PHYS' else ''
    if 'processingtype' in request.session['requestParams']:
        val = escapeInput(request.session['requestParams']['processingtype'])
        values = val.split(',')
        tquery['processingtype__in'] = values
    # NOTE(review): [:-3] drops the trailing 'AND'/'OR ' of the last card;
    # this assumes both connectives are exactly 3 characters at the string end.
    extraQueryString = extraQueryString[:-3]
    if (len(extraQueryString) < 2):
        extraQueryString = '1=1'
    #processingtype in ('evgen', 'pile', 'simul', 'recon')
    tasks = RunningProdTasksModel.objects.filter(**tquery).extra(where=[extraQueryString]).values()
    ntasks = len(tasks)
    slots = 0
    ages = []
    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    # Derive display fields and accumulate summary totals per task.
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] > 0 else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        # Age in days with one decimal.
        task['age'] = round(
            (datetime.now() - task['creationdate']).days + (datetime.now() - task['creationdate']).seconds / 3600. / 24,
            1)
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        if (len(task['taskname'].split('.'))>1):
            task['inputdataset'] = task['taskname'].split('.')[1]
        else:
            task['inputdataset'] = ''
        if task['inputdataset'].startswith('00'):
            task['inputdataset'] = task['inputdataset'][2:]
        task['tid'] = task['outputtype'].split('_tid')[1].split('_')[0] if '_tid' in task['outputtype'] else None
        task['outputtypes'] = ''
        outputtypes = []
        outputtypes = task['outputtype'].split(',')
        if len(outputtypes) > 0:
            for outputtype in outputtypes:
                task['outputtypes'] += outputtype.split('_')[1].split('_p')[0] + ' ' if '_' in outputtype else ''
        task['ptag'] = task['outputtype'].split('_')[2] if '_' in task['outputtype'] else ''
    plotageshistogram = 1
    if sum(ages) == 0: plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status'])
    # Apply the requested sort; unknown sortby values leave the list unsorted.
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        if sortby == 'campaign-asc':
            tasks = sorted(tasks, key=lambda x: x['campaign'])
        elif sortby == 'campaign-desc':
            tasks = sorted(tasks, key=lambda x: x['campaign'], reverse=True)
        elif sortby == 'reqid-asc':
            tasks = sorted(tasks, key=lambda x: x['reqid'])
        elif sortby == 'reqid-desc':
            tasks = sorted(tasks, key=lambda x: x['reqid'], reverse=True)
        elif sortby == 'jeditaskid-asc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'])
        elif sortby == 'jeditaskid-desc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'], reverse=True)
        elif sortby == 'rjobs-asc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'])
        elif sortby == 'rjobs-desc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'], reverse=True)
        elif sortby == 'status-asc':
            tasks = sorted(tasks, key=lambda x: x['status'])
        elif sortby == 'status-desc':
            tasks = sorted(tasks, key=lambda x: x['status'], reverse=True)
        elif sortby == 'nevents-asc':
            tasks = sorted(tasks, key=lambda x: x['totev'])
        elif sortby == 'nevents-desc':
            tasks = sorted(tasks, key=lambda x: x['totev'], reverse=True)
        elif sortby == 'neventsused-asc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'])
        elif sortby == 'neventsused-desc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'], reverse=True)
        elif sortby == 'neventstobeused-asc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'])
        elif sortby == 'neventstobeused-desc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'], reverse=True)
        elif sortby == 'percentage-asc':
            tasks = sorted(tasks, key=lambda x: x['percentage'])
        elif sortby == 'percentage-desc':
            tasks = sorted(tasks, key=lambda x: x['percentage'], reverse=True)
        elif sortby == 'nfilesfailed-asc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'])
        elif sortby == 'nfilesfailed-desc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'], reverse=True)
        elif sortby == 'priority-asc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'])
        elif sortby == 'priority-desc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'], reverse=True)
        elif sortby == 'ptag-asc':
            tasks = sorted(tasks, key=lambda x: x['ptag'])
        elif sortby == 'ptag-desc':
            tasks = sorted(tasks, key=lambda x: x['ptag'], reverse=True)
        elif sortby == 'outputtype-asc':
            tasks = sorted(tasks, key=lambda x: x['outputtypes'])
        # NOTE(review): asymmetric with 'outputtype-asc' above -- presumably the
        # template emits 'output-desc'; confirm before renaming either side.
        elif sortby == 'output-desc':
            tasks = sorted(tasks, key=lambda x: x['outputtypes'], reverse=True)
        elif sortby == 'age-asc':
            tasks = sorted(tasks, key=lambda x: x['age'])
        elif sortby == 'age-desc':
            tasks = sorted(tasks, key=lambda x: x['age'], reverse=True)
        elif sortby == 'corecount-asc':
            tasks = sorted(tasks, key=lambda x: x['corecount'])
        elif sortby == 'corecount-desc':
            tasks = sorted(tasks, key=lambda x: x['corecount'], reverse=True)
        elif sortby == 'username-asc':
            tasks = sorted(tasks, key=lambda x: x['username'])
        elif sortby == 'username-desc':
            tasks = sorted(tasks, key=lambda x: x['username'], reverse=True)
        elif sortby == 'inputdataset-asc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'])
        elif sortby == 'inputdataset-desc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'], reverse=True)
    else:
        sortby = 'age-asc'
        tasks = sorted(tasks, key=lambda x: x['age'])
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        dump = json.dumps(tasks, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'plotageshistogram': plotageshistogram,
            'productiontype' : json.dumps(productionType),
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('runningProdTasks.html', data, RequestContext(request))
        setCacheEntry(request, "runningProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def runningDPDProdTasks(request):
    """
    Render the "running DPD production tasks" page (runningDPDProdTasks.html),
    backed by a 20-minute server-side cache.

    Filters (requestParams): campaign (substring), corecount, status, reqid,
    inputdataset (taskname substring). Computes per-task derived fields (age,
    cutcampaign, inputdataset, tid, outputtypes, ptag), summary totals, and
    supports sortby=<field>-asc/-desc. Returns JSON when the client asks for it.

    Fixes over the original:
      * task names without a '.' no longer raise IndexError when deriving
        'inputdataset' (same guard runningProdTasks already has);
      * 'outputtype-desc' is accepted in addition to the historical 'output-desc'
        (the ascending key is 'outputtype-asc'), keeping the old value working.
    """
    valid, response = initRequest(request)
    # Serve from cache when a fresh entry exists.
    data = getCacheEntry(request, "runningDPDProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningDPDProdTasks.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    tquery = {}
    if 'campaign' in request.session['requestParams']:
        tquery['campaign__contains'] = request.session['requestParams']['campaign']
    if 'corecount' in request.session['requestParams']:
        tquery['corecount'] = request.session['requestParams']['corecount']
    if 'status' in request.session['requestParams']:
        tquery['status'] = request.session['requestParams']['status']
    if 'reqid' in request.session['requestParams']:
        tquery['reqid'] = request.session['requestParams']['reqid']
    if 'inputdataset' in request.session['requestParams']:
        tquery['taskname__contains'] = request.session['requestParams']['inputdataset']
    tasks = RunningDPDProductionTasks.objects.filter(**tquery).values()
    ntasks = len(tasks)
    slots = 0
    ages = []
    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    # Derive display fields and accumulate summary totals per task.
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] > 0 else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        # Age in days with one decimal.
        task['age'] = round(
            (datetime.now() - task['creationdate']).days + (datetime.now() - task['creationdate']).seconds / 3600. / 24,
            1)
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        # Guard: a task name without a '.' separator must not raise IndexError
        # (same protection runningProdTasks applies).
        if (len(task['taskname'].split('.')) > 1):
            task['inputdataset'] = task['taskname'].split('.')[1]
        else:
            task['inputdataset'] = ''
        if task['inputdataset'].startswith('00'):
            task['inputdataset'] = task['inputdataset'][2:]
        task['tid'] = task['outputtype'].split('_tid')[1].split('_')[0] if '_tid' in task['outputtype'] else None
        task['outputtypes'] = ''
        outputtypes = []
        outputtypes = task['outputtype'].split(',')
        if len(outputtypes) > 0:
            for outputtype in outputtypes:
                task['outputtypes'] += outputtype.split('_')[1].split('_p')[0] + ' ' if '_' in outputtype else ''
        task['ptag'] = task['outputtype'].split('_')[2] if '_' in task['outputtype'] else ''
    plotageshistogram = 1
    if sum(ages) == 0: plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status'])
    # Apply the requested sort; unknown sortby values leave the list unsorted.
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        if sortby == 'campaign-asc':
            tasks = sorted(tasks, key=lambda x: x['campaign'])
        elif sortby == 'campaign-desc':
            tasks = sorted(tasks, key=lambda x: x['campaign'], reverse=True)
        elif sortby == 'reqid-asc':
            tasks = sorted(tasks, key=lambda x: x['reqid'])
        elif sortby == 'reqid-desc':
            tasks = sorted(tasks, key=lambda x: x['reqid'], reverse=True)
        elif sortby == 'jeditaskid-asc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'])
        elif sortby == 'jeditaskid-desc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'], reverse=True)
        elif sortby == 'rjobs-asc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'])
        elif sortby == 'rjobs-desc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'], reverse=True)
        elif sortby == 'status-asc':
            tasks = sorted(tasks, key=lambda x: x['status'])
        elif sortby == 'status-desc':
            tasks = sorted(tasks, key=lambda x: x['status'], reverse=True)
        elif sortby == 'nevents-asc':
            tasks = sorted(tasks, key=lambda x: x['totev'])
        elif sortby == 'nevents-desc':
            tasks = sorted(tasks, key=lambda x: x['totev'], reverse=True)
        elif sortby == 'neventsused-asc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'])
        elif sortby == 'neventsused-desc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'], reverse=True)
        elif sortby == 'neventstobeused-asc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'])
        elif sortby == 'neventstobeused-desc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'], reverse=True)
        elif sortby == 'percentage-asc':
            tasks = sorted(tasks, key=lambda x: x['percentage'])
        elif sortby == 'percentage-desc':
            tasks = sorted(tasks, key=lambda x: x['percentage'], reverse=True)
        elif sortby == 'nfilesfailed-asc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'])
        elif sortby == 'nfilesfailed-desc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'], reverse=True)
        elif sortby == 'priority-asc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'])
        elif sortby == 'priority-desc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'], reverse=True)
        elif sortby == 'ptag-asc':
            tasks = sorted(tasks, key=lambda x: x['ptag'])
        elif sortby == 'ptag-desc':
            tasks = sorted(tasks, key=lambda x: x['ptag'], reverse=True)
        elif sortby == 'outputtype-asc':
            tasks = sorted(tasks, key=lambda x: x['outputtypes'])
        # accept both the historical 'output-desc' and the symmetric 'outputtype-desc'
        elif sortby in ('outputtype-desc', 'output-desc'):
            tasks = sorted(tasks, key=lambda x: x['outputtypes'], reverse=True)
        elif sortby == 'age-asc':
            tasks = sorted(tasks, key=lambda x: x['age'])
        elif sortby == 'age-desc':
            tasks = sorted(tasks, key=lambda x: x['age'], reverse=True)
        elif sortby == 'corecount-asc':
            tasks = sorted(tasks, key=lambda x: x['corecount'])
        elif sortby == 'corecount-desc':
            tasks = sorted(tasks, key=lambda x: x['corecount'], reverse=True)
        elif sortby == 'username-asc':
            tasks = sorted(tasks, key=lambda x: x['username'])
        elif sortby == 'username-desc':
            tasks = sorted(tasks, key=lambda x: x['username'], reverse=True)
        elif sortby == 'inputdataset-asc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'])
        elif sortby == 'inputdataset-desc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'], reverse=True)
    else:
        sortby = 'age-asc'
        tasks = sorted(tasks, key=lambda x: x['age'])
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        dump = json.dumps(tasks, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('runningDPDProdTasks.html', data, RequestContext(request))
        setCacheEntry(request, "runningDPDProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def getBrokerageLog(request):
iquery = {}
iquery['type'] = 'prod_brokerage'
iquery['name'] = 'panda.mon.jedi'
if 'taskid' in request.session['requestParams']:
iquery['message__startswith'] = request.session['requestParams']['taskid']
if 'jeditaskid' in request.session['requestParams']:
iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
if 'hours' not in request.session['requestParams']:
hours = 72
else:
hours = int(request.session['requestParams']['hours'])
startdate = timezone.now() - timedelta(hours=hours)
startdate = startdate.strftime(defaultDatetimeFormat)
enddate = timezone.now().strftime(defaultDatetimeFormat)
iquery['bintime__range'] = [startdate, enddate]
records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[:request.session['JOB_LIMIT']].values()
sites = {}
for record in records:
message = records['message']
print message
def taskprofileplot(request):
    """Serve the progress-profile plot (PNG) for the task given by ?jeditaskid=.

    Responds with an empty body when no valid task id is supplied or no plot
    could be produced.
    """
    taskid = int(request.GET['jeditaskid']) if 'jeditaskid' in request.GET else 0
    img = None
    if taskid != 0:
        plotter = TaskProgressPlot.TaskProgressPlot()
        img = plotter.get_task_profile(taskid=taskid)
    if img is None:
        return HttpResponse('')
    return HttpResponse(img, content_type="image/png")
def taskESprofileplot(request):
    """Serve the event-service progress plot (PNG) for ?jeditaskid=.

    Responds with an empty body when no valid task id is supplied or no plot
    could be produced.
    """
    taskid = int(request.GET['jeditaskid']) if 'jeditaskid' in request.GET else 0
    img = None
    if taskid != 0:
        plotter = TaskProgressPlot.TaskProgressPlot()
        img = plotter.get_es_task_profile(taskid=taskid)
    if img is None:
        return HttpResponse('')
    return HttpResponse(img, content_type="image/png")
def taskInfo(request, jeditaskid=0):
    """Task detail page for a single JEDI task.

    Resolves the task from the URL argument, ?jeditaskid= or an exact
    ?taskname= (wildcard names are delegated to taskList).  Builds job,
    event and dataset summaries via jobSummary2() and renders
    taskInfo(ES).html, or returns the raw data for JSON requests.
    Rendered data is cached for 20 minutes under the "taskInfo" key.
    """
    jeditaskid = int(jeditaskid)
    valid, response = initRequest(request)
    furl = request.get_full_path()
    # strip 'mode' (drop/nodrop) so templates can rebuild mode-switch links
    nomodeurl = removeParam(furl, 'mode')
    nomodeurl = extensibleURL(request, nomodeurl)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "taskInfo")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if data['eventservice'] == True:
            response = render_to_response('taskInfoES.html', data, RequestContext(request))
        else:
            response = render_to_response('taskInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # wildcard task names are a list query, not a single-task page
    if 'taskname' in request.session['requestParams'] and request.session['requestParams']['taskname'].find('*') >= 0:
        return taskList(request)
    setupView(request, hours=365 * 24, limit=999999999, querytype='task')
    eventservice = False
    query = {}
    tasks = []
    taskrec = None
    colnames = []
    columns = []
    jobsummary = []
    maxpss = []
    walltime = []
    jobsummaryESMerge = []
    jobsummaryPMERGE = []
    eventsdict = []
    objectStoreDict = []
    if 'jeditaskid' in request.session['requestParams']: jeditaskid = int(request.session['requestParams']['jeditaskid'])
    if jeditaskid != 0:
        query = {'jeditaskid': jeditaskid}
        tasks = JediTasks.objects.filter(**query).values()
        if len(tasks) > 0:
            if 'eventservice' in tasks[0] and tasks[0]['eventservice'] == 1: eventservice = True
        if eventservice:
            # Event-service task: summarize ES jobs and es_merge jobs separately.
            mode = 'drop'
            if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'drop': mode = 'drop'
            if 'mode' in request.session['requestParams'] and request.session['requestParams']['mode'] == 'nodrop': mode = 'nodrop'
            jobsummary, eventssummary, transactionKey, jobScoutIDs, hs06sSum, nevents, maxpss, walltime, sitepss, sitewalltime, maxpssf, walltimef, sitepssf, sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent = jobSummary2(
                query, exclude={}, mode=mode, isEventServiceFlag=True, substatusfilter='non_es_merge')
            jobsummaryESMerge, eventssummaryESM, transactionKeyESM, jobScoutIDsESM, hs06sSumESM, neventsESM, maxpssESM, walltimeESM, sitepssESM, sitewalltimeESM, maxpssfESM, walltimefESM, sitepssfESM, sitewalltimefESM, maxpsspercoreESM, maxpssfpercoreESM, hs06sESM, hs06sfESM, walltimepereventESM = jobSummary2(
                query, exclude={}, mode=mode, isEventServiceFlag=True, substatusfilter='es_merge')
            # per-event-state counts for the template
            for state in eventservicestatelist:
                eventstatus = {}
                eventstatus['statusname'] = state
                eventstatus['count'] = eventssummary[state]
                eventsdict.append(eventstatus)
            if mode == 'nodrop':
                # object-store event summary per site/CE; jeditaskid is an int,
                # so the %s interpolation cannot inject SQL
                sqlRequest = """select j.computingsite, j.COMPUTINGELEMENT,e.objstore_id,e.status,count(*) as nevents
                from atlas_panda.jedi_events e
                join
                (select computingsite, computingelement,pandaid from ATLAS_PANDA.JOBSARCHIVED4 where jeditaskid=%s
                UNION
                select computingsite, computingelement,pandaid from ATLAS_PANDAARCH.JOBSARCHIVED where jeditaskid=%s
                ) j
                on (e.jeditaskid=%s and e.pandaid=j.pandaid)
                group by j.computingsite, j.COMPUTINGELEMENT, objstore_id, status""" % (
                    jeditaskid, jeditaskid, jeditaskid)
                cur = connection.cursor()
                cur.execute(sqlRequest)
                ossummary = cur.fetchall()
                cur.close()
                ossummarynames = ['computingsite', 'computingelement', 'objectstoreid', 'statusindex', 'nevents']
                objectStoreDict = [dict(zip(ossummarynames, row)) for row in ossummary]
                for row in objectStoreDict: row['statusname'] = eventservicestatelist[row['statusindex']]
        else:
            ## Exclude merge jobs. Can be misleading. Can show failures with no downstream successes.
            exclude = {'processingtype': 'pmerge'}
            mode = 'drop'
            if 'mode' in request.session['requestParams']:
                mode = request.session['requestParams']['mode']
            jobsummary, eventssummary, transactionKey, jobScoutIDs, hs06sSum, nevents, maxpss, walltime, sitepss, sitewalltime, maxpssf, walltimef, sitepssf, sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent = jobSummary2(
                query, exclude=exclude, mode=mode)
            jobsummaryPMERGE, eventssummaryPM, transactionKeyPM, jobScoutIDsPMERGE, hs06sSumPMERGE, neventsPMERGE, maxpssPMERGE, walltimePMERGE, sitepssPMERGE, sitewalltimePMERGE, maxpssfPMERGE, walltimefPMERGE, sitepssfPMERGE, sitewalltimefPMERGE, maxpsspercorePMERGE, maxpssfpercorePMERGE, hs06sPMERGE, hs06sfPMERGE, walltimepereventPMERGE = jobSummary2(
                query, exclude={}, mode=mode, processingtype='pmerge')
    elif 'taskname' in request.session['requestParams']:
        # exact task-name lookup (wildcards were handled above)
        querybyname = {'taskname': request.session['requestParams']['taskname']}
        tasks = JediTasks.objects.filter(**querybyname).values()
        if len(tasks) > 0:
            jeditaskid = tasks[0]['jeditaskid']
            query = {'jeditaskid': jeditaskid}
    # NOTE(review): if neither branch above ran, 'mode', 'hs06sSum' etc. are
    # undefined and the template-data build below would raise NameError --
    # presumably every reachable request supplies jeditaskid or taskname.
    # hide the pmerge summary entirely when it has no non-zero state
    nonzeroPMERGE = 0
    for status in jobsummaryPMERGE:
        if status['count'] > 0:
            nonzeroPMERGE += 1
            break
    if nonzeroPMERGE == 0:
        jobsummaryPMERGE = None
    # average maxPSS over jobs that reported a positive value
    maxpssave = 0
    maxpsscount = 0
    for maxpssjob in maxpss:
        if maxpssjob > 0:
            maxpssave += maxpssjob
            maxpsscount += 1
    if maxpsscount > 0:
        maxpssave = maxpssave / maxpsscount
    else:
        maxpssave = ''
    tasks = cleanTaskList(request, tasks)
    # flatten the first task row into name/value pairs for the attribute table
    try:
        taskrec = tasks[0]
        colnames = taskrec.keys()
        colnames.sort()
        for k in colnames:
            val = taskrec[k]
            if taskrec[k] == None:
                val = ''
                continue
            pair = {'name': k, 'value': val}
            columns.append(pair)
    except IndexError:
        taskrec = None
    taskpars = JediTaskparams.objects.filter(**query).values()[:1000]
    jobparams = None
    taskparams = None
    taskparaml = None
    jobparamstxt = []
    if len(taskpars) > 0:
        taskparams = taskpars[0]['taskparams']
        try:
            # taskparams is stored as a JSON clob; render job parameters as HTML text
            taskparams = json.loads(taskparams)
            tpkeys = taskparams.keys()
            tpkeys.sort()
            taskparaml = []
            for k in tpkeys:
                rec = {'name': k, 'value': taskparams[k]}
                taskparaml.append(rec)
            jobparams = taskparams['jobParameters']
            jobparams.append(taskparams['log'])
            for p in jobparams:
                if p['type'] == 'constant':
                    ptxt = p['value']
                elif p['type'] == 'template':
                    ptxt = "<i>%s template:</i> value='%s' " % (p['param_type'], p['value'])
                    for v in p:
                        if v in ['type', 'param_type', 'value']: continue
                        ptxt += " %s='%s'" % (v, p[v])
                else:
                    ptxt = '<i>unknown parameter type %s:</i> ' % p['type']
                    for v in p:
                        if v in ['type', ]: continue
                        ptxt += " %s='%s'" % (v, p[v])
                jobparamstxt.append(ptxt)
            jobparamstxt = sorted(jobparamstxt, key=lambda x: x.lower())
        except ValueError:
            pass
    # backfill ticket info from taskparams when the task row has none
    if taskrec and 'ticketsystemtype' in taskrec and taskrec['ticketsystemtype'] == '' and taskparams != None:
        if 'ticketID' in taskparams: taskrec['ticketid'] = taskparams['ticketID']
        if 'ticketSystemType' in taskparams: taskrec['ticketsystemtype'] = taskparams['ticketSystemType']
    if taskrec:
        taskname = taskrec['taskname']
    elif 'taskname' in request.session['requestParams']:
        taskname = request.session['requestParams']['taskname']
    else:
        taskname = ''
    # if the error dialog quotes a URL, fetch its content as the log text
    logtxt = None
    if taskrec and taskrec['errordialog']:
        mat = re.match('^.*"([^"]+)"', taskrec['errordialog'])
        if mat:
            errurl = mat.group(1)
            cmd = "curl -s -f --compressed '%s'" % errurl
            logpfx = u"logtxt: %s\n" % cmd
            logout = commands.getoutput(cmd)
            if len(logout) > 0: logtxt = logout
    # dataset rollup: input file/event progress
    dsquery = {}
    dsquery['jeditaskid'] = jeditaskid
    dsets = JediDatasets.objects.filter(**dsquery).values()
    dsinfo = None
    nfiles = 0
    nfinished = 0
    nfailed = 0
    neventsTot = 0
    neventsUsedTot = 0
    if len(dsets) > 0:
        for ds in dsets:
            # only primary input datasets count toward progress
            if ds['type'] not in ['input', 'pseudo_input']: continue
            if ds['masterid']: continue
            if int(ds['nevents']) > 0:
                neventsTot += int(ds['nevents'])
                neventsUsedTot += int(ds['neventsused'])
            if int(ds['nfiles']) > 0:
                nfiles += int(ds['nfiles'])
                nfinished += int(ds['nfilesfinished'])
                nfailed += int(ds['nfilesfailed'])
        dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())
        if nfiles > 0:
            dsinfo = {}
            dsinfo['nfiles'] = nfiles
            dsinfo['nfilesfinished'] = nfinished
            dsinfo['nfilesfailed'] = nfailed
            dsinfo['pctfinished'] = int(100. * nfinished / nfiles)
            dsinfo['pctfailed'] = int(100. * nfailed / nfiles)
            if taskrec: taskrec['dsinfo'] = dsinfo
    ## get dataset types
    dstypesd = {}
    for ds in dsets:
        dstype = ds['type']
        if dstype not in dstypesd: dstypesd[dstype] = 0
        dstypesd[dstype] += 1
    dstkeys = dstypesd.keys()
    dstkeys.sort()
    dstypes = []
    for dst in dstkeys:
        dstd = {'type': dst, 'count': dstypesd[dst]}
        dstypes.append(dstd)
    ## get input containers
    inctrs = []
    if taskparams and 'dsForIN' in taskparams:
        inctrs = [taskparams['dsForIN'], ]
    ## get output containers
    cquery = {}
    cquery['jeditaskid'] = jeditaskid
    cquery['type__in'] = ('output', 'log')
    outctrs = []
    outctrs.extend(JediDatasets.objects.filter(**cquery).values_list('containername', flat=True).distinct())
    if len(outctrs) == 0 or outctrs[0] == '':
        outctrs = None
    if isinstance(outctrs, basestring):
        outctrs = [outctrs]
    # getBrokerageLog(request)
    # neventsTot = 0
    # neventsUsedTot = 0
    if taskrec:
        # derived progress / HS06 accounting fields for the template
        taskrec['totev'] = neventsTot
        taskrec['totevproc'] = neventsUsedTot
        # NOTE(review): Python 2 integer division -- pctfinished is truncated
        taskrec['pctfinished'] = (100 * taskrec['totevproc'] / taskrec['totev']) if (taskrec['totev'] > 0) else ''
        taskrec['totevhs06'] = (neventsTot) * taskrec['cputime'] if (
            taskrec['cputime'] is not None and neventsTot > 0) else None
        # if taskrec['pctfinished']<=20 or hs06sSum['total']==0:
        #     taskrec['totevhs06'] = (neventsTot)*taskrec['cputime'] if (taskrec['cputime'] is not None and neventsTot > 0) else None
        # else:
        #     taskrec['totevhs06'] = int(hs06sSum['total']*neventsTot)
        taskrec['totevprochs06'] = int(hs06sSum['finished'])
        taskrec['failedevprochs06'] = int(hs06sSum['failed'])
        taskrec['currenttotevhs06'] = int(hs06sSum['total'])
        taskrec['maxpssave'] = maxpssave
        # time window handed to the Kibana dashboard links
        if 'creationdate' in taskrec:
            taskrec['kibanatimefrom'] = taskrec['creationdate'].strftime("%Y-%m-%dT%H:%M:%SZ")
        else:
            taskrec['kibanatimefrom'] = None
        if taskrec['status'] in ['cancelled', 'failed', 'broken', 'aborted', 'finished', 'done']:
            taskrec['kibanatimeto'] = taskrec['modificationtime'].strftime("%Y-%m-%dT%H:%M:%SZ")
        else:
            taskrec['kibanatimeto'] = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
    # destination: storage token of any dataset, overridden by nucleus for WORLD cloud
    tquery = {}
    tquery['jeditaskid'] = jeditaskid
    tquery['storagetoken__isnull'] = False
    storagetoken = JediDatasets.objects.filter(**tquery).values('storagetoken')
    taskbrokerage = 'prod_brokerage' if (taskrec != None and taskrec['tasktype'] == 'prod') else 'analy_brokerage'
    if storagetoken:
        if taskrec:
            taskrec['destination'] = storagetoken[0]['storagetoken']
    if (taskrec != None and taskrec['cloud'] == 'WORLD'):
        taskrec['destination'] = taskrec['nucleus']
    # show the profile plot link only when at least one job finished
    showtaskprof = False
    countfailed = [val['count'] for val in jobsummary if val['name'] == 'finished']
    if len(countfailed) > 0 and countfailed[0] > 0:
        showtaskprof = True
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        # JSON API branch: return only task row, taskparams and datasets
        del tasks
        del columns
        # NOTE(review): 'ds' is the leftover loop variable from the dataset
        # loops above -- if dsets was empty this del raises NameError; confirm
        del ds
        if taskrec:
            # datetimes are stringified so the dict is JSON-serializable
            taskrec['creationdate'] = taskrec['creationdate'].strftime(defaultDatetimeFormat)
            taskrec['modificationtime'] = taskrec['modificationtime'].strftime(defaultDatetimeFormat)
            taskrec['starttime'] = taskrec['starttime'].strftime(defaultDatetimeFormat)
            taskrec['statechangetime'] = taskrec['statechangetime'].strftime(defaultDatetimeFormat)
        for dset in dsets:
            dset['creationtime'] = dset['creationtime'].strftime(defaultDatetimeFormat)
            dset['modificationtime'] = dset['modificationtime'].strftime(defaultDatetimeFormat)
            if dset['statechecktime'] is not None:
                dset['statechecktime'] = dset['statechecktime'].strftime(defaultDatetimeFormat)
        data = {
            'task': taskrec,
            'taskparams': taskparams,
            'datasets': dsets,
        }
        del request.session['TFIRST']
        del request.session['TLAST']
        return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
    else:
        attrs = []
        do_redirect = False
        try:
            # ids below 4M predate JEDI; flag for a possible redirect
            if int(jeditaskid) > 0 and int(jeditaskid) < 4000000:
                do_redirect = True
        except:
            pass
        if taskrec:
            attrs.append({'name': 'Status', 'value': taskrec['status']})
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'furl': furl,
            'nomodeurl': nomodeurl,
            'mode': mode,
            'showtaskprof': showtaskprof,
            'jobsummaryESMerge': jobsummaryESMerge,
            'jobsummaryPMERGE': jobsummaryPMERGE,
            'nevents': nevents,
            'maxpss': maxpss,
            'taskbrokerage': taskbrokerage,
            'walltime': walltime,
            'sitepss': json.dumps(sitepss),
            'sitewalltime': json.dumps(sitewalltime),
            'maxpssf': maxpssf,
            'walltimef': walltimef,
            'sitepssf': json.dumps(sitepssf),
            'sitewalltimef': json.dumps(sitewalltimef),
            'maxpsspercore': maxpsspercore,
            'maxpssfpercore': maxpssfpercore,
            'hs06s': hs06s,
            'hs06sf': hs06sf,
            'walltimeperevent': walltimeperevent,
            'jobscoutids': jobScoutIDs,
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'task': taskrec,
            'taskname': taskname,
            'taskparams': taskparams,
            'taskparaml': taskparaml,
            'jobparams': jobparamstxt,
            'columns': columns,
            'attrs': attrs,
            'jobsummary': jobsummary,
            'eventssummary': eventsdict,
            'ossummary': objectStoreDict,
            'jeditaskid': jeditaskid,
            'logtxt': logtxt,
            'datasets': dsets,
            'dstypes': dstypes,
            'inctrs': inctrs,
            'outctrs': outctrs,
            'vomode': VOMODE,
            'eventservice': eventservice,
            'tk': transactionKey,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        setCacheEntry(request, "taskInfo", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        if eventservice:
            response = render_to_response('taskInfoES.html', data, RequestContext(request))
        else:
            response = render_to_response('taskInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def taskchain(request):
    """Render the task-chain page for the task given by ?jeditaskid=.

    The chain markup comes from the ATLAS_PANDABIGMON.GETTASKSCHAIN_TEST
    stored function; a JSON error body is returned when no id is supplied.
    """
    valid, response = initRequest(request)
    taskid = -1
    if 'jeditaskid' in request.session['requestParams']:
        taskid = int(request.session['requestParams']['jeditaskid'])
    if taskid == -1:
        err = {"error": "no jeditaskid supplied"}
        return HttpResponse(json.dumps(err, cls=DateTimeEncoder), content_type='text/html')
    cursor = connection.cursor()
    # taskid was int()-cast above, so the %i interpolation cannot inject SQL
    cursor.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETTASKSCHAIN_TEST(%i))" % taskid)
    chain_rows = cursor.fetchall()
    chain = "".join("".join(map(str, row)) for row in chain_rows)
    data = {
        'viewParams': request.session['viewParams'],
        'taskChain': chain,
        'jeditaskid': taskid
    }
    response = render_to_response('taskchain.html', data, RequestContext(request))
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
def ganttTaskChain(request):
    """Render a Gantt-style chain for ?jeditaskid= from the DEFT database."""
    from django.db import connections
    valid, response = initRequest(request)
    taskid = -1
    if 'jeditaskid' in request.session['requestParams']:
        taskid = int(request.session['requestParams']['jeditaskid'])
    if taskid == -1:
        err = {"error": "no jeditaskid supplied"}
        return HttpResponse(json.dumps(err, cls=DateTimeEncoder), content_type='text/html')
    cursor = connections["deft_adcr"].cursor()
    cursor.execute(chainsql.query.replace('%i', str(taskid)))
    rows = cursor.fetchall()
    flattened = ["".join(map(str, r)) for r in rows]
    chain_str = flattened[0].replace("\n", "")
    # drop everything through the first '>' -- presumably a leading tag; confirm
    cut = chain_str.index(">")
    data = {
        'viewParams': request.session['viewParams'],
        'ganttTaskChain': chain_str[cut + 1:],
        'jeditaskid': taskid,
        'request': request,
    }
    response = render_to_response('ganttTaskChain.html', data, RequestContext(request))
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
def jobSummary2(query, exclude={}, mode='drop', isEventServiceFlag=False, substatusfilter='', processingtype=''):
    """Collect per-status job counts and resource metrics for one task.

    :param query: Django filter dict; must contain 'jeditaskid'.
    :param exclude: extra exclude() terms.  NOTE(review): this mutable default
        is mutated below (exclude['eventservice'] = 2) when substatusfilter is
        neither '' nor 'es_merge', so the change leaks across calls -- confirm
        callers always pass a fresh dict.
    :param mode: 'drop' filters out retried jobs, anything else keeps them.
    :param isEventServiceFlag: additionally build the per-event-state summary.
    :param substatusfilter: '' (all), 'es_merge' (only merge jobs) or any
        other value (exclude merge jobs).
    :param processingtype: when non-empty, restrict the query to pmerge jobs.
    :return: 19-tuple (jobstates, essummary, transactionKey, jobScoutIDs,
        hs06sSum, nevents, maxpss, walltime, sitepss, sitewalltime, maxpssf,
        walltimef, sitepssf, sitewalltimef, maxpsspercore, maxpssfpercore,
        hs06s, hs06sf, walltimeperevent).
    """
    jobs = []
    jobScoutIDs = {}
    jobScoutIDs['cputimescoutjob'] = []
    jobScoutIDs['walltimescoutjob'] = []
    newquery = copy.deepcopy(query)
    isESMerge = False
    if substatusfilter != '':
        if (substatusfilter == 'es_merge'):
            # eventservice == 2 marks ES merge jobs
            newquery['eventservice'] = 2
            isESMerge = True
        else:
            exclude['eventservice'] = 2
    isReturnDroppedPMerge = False
    if processingtype != '':
        newquery['processingtype'] = 'pmerge'
        isReturnDroppedPMerge = True
    values = 'actualcorecount', 'eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus', 'pandaid', 'jobstatus', 'jeditaskid', 'processingtype', 'maxpss', 'starttime', 'endtime', 'computingsite', 'jobsetid', 'jobmetrics', 'nevents', 'hs06', 'hs06sec'
    # newquery['jobstatus'] = 'finished'
    # Here we apply sort for implem rule about two jobs in Jobsarchived and Jobsarchived4 with 'finished' and closed statuses
    # print str(datetime.now())
    jobs.extend(Jobsarchived.objects.filter(**newquery).exclude(**exclude).values(*values))
    jobs.extend(Jobsdefined4.objects.filter(**newquery).exclude(**exclude).values(*values))
    jobs.extend(Jobswaiting4.objects.filter(**newquery).exclude(**exclude).values(*values))
    jobs.extend(Jobsactive4.objects.filter(**newquery).exclude(**exclude).values(*values))
    jobs.extend(Jobsarchived4.objects.filter(**newquery).exclude(**exclude).values(*values))
    # print str(datetime.now())
    jobsSet = {}
    newjobs = []
    hs06sSum = {'finished': 0, 'failed': 0, 'total': 0}
    cpuTimeCurrent = []
    for job in jobs:
        # dedupe pandaids across tables; a 'finished' row supersedes 'closed'
        if not job['pandaid'] in jobsSet:
            jobsSet[job['pandaid']] = job['jobstatus']
            newjobs.append(job)
        elif jobsSet[job['pandaid']] == 'closed' and job['jobstatus'] == 'finished':
            jobsSet[job['pandaid']] = job['jobstatus']
            newjobs.append(job)
        # scout jobs are flagged in jobmetrics as scout=<metric>[,<metric>...]
        if 'scout=cpuTime' in job['jobmetrics'] or (
                'scout=' in job['jobmetrics'] and 'cpuTime' in job['jobmetrics'][job['jobmetrics'].index('scout='):]):
            jobScoutIDs['cputimescoutjob'].append(job['pandaid'])
        if 'scout=ioIntensity' in job['jobmetrics']:
            jobScoutIDs['iointensityscoutjob'] = job['pandaid']
        if 'scout=outDiskCount' in job['jobmetrics']:
            jobScoutIDs['outdiskcountscoutjob'] = job['pandaid']
        if 'scout=ramCount' in job['jobmetrics'] or (
                'scout=' in job['jobmetrics'] and 'ramCount' in job['jobmetrics'][job['jobmetrics'].index('scout='):]):
            jobScoutIDs['ramcountscoutjob'] = job['pandaid']
        if 'scout=walltime' in job['jobmetrics'] or (
                'scout=' in job['jobmetrics'] and 'walltime' in job['jobmetrics'][job['jobmetrics'].index('scout='):]):
            jobScoutIDs['walltimescoutjob'].append(job['pandaid'])
        if 'actualcorecount' in job and job['actualcorecount'] is None:
            job['actualcorecount'] = 1
        if job['jobstatus'] in ['finished', 'failed'] and 'endtime' in job and 'starttime' in job and job[
                'starttime'] and job['endtime']:
            # wall-clock duration in seconds, clamped at zero
            duration = max(job['endtime'] - job['starttime'], timedelta(seconds=0))
            job['duration'] = duration.days * 24 * 3600 + duration.seconds
            if job['hs06sec'] is None:
                # backfill HS06-seconds from the site core power when missing
                if job['computingsite'] in pandaSites:
                    job['hs06sec'] = (job['duration']) * float(pandaSites[job['computingsite']]['corepower']) * job[
                        'actualcorecount']
                else:
                    job['hs06sec'] = 0
            if job['nevents'] and job['nevents'] > 0:
                cpuTimeCurrent.append(job['hs06sec'] / job['nevents'])
                job['walltimeperevent'] = job['duration'] * job['actualcorecount'] / job['nevents']
            hs06sSum['finished'] += job['hs06sec'] if job['jobstatus'] == 'finished' else 0
            hs06sSum['failed'] += job['hs06sec'] if job['jobstatus'] == 'failed' else 0
            hs06sSum['total'] += job['hs06sec']
    jobs = newjobs
    # drop retried jobs unless the set is too large to process
    if mode == 'drop' and len(jobs) < 300000:
        print 'filtering retries'
        jobs, droplist, droppedPMerge = dropRetrielsJobs(jobs, newquery['jeditaskid'], isReturnDroppedPMerge)
    # per-status metric series consumed by the plots on the task page
    nevents = []
    maxpss = []
    maxpsspercore = []
    walltime = []
    sitepss = []
    sitewalltime = []
    maxpssf = []
    maxpssfpercore = []
    walltimef = []
    sitepssf = []
    sitewalltimef = []
    hs06s = []
    hs06sf = []
    walltimeperevent = []
    for job in jobs:
        if job['actualcorecount'] is None:
            job['actualcorecount'] = 1
        if job['maxpss'] is not None and job['maxpss'] != -1:
            if job['jobstatus'] == 'finished':
                maxpss.append(job['maxpss'] / 1024)
                if job['actualcorecount'] and job['actualcorecount'] > 0:
                    maxpsspercore.append(job['maxpss'] / 1024 / job['actualcorecount'])
                sitepss.append(job['computingsite'])
                nevents.append(job['nevents'])
            if job['jobstatus'] == 'failed':
                maxpssf.append(job['maxpss'] / 1024)
                if job['actualcorecount'] and job['actualcorecount'] > 0:
                    maxpssfpercore.append(job['maxpss'] / 1024 / job['actualcorecount'])
                sitepssf.append(job['computingsite'])
        if 'duration' in job and job['duration']:
            if job['jobstatus'] == 'finished':
                walltime.append(job['duration'])
                sitewalltime.append(job['computingsite'])
                hs06s.append(job['hs06sec'])
                if 'walltimeperevent' in job:
                    walltimeperevent.append(job['walltimeperevent'])
            if job['jobstatus'] == 'failed':
                walltimef.append(job['duration'])
                sitewalltimef.append(job['computingsite'])
                hs06sf.append(job['hs06sec'])
    # job counts per status, ordered like the global statelist
    jobstates = []
    global statelist
    for state in statelist:
        statecount = {}
        statecount['name'] = state
        statecount['count'] = 0
        for job in jobs:
            # if isEventService and job['jobstatus'] == 'cancelled':
            #     job['jobstatus'] = 'finished'
            if job['jobstatus'] == state:
                statecount['count'] += 1
                continue
        jobstates.append(statecount)
    # event-state summary for event-service tasks (skipped for merge jobs)
    essummary = dict((key, 0) for key in eventservicestatelist)
    transactionKey = -1
    if isEventServiceFlag and not isESMerge:
        print 'getting events states summary'
        if mode == 'drop':
            # stage the surviving pandaids into a temp table keyed by a random
            # transaction key, then aggregate JEDI_EVENTS against it
            esjobs = []
            for job in jobs:
                esjobs.append(job['pandaid'])
            random.seed()
            if dbaccess['default']['ENGINE'].find('oracle') >= 0:
                tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
            else:
                tmpTableName = "TMP_IDS1DEBUG"
            transactionKey = random.randrange(1000000)
            # connection.enter_transaction_management()
            new_cur = connection.cursor()
            executionData = []
            for id in esjobs:
                executionData.append((id, transactionKey, timezone.now().strftime(defaultDatetimeFormat)))
            query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY,INS_TIME) VALUES (%s, %s, %s)"""
            new_cur.executemany(query, executionData)
            # connection.commit()
            new_cur.execute(
                """
                SELECT /*+ dynamic_sampling(TMP_IDS1 0) cardinality(TMP_IDS1 10) INDEX_RS_ASC(ev JEDI_EVENTS_PANDAID_STATUS_IDX) NO_INDEX_FFS(ev JEDI_EVENTS_PK) NO_INDEX_SS(ev JEDI_EVENTS_PK) */ SUM(DEF_MAX_EVENTID-DEF_MIN_EVENTID+1) AS EVCOUNT, STATUS FROM ATLAS_PANDA.JEDI_EVENTS ev, %s WHERE TRANSACTIONKEY=%i AND PANDAID = ID GROUP BY STATUS
                """ % (tmpTableName, transactionKey)
            )
            evtable = dictfetchall(new_cur)
            # new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
            # connection.commit()
            # connection.leave_transaction_management()
            for ev in evtable:
                essummary[eventservicestatelist[ev['STATUS']]] += ev['EVCOUNT']
        eventsdict = []
        if mode == 'nodrop':
            # no retry filtering: aggregate directly from JediEvents
            equery = {'jeditaskid': newquery['jeditaskid']}
            eventsdict.extend(
                JediEvents.objects.filter(**equery).values('status').annotate(count=Count('status')).order_by('status'))
            for state in eventsdict:
                essummary[eventservicestatelist[state['status']]] = state['count']
    return jobstates, essummary, transactionKey, jobScoutIDs, hs06sSum, nevents, maxpss, walltime, sitepss, sitewalltime, maxpssf, walltimef, sitepssf, sitewalltimef, maxpsspercore, maxpssfpercore, hs06s, hs06sf, walltimeperevent
def jobStateSummary(jobs):
    """Return a {status: count} mapping of *jobs* over the global statelist.

    A job whose status is missing from statelist raises KeyError (unchanged
    contract).
    """
    global statelist
    counts = dict.fromkeys(statelist, 0)
    for job in jobs:
        counts[job['jobstatus']] += 1
    return counts
def errorSummaryDict(request, jobs, tasknamedict, testjobs):
    """ take a job list and produce error summaries from it

    Scans failed/holding jobs (all jobs when testjobs is True) and builds:
    errors by code, by site, by user and by task, an overall attribute
    summary, and a half-hour-binned histogram of failures over time.

    :param request: Django request; only requestParams['produsername' /
        'sortby'] are consulted.
    :param jobs: list of job dicts carrying the fields in flist plus the
        error-code/diag columns described by the module-level errorcodelist.
    :param tasknamedict: {taskid: taskname} lookup built by the caller.
    :param testjobs: include jobs of every status, not just failed/holding.
    :return: (errsByCountL, errsBySiteL, errsByUserL, errsByTaskL, suml,
        errHistL) -- all sorted lists ready for templating.
    """
    errsByCount = {}
    errsBySite = {}
    errsByUser = {}
    errsByTask = {}
    sumd = {}
    ## histogram of errors vs. time, for plotting
    errHist = {}
    flist = ['cloud', 'computingsite', 'produsername', 'taskid', 'jeditaskid', 'processingtype', 'prodsourcelabel',
             'transformation', 'workinggroup', 'specialhandling', 'jobstatus']
    print len(jobs)
    for job in jobs:
        if not testjobs:
            if job['jobstatus'] not in ['failed', 'holding']: continue
        site = job['computingsite']
        # if 'cloud' in request.session['requestParams']:
        #     if site in homeCloud and homeCloud[site] != request.session['requestParams']['cloud']: continue
        user = job['produsername']
        taskname = ''
        # prefer the JEDI task id; fall back to the legacy taskid column
        if job['jeditaskid'] > 0:
            taskid = job['jeditaskid']
            if taskid in tasknamedict:
                taskname = tasknamedict[taskid]
            tasktype = 'jeditaskid'
        else:
            taskid = job['taskid']
            if taskid in tasknamedict:
                taskname = tasknamedict[taskid]
            tasktype = 'taskid'
        # failure histogram binned on half-hour boundaries
        if 'modificationtime' in job and job['jobstatus'] == 'failed':
            tm = job['modificationtime']
            if tm is not None:
                tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
                if not tm in errHist: errHist[tm] = 1
                else:
                    errHist[tm] += 1
        ## Overall summary
        for f in flist:
            if job[f]:
                # legacy (pre-JEDI) taskids are only summarized for per-user views
                if f == 'taskid' and job[f] < 1000000 and 'produsername' not in request.session['requestParams']:
                    pass
                else:
                    if not f in sumd: sumd[f] = {}
                    if not job[f] in sumd[f]: sumd[f][job[f]] = 0
                    sumd[f][job[f]] += 1
        # specialhandling is a space-separated list; count each flag separately
        if job['specialhandling']:
            if not 'specialhandling' in sumd: sumd['specialhandling'] = {}
            shl = job['specialhandling'].split()
            for v in shl:
                if not v in sumd['specialhandling']: sumd['specialhandling'][v] = 0
                sumd['specialhandling'][v] += 1
        for err in errorcodelist:
            if job[err['error']] != 0 and job[err['error']] != '' and job[err['error']] != None:
                errval = job[err['error']]
                ## error code of zero is not an error
                if errval == 0 or errval == '0' or errval == None: continue
                errdiag = ''
                try:
                    errnum = int(errval)
                    if err['error'] in errorCodes and errnum in errorCodes[err['error']]:
                        errdiag = errorCodes[err['error']][errnum]
                except:
                    # non-numeric code: keep the raw value as the identifier
                    errnum = errval
                errcode = "%s:%s" % (err['name'], errnum)
                if err['diag']:
                    errdiag = job[err['diag']]
                # --- by error code ---
                if errcode not in errsByCount:
                    errsByCount[errcode] = {}
                    errsByCount[errcode]['error'] = errcode
                    errsByCount[errcode]['codename'] = err['error']
                    errsByCount[errcode]['codeval'] = errnum
                    errsByCount[errcode]['diag'] = errdiag
                    errsByCount[errcode]['count'] = 0
                errsByCount[errcode]['count'] += 1
                # --- by user ---
                if user not in errsByUser:
                    errsByUser[user] = {}
                    errsByUser[user]['name'] = user
                    errsByUser[user]['errors'] = {}
                    errsByUser[user]['toterrors'] = 0
                if errcode not in errsByUser[user]['errors']:
                    errsByUser[user]['errors'][errcode] = {}
                    errsByUser[user]['errors'][errcode]['error'] = errcode
                    errsByUser[user]['errors'][errcode]['codename'] = err['error']
                    errsByUser[user]['errors'][errcode]['codeval'] = errnum
                    errsByUser[user]['errors'][errcode]['diag'] = errdiag
                    errsByUser[user]['errors'][errcode]['count'] = 0
                errsByUser[user]['errors'][errcode]['count'] += 1
                errsByUser[user]['toterrors'] += 1
                # --- by site ---
                if site not in errsBySite:
                    errsBySite[site] = {}
                    errsBySite[site]['name'] = site
                    errsBySite[site]['errors'] = {}
                    errsBySite[site]['toterrors'] = 0
                    errsBySite[site]['toterrjobs'] = 0
                if errcode not in errsBySite[site]['errors']:
                    errsBySite[site]['errors'][errcode] = {}
                    errsBySite[site]['errors'][errcode]['error'] = errcode
                    errsBySite[site]['errors'][errcode]['codename'] = err['error']
                    errsBySite[site]['errors'][errcode]['codeval'] = errnum
                    errsBySite[site]['errors'][errcode]['diag'] = errdiag
                    errsBySite[site]['errors'][errcode]['count'] = 0
                errsBySite[site]['errors'][errcode]['count'] += 1
                errsBySite[site]['toterrors'] += 1
                # --- by task (JEDI tasks, or legacy ids in per-user views) ---
                if tasktype == 'jeditaskid' or taskid > 1000000 or 'produsername' in request.session['requestParams']:
                    if taskid not in errsByTask:
                        errsByTask[taskid] = {}
                        errsByTask[taskid]['name'] = taskid
                        errsByTask[taskid]['longname'] = taskname
                        errsByTask[taskid]['errors'] = {}
                        errsByTask[taskid]['toterrors'] = 0
                        errsByTask[taskid]['toterrjobs'] = 0
                        errsByTask[taskid]['tasktype'] = tasktype
                    if errcode not in errsByTask[taskid]['errors']:
                        errsByTask[taskid]['errors'][errcode] = {}
                        errsByTask[taskid]['errors'][errcode]['error'] = errcode
                        errsByTask[taskid]['errors'][errcode]['codename'] = err['error']
                        errsByTask[taskid]['errors'][errcode]['codeval'] = errnum
                        errsByTask[taskid]['errors'][errcode]['diag'] = errdiag
                        errsByTask[taskid]['errors'][errcode]['count'] = 0
                    errsByTask[taskid]['errors'][errcode]['count'] += 1
                    errsByTask[taskid]['toterrors'] += 1
        # one erroring job may carry several error codes; count the job once
        if site in errsBySite: errsBySite[site]['toterrjobs'] += 1
        if taskid in errsByTask: errsByTask[taskid]['toterrjobs'] += 1
    ## reorganize as sorted lists
    errsByCountL = []
    errsBySiteL = []
    errsByUserL = []
    errsByTaskL = []
    kys = errsByCount.keys()
    kys.sort()
    for err in kys:
        errsByCountL.append(errsByCount[err])
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
        errsByCountL = sorted(errsByCountL, key=lambda x: -x['count'])
    kys = errsByUser.keys()
    kys.sort()
    for user in kys:
        errsByUser[user]['errorlist'] = []
        errkeys = errsByUser[user]['errors'].keys()
        errkeys.sort()
        for err in errkeys:
            errsByUser[user]['errorlist'].append(errsByUser[user]['errors'][err])
        errsByUserL.append(errsByUser[user])
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
        errsByUserL = sorted(errsByUserL, key=lambda x: -x['toterrors'])
    kys = errsBySite.keys()
    kys.sort()
    for site in kys:
        errsBySite[site]['errorlist'] = []
        errkeys = errsBySite[site]['errors'].keys()
        errkeys.sort()
        for err in errkeys:
            errsBySite[site]['errorlist'].append(errsBySite[site]['errors'][err])
        errsBySiteL.append(errsBySite[site])
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
        errsBySiteL = sorted(errsBySiteL, key=lambda x: -x['toterrors'])
    kys = errsByTask.keys()
    kys.sort()
    for taskid in kys:
        errsByTask[taskid]['errorlist'] = []
        errkeys = errsByTask[taskid]['errors'].keys()
        errkeys.sort()
        for err in errkeys:
            errsByTask[taskid]['errorlist'].append(errsByTask[taskid]['errors'][err])
        errsByTaskL.append(errsByTask[taskid])
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
        errsByTaskL = sorted(errsByTaskL, key=lambda x: -x['toterrors'])
    # flatten the overall attribute summary for the template
    suml = []
    for f in sumd:
        itemd = {}
        itemd['field'] = f
        iteml = []
        kys = sumd[f].keys()
        kys.sort()
        for ky in kys:
            iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['field'])
    if 'sortby' in request.session['requestParams'] and request.session['requestParams']['sortby'] == 'count':
        for item in suml:
            item['list'] = sorted(item['list'], key=lambda x: -x['kvalue'])
    kys = errHist.keys()
    kys.sort()
    errHistL = []
    for k in kys:
        errHistL.append([k, errHist[k]])
    return errsByCountL, errsBySiteL, errsByUserL, errsByTaskL, suml, errHistL
def getTaskName(tasktype, taskid):
    """Resolve a task's display name.

    Only 'jeditaskid' lookups hit the database; legacy 'taskid' lookups and
    empty/'None' ids yield the empty string.
    """
    if tasktype == 'jeditaskid' and taskid and taskid != 'None':
        rows = JediTasks.objects.filter(jeditaskid=taskid).values('taskname')
        if len(rows) > 0:
            return rows[0]['taskname']
    return ''
# Shared result store for totalCount() worker threads, keyed by the request
# digest from digkey(); each value is a list of per-model row counts.
tcount = {}
# Serializes access to tcount across concurrent totalCount() threads.
lock = Lock()
def totalCount(panJobList, query, wildCardExtension,dkey):
print 'Thread started'
lock.acquire()
try:
tcount.setdefault(dkey,[])
for panJob in panJobList:
tcount[dkey].append(panJob.objects.filter(**query).extra(where=[wildCardExtension]).count())
finally:
lock.release()
print 'Thread finished'
def digkey(rq):
    """Build a per-request digest key: sha256 of session key + query time."""
    skey = rq.session.session_key
    if skey is None:
        # No session yet: fall back to a random key so the digest stays unique.
        skey = random.randrange(1000000)
    digest = hashlib.sha256(str(skey) + ' ' + rq.session['qtime'])
    return digest.hexdigest()
def errorSummary(request):
    """
    Error summary view: collects failed/holding jobs over a recent time
    window and aggregates them by error code, site, user and task, plus an
    error-count histogram. Renders errorSummary.html for browsers; for
    json clients returns a minimal JSON job list.
    """
    valid, response = initRequest(request)
    dkey = digkey(request)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "errorSummary")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('errorSummary.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # A 'test' prodsourcelabel switches to RC test mode (no failed/holding
    # jobstatus filter is applied below in that case).
    testjobs = False
    if 'prodsourcelabel' in request.session['requestParams'] and request.session['requestParams'][
        'prodsourcelabel'].lower().find('test') >= 0:
        testjobs = True
    jobtype = ''
    if 'jobtype' in request.session['requestParams']:
        jobtype = request.session['requestParams']['jobtype']
    elif '/analysis' in request.path:
        jobtype = 'analysis'
    elif '/production' in request.path:
        jobtype = 'production'
    elif testjobs:
        jobtype = 'rc_test'
    # Default look-back window (hours) and row limit depend on the job type;
    # explicit 'hours'/'limit' request parameters override them below.
    if jobtype == '':
        hours = 3
        limit = 100000
    elif jobtype.startswith('anal'):
        hours = 6
        limit = 100000
    elif 'JOB_LIMIT' in request.session:
        hours = 6
        limit = request.session['JOB_LIMIT']
    else:
        hours = 12
        limit = 100000
    if 'hours' in request.session['requestParams']:
        hours = int(request.session['requestParams']['hours'])
    if 'limit' in request.session['requestParams']:
        limit = int(request.session['requestParams']['limit'])
    xurlsubst = extensibleURL(request)
    xurlsubstNoSite = xurlsubst
    # Preprocess request to cover all sites for cloud to view jobs assigned to the World
    if ('cloud' in request.session['requestParams']) and ('computingsite' not in request.session['requestParams']) and (
        request.session['requestParams']['cloud'] != 'WORLD') and (
        '|' not in request.session['requestParams']['cloud']):
        cloud = request.session['requestParams']['cloud']
        del request.session['requestParams']['cloud']
        sites = set([site['site'] for site in pandaSites.values() if site['cloud'] == cloud])
        siteStr = ""
        for site in sites:
            siteStr += "|" + site
        siteStr = siteStr[1:]
        request.session['requestParams']['computingsite'] = siteStr
        # this substitution is nessessary to propagate update in the xurl
        updatedRequest = ""
        updatedRequestNoSite = ""
        for param in request.session['requestParams']:
            updatedRequest += '&' + param + '=' + request.session['requestParams'][param]
            if param != 'computingsite':
                updatedRequestNoSite += '&' + param + '=' + request.session['requestParams'][param]
        updatedRequest = updatedRequest[1:]
        updatedRequestNoSite = updatedRequestNoSite[1:]
        xurlsubst = '/errors/?' + updatedRequest + '&'
        xurlsubstNoSite = '/errors/?' + updatedRequestNoSite + '&'
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=limit, wildCardExt=True)
    if not testjobs: query['jobstatus__in'] = ['failed', 'holding']
    jobs = []
    values = 'eventservice', 'produsername', 'pandaid', 'cloud', 'computingsite', 'cpuconsumptiontime', 'jobstatus', 'transformation', 'prodsourcelabel', 'specialhandling', 'vo', 'modificationtime', 'atlasrelease', 'jobsetid', 'processingtype', 'workinggroup', 'jeditaskid', 'taskid', 'starttime', 'endtime', 'brokerageerrorcode', 'brokerageerrordiag', 'ddmerrorcode', 'ddmerrordiag', 'exeerrorcode', 'exeerrordiag', 'jobdispatchererrorcode', 'jobdispatchererrordiag', 'piloterrorcode', 'piloterrordiag', 'superrorcode', 'superrordiag', 'taskbuffererrorcode', 'taskbuffererrordiag', 'transexitcode', 'destinationse', 'currentpriority', 'computingelement'
    print "step3-1"
    print str(datetime.now())
    # Test jobs can still sit in the defined/waiting tables; pull those too.
    if testjobs:
        jobs.extend(
            Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
                *values))
        jobs.extend(
            Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
                *values))
    listJobs = Jobsactive4, Jobsarchived4, Jobsdefined4, Jobswaiting4
    jobs.extend(
        Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
            *values))
    jobs.extend(
        Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
            *values))
    # If the window reaches back more than a day, include the long-term archive table.
    if (((datetime.now() - datetime.strptime(query['modificationtime__range'][0], "%Y-%m-%d %H:%M:%S")).days > 1) or \
        ((datetime.now() - datetime.strptime(query['modificationtime__range'][1],
                                             "%Y-%m-%d %H:%M:%S")).days > 1)):
        jobs.extend(
            Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values(
                *values))
        listJobs = Jobsactive4, Jobsarchived4, Jobsdefined4, Jobswaiting4, Jobsarchived
    # Count total matching jobs per table in a background thread; joined below.
    thread = Thread(target=totalCount, args=(listJobs, query, wildCardExtension,dkey))
    thread.start()
    print "step3-1-0"
    print str(datetime.now())
    jobs = cleanJobList(request, jobs, mode='nodrop', doAddMeta=False)
    njobs = len(jobs)
    tasknamedict = taskNameDict(jobs)
    print "step3-1-1"
    print str(datetime.now())
    ## Build the error summary.
    errsByCount, errsBySite, errsByUser, errsByTask, sumd, errHist = errorSummaryDict(request, jobs, tasknamedict,
                                                                                     testjobs)
    ## Build the state summary and add state info to site error summary
    # notime = True
    # if testjobs: notime = False
    notime = False #### behave as it used to before introducing notime for dashboards. Pull only 12hrs.
    statesummary = dashSummary(request, hours, limit=limit, view=jobtype, cloudview='region', notime=notime)
    sitestates = {}
    savestates = ['finished', 'failed', 'cancelled', 'holding', ]
    for cloud in statesummary:
        for site in cloud['sites']:
            sitename = cloud['sites'][site]['name']
            sitestates[sitename] = {}
            for s in savestates:
                sitestates[sitename][s] = cloud['sites'][site]['states'][s]['count']
            sitestates[sitename]['pctfail'] = cloud['sites'][site]['pctfail']
    # Merge the per-site state counts into the site error summary rows.
    for site in errsBySite:
        sitename = site['name']
        if sitename in sitestates:
            for s in savestates:
                if s in sitestates[sitename]: site[s] = sitestates[sitename][s]
            if 'pctfail' in sitestates[sitename]: site['pctfail'] = sitestates[sitename]['pctfail']
    taskname = ''
    if not testjobs:
        ## Build the task state summary and add task state info to task error summary
        print "step3-1-2"
        print str(datetime.now())
        taskstatesummary = dashTaskSummary(request, hours, limit=limit, view=jobtype)
        print "step3-2"
        print str(datetime.now())
        taskstates = {}
        for task in taskstatesummary:
            taskid = task['taskid']
            taskstates[taskid] = {}
            for s in savestates:
                taskstates[taskid][s] = task['states'][s]['count']
            if 'pctfail' in task: taskstates[taskid]['pctfail'] = task['pctfail']
        for task in errsByTask:
            taskid = task['name']
            if taskid in taskstates:
                for s in savestates:
                    if s in taskstates[taskid]: task[s] = taskstates[taskid][s]
                if 'pctfail' in taskstates[taskid]: task['pctfail'] = taskstates[taskid]['pctfail']
        if 'jeditaskid' in request.session['requestParams']:
            taskname = getTaskName('jeditaskid', request.session['requestParams']['jeditaskid'])
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
    else:
        sortby = 'alpha'
    flowstruct = buildGoogleFlowDiagram(request, jobs=jobs)
    print "step3-3"
    print str(datetime.now())
    # Wait for the background per-table count and collect/clear its result.
    thread.join()
    jobsErrorsTotalCount = sum(tcount[dkey])
    print dkey
    print tcount[dkey]
    del tcount[dkey]
    print tcount
    print jobsErrorsTotalCount
    listPar =[]
    for key, val in request.session['requestParams'].iteritems():
        if (key!='limit' and key!='display_limit'):
            listPar.append(key + '=' + str(val))
    if len(listPar)>0:
        urlParametrs = '&'.join(listPar)+'&'
    else:
        urlParametrs = None
    print listPar
    del listPar
    # If the total is close to what was fetched, don't show it; otherwise
    # round it up to the next 10k for display.
    if (math.fabs(njobs-jobsErrorsTotalCount)<1000):
        jobsErrorsTotalCount=None
    else:
        jobsErrorsTotalCount = int(math.ceil((jobsErrorsTotalCount+10000)/10000)*10000)
    request.session['max_age_minutes'] = 6
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        nosorturl = removeParam(request.get_full_path(), 'sortby')
        xurl = extensibleURL(request)
        jobsurl = xurlsubst.replace('/errors/', '/jobs/')
        jobsurlNoSite = xurlsubstNoSite.replace('/errors/', '')
        TFIRST = request.session['TFIRST']
        TLAST = request.session['TLAST']
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'prefix': getPrefix(request),
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'requestString': urlParametrs,
            'jobtype': jobtype,
            'njobs': njobs,
            'hours': LAST_N_HOURS_MAX,
            'limit': limit,
            'user': None,
            'xurl': xurl,
            'xurlsubst': xurlsubst,
            'xurlsubstNoSite': xurlsubstNoSite,
            'jobsurlNoSite': jobsurlNoSite,
            'jobsurl': jobsurl,
            'nosorturl': nosorturl,
            'errsByCount': errsByCount,
            'errsBySite': errsBySite,
            'errsByUser': errsByUser,
            'errsByTask': errsByTask,
            'sumd': sumd,
            'errHist': errHist,
            'tfirst': TFIRST,
            'tlast': TLAST,
            'sortby': sortby,
            'taskname': taskname,
            'flowstruct': flowstruct,
            'jobsErrorsTotalCount': jobsErrorsTotalCount,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        setCacheEntry(request, "errorSummary", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('errorSummary.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        resp = []
        for job in jobs:
            resp.append({'pandaid': job.pandaid, 'status': job.jobstatus, 'prodsourcelabel': job.prodsourcelabel,
                         'produserid': job.produserid})
        return HttpResponse(json.dumps(resp), content_type='text/html')
def removeParam(urlquery, parname, mode='complete'):
    """Remove a parameter from current query.

    Strips the last ``parname=value`` occurrence from *urlquery* and tidies
    up duplicated separators. With the default mode a trailing '?' or '&'
    is dropped as well; mode='extensible' keeps it so more parameters can
    be appended to the result.
    """
    urlquery = urlquery.replace('&&', '&')
    urlquery = urlquery.replace('?&', '?')
    # re.escape guards against parameter names containing regex metacharacters,
    # which would previously corrupt (or crash) the pattern.
    pstr = r'.*(%s=[a-zA-Z0-9\.\-]*).*' % re.escape(parname)
    pat = re.compile(pstr)
    mat = pat.match(urlquery)
    if mat:
        pstr = mat.group(1)
        urlquery = urlquery.replace(pstr, '')
        urlquery = urlquery.replace('&&', '&')
        urlquery = urlquery.replace('?&', '?')
    if mode != 'extensible':
        if urlquery.endswith('?') or urlquery.endswith('&'): urlquery = urlquery[:len(urlquery) - 1]
    return urlquery
def incidentList(request):
    """
    Incident list view: pulls Incidents rows over the requested time window,
    parses their free-text descriptions into category/site/notifier/comment
    attributes, builds parameter summaries and a half-hour histogram, and
    renders incidents.html (or returns the raw incidents as JSON).
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "incidents")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('incidents.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    # Time window: 'days' wins over 'hours'; default is 3 days.
    if 'days' in request.session['requestParams']:
        hours = int(request.session['requestParams']['days']) * 24
    else:
        if 'hours' not in request.session['requestParams']:
            hours = 24 * 3
        else:
            hours = int(request.session['requestParams']['hours'])
    setupView(request, hours=hours, limit=9999999)
    iquery = {}
    cloudQuery = Q()
    startdate = timezone.now() - timedelta(hours=hours)
    startdate = startdate.strftime(defaultDatetimeFormat)
    enddate = timezone.now().strftime(defaultDatetimeFormat)
    iquery['at_time__range'] = [startdate, enddate]
    # Filters below all match against the free-text description field.
    if 'site' in request.session['requestParams']:
        iquery['description__contains'] = 'queue=%s' % request.session['requestParams']['site']
    if 'category' in request.session['requestParams']:
        iquery['description__startswith'] = '%s:' % request.session['requestParams']['category']
    if 'comment' in request.session['requestParams']:
        iquery['description__contains'] = '%s' % request.session['requestParams']['comment']
    if 'notifier' in request.session['requestParams']:
        iquery['description__contains'] = 'DN=%s' % request.session['requestParams']['notifier']
    if 'cloud' in request.session['requestParams']:
        # Cloud filter expands to an OR over all sites in that cloud.
        sites = [site for site, cloud in homeCloud.items() if cloud == request.session['requestParams']['cloud']]
        for site in sites:
            cloudQuery = cloudQuery | Q(description__contains='queue=%s' % site)
    incidents = []
    incidents.extend(Incidents.objects.filter(**iquery).filter(cloudQuery).order_by('at_time').reverse().values())
    sumd = {}
    pars = {}
    incHist = {}
    for inc in incidents:
        desc = inc['description']
        desc = desc.replace('  ', ' ')
        # Expected shape: "<category>: queue=<site> DN=<notifier>   <comment>"
        parsmat = re.match('^([a-z\s]+):\s+queue=([^\s]+)\s+DN=(.*)\s\s\s*([A-Za-z^ \.0-9]*)$', desc)
        # Bucket incidents into half-hour bins for the histogram.
        tm = inc['at_time']
        tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
        if not tm in incHist: incHist[tm] = 0
        incHist[tm] += 1
        if parsmat:
            pars['category'] = parsmat.group(1)
            pars['site'] = parsmat.group(2)
            pars['notifier'] = parsmat.group(3)
            pars['type'] = inc['typekey']
            if homeCloud.has_key(pars['site']):
                pars['cloud'] = homeCloud[pars['site']]
            if parsmat.group(4): pars['comment'] = parsmat.group(4)
        else:
            # Fallback: take just the leading category (or first 10 chars).
            parsmat = re.match('^([A-Za-z\s]+):.*$', desc)
            if parsmat:
                pars['category'] = parsmat.group(1)
            else:
                pars['category'] = desc[:10]
        for p in pars:
            if p not in sumd:
                sumd[p] = {}
                sumd[p]['param'] = p
                sumd[p]['vals'] = {}
            if pars[p] not in sumd[p]['vals']:
                sumd[p]['vals'][pars[p]] = {}
                sumd[p]['vals'][pars[p]]['name'] = pars[p]
                sumd[p]['vals'][pars[p]]['count'] = 0
            sumd[p]['vals'][pars[p]]['count'] += 1
        ## convert incident components to URLs. Easier here than in the template.
        if 'site' in pars:
            inc['description'] = re.sub('queue=[^\s]+', 'queue=<a href="%ssite=%s">%s</a>' % (
                extensibleURL(request), pars['site'], pars['site']), inc['description'])
        inc['at_time'] = inc['at_time'].strftime(defaultDatetimeFormat)
    ## convert to ordered lists
    suml = []
    for p in sumd:
        itemd = {}
        itemd['param'] = p
        iteml = []
        kys = sumd[p]['vals'].keys()
        kys.sort(key=lambda y: y.lower())
        for ky in kys:
            iteml.append({'kname': ky, 'kvalue': sumd[p]['vals'][ky]['count']})
        itemd['list'] = iteml
        suml.append(itemd)
    suml = sorted(suml, key=lambda x: x['param'].lower())
    kys = incHist.keys()
    kys.sort()
    incHistL = []
    for k in kys:
        incHistL.append([k, incHist[k]])
    del request.session['TFIRST']
    del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'user': None,
            'incidents': incidents,
            'sumd': suml,
            'incHist': incHistL,
            'xurl': extensibleURL(request),
            'hours': hours,
            'ninc': len(incidents),
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "incidents", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('incidents.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        clearedInc = []
        for inc in incidents:
            entry = {}
            entry['at_time'] = inc['at_time'].isoformat()
            entry['typekey'] = inc['typekey']
            entry['description'] = inc['description']
            clearedInc.append(entry)
        jsonResp = json.dumps(clearedInc)
        return HttpResponse(jsonResp, content_type='text/html')
def esPandaLogger(request):
    """
    PanDA logger summary backed by Elasticsearch: aggregates today's
    pandalogger (and, if present, pandaloggerdev) index by logger name,
    message type and level, and renders esPandaLogger.html.
    """
    valid, response = initRequest(request)
    if not valid: return response
    from elasticsearch import Elasticsearch
    from elasticsearch_dsl import Search, Q
    es = Elasticsearch(
        hosts=[{'host': 'aianalytics01.cern.ch', 'port': 9200}],
        use_ssl=False,
        retry_on_timeout=True,
        max_retries=3
    )
    today = time.strftime("%Y-%m-%d")
    logindex = 'pandalogger-' + str(today)
    logindexdev = 'pandaloggerdev-' + str(today)
    # check if dev index exists
    indexdev = es.indices.exists(index=logindexdev)
    if indexdev:
        indices = [logindex, logindexdev]
    else:
        indices = [logindex]
    # Three-level terms aggregation: name -> type -> levelname.
    res = es.search(index=indices, fields=['@message.name', '@message.Type', '@message.levelname'], body={
        "aggs": {
            "name": {
                "terms": {"field": "@message.name"},
                "aggs": {
                    "type": {
                        "terms": {"field": "@message.Type"},
                        "aggs": {
                            "levelname": {
                                "terms": {"field": "@message.levelname"}
                            }
                        }
                    }
                }
            }
        }
    }
    )
    # Flatten the aggregation buckets into a nested dict for the template.
    log = {}
    for agg in res['aggregations']['name']['buckets']:
        name = agg['key']
        log[name] = {}
        for types in agg['type']['buckets']:
            type = types['key']
            log[name][type] = {}
            for levelnames in types['levelname']['buckets']:
                levelname = levelnames['key']
                log[name][type][levelname] = {}
                log[name][type][levelname]['levelname'] = levelname
                log[name][type][levelname]['lcount'] = str(levelnames['doc_count'])
    # print log
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'user': None,
        'log': log,
    }
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        response = render_to_response('esPandaLogger.html', data, RequestContext(request))
        return response
def pandaLogger(request):
    """
    PanDA logger view over the Pandalog table: per-name/type/level counts,
    optional record listing (when a narrowing filter is given), and a
    half-hour histogram. Renders pandaLogger.html or returns JSON.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # getrecs: only fetch individual log records when at least one narrowing
    # filter is supplied; otherwise show counts only.
    getrecs = False
    iquery = {}
    if 'category' in request.session['requestParams']:
        iquery['name'] = request.session['requestParams']['category']
        getrecs = True
    if 'type' in request.session['requestParams']:
        val = escapeInput(request.session['requestParams']['type'])
        iquery['type__in'] = val.split('|')
        getrecs = True
    if 'level' in request.session['requestParams']:
        iquery['levelname'] = request.session['requestParams']['level'].upper()
        getrecs = True
    if 'taskid' in request.session['requestParams']:
        iquery['message__startswith'] = request.session['requestParams']['taskid']
        getrecs = True
    if 'jeditaskid' in request.session['requestParams']:
        iquery['message__icontains'] = "jeditaskid=%s" % request.session['requestParams']['jeditaskid']
        getrecs = True
    if 'site' in request.session['requestParams']:
        iquery['message__icontains'] = "site=%s " % request.session['requestParams']['site']
        getrecs = True
    if 'pandaid' in request.session['requestParams']:
        iquery['pid'] = request.session['requestParams']['pandaid']
        getrecs = True
    # Wider default window (72h) when records are requested, else 24h.
    if 'hours' not in request.session['requestParams']:
        if getrecs:
            hours = 72
        else:
            hours = 24
    else:
        hours = int(request.session['requestParams']['hours'])
    setupView(request, hours=hours, limit=9999999)
    if 'startdate' in request.session['requestParams']:
        startdate = request.session['requestParams']['startdate']
    else:
        startdate = timezone.now() - timedelta(hours=hours)
        startdate = startdate.strftime(defaultDatetimeFormat)
    if 'enddate' in request.session['requestParams']:
        enddate = request.session['requestParams']['enddate']
    else:
        enddate = timezone.now().strftime(defaultDatetimeFormat)
    iquery['bintime__range'] = [startdate, enddate]
    print iquery
    counts = Pandalog.objects.filter(**iquery).values('name', 'type', 'levelname').annotate(
        Count('levelname')).order_by('name', 'type', 'levelname')
    if getrecs:
        records = Pandalog.objects.filter(**iquery).order_by('bintime').reverse()[
                  :request.session['JOB_LIMIT']].values()
        ## histogram of logs vs. time, for plotting
        logHist = {}
        for r in records:
            # Strip angle brackets so messages render safely in HTML.
            r['message'] = r['message'].replace('<', '')
            r['message'] = r['message'].replace('>', '')
            r['levelname'] = r['levelname'].lower()
            # Bucket into half-hour bins.
            tm = r['bintime']
            tm = tm - timedelta(minutes=tm.minute % 30, seconds=tm.second, microseconds=tm.microsecond)
            if not tm in logHist: logHist[tm] = 0
            logHist[tm] += 1
        kys = logHist.keys()
        kys.sort()
        logHistL = []
        for k in kys:
            logHistL.append([k, logHist[k]])
    else:
        records = None
        logHistL = None
    # Roll the flat count rows up into a name -> type -> level tree.
    # NOTE(review): local 'type' shadows the builtin here.
    logs = {}
    totcount = 0
    for inc in counts:
        name = inc['name']
        type = inc['type']
        level = inc['levelname']
        count = inc['levelname__count']
        totcount += count
        if name not in logs:
            logs[name] = {}
            logs[name]['name'] = name
            logs[name]['count'] = 0
            logs[name]['types'] = {}
        logs[name]['count'] += count
        if type not in logs[name]['types']:
            logs[name]['types'][type] = {}
            logs[name]['types'][type]['name'] = type
            logs[name]['types'][type]['count'] = 0
            logs[name]['types'][type]['levels'] = {}
        logs[name]['types'][type]['count'] += count
        if level not in logs[name]['types'][type]['levels']:
            logs[name]['types'][type]['levels'][level] = {}
            logs[name]['types'][type]['levels'][level]['name'] = level.lower()
            logs[name]['types'][type]['levels'][level]['count'] = 0
        logs[name]['types'][type]['levels'][level]['count'] += count
    ## convert to ordered lists
    logl = []
    for l in logs:
        itemd = {}
        itemd['name'] = logs[l]['name']
        itemd['types'] = []
        for t in logs[l]['types']:
            logs[l]['types'][t]['levellist'] = []
            for v in logs[l]['types'][t]['levels']:
                logs[l]['types'][t]['levellist'].append(logs[l]['types'][t]['levels'][v])
            logs[l]['types'][t]['levellist'] = sorted(logs[l]['types'][t]['levellist'], key=lambda x: x['name'])
            typed = {}
            typed['name'] = logs[l]['types'][t]['name']
            itemd['types'].append(logs[l]['types'][t])
        itemd['types'] = sorted(itemd['types'], key=lambda x: x['name'])
        logl.append(itemd)
    logl = sorted(logl, key=lambda x: x['name'])
    del request.session['TFIRST']
    del request.session['TLAST']
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'user': None,
        'logl': logl,
        'records': records,
        'ninc': totcount,
        'logHist': logHistL,
        'xurl': extensibleURL(request),
        'hours': hours,
        'getrecs': getrecs,
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('pandaLogger.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        resp = data
        return HttpResponse(json.dumps(resp, cls=DateEncoder), content_type='text/html')
# def percentile(N, percent, key=lambda x:x):
# """
# Find the percentile of a list of values.
#
# @parameter N - is a list of values. Note N MUST BE already sorted.
# @parameter percent - a float value from 0.0 to 1.0.
# @parameter key - optional key function to compute value from each element of N.
#
# @return - the percentile of the values
# """
# if not N:
# return None
# k = (len(N)-1) * percent
# f = math.floor(k)
# c = math.ceil(k)
# if f == c:
# return key(N[int(k)])
# d0 = key(N[int(f)]) * (c-k)
# d1 = key(N[int(c)]) * (k-f)
# return d0+d1
def ttc(request):
    """
    Time-to-complete (TTC) view for a single production JEDI task: builds
    the task event-completion profile against the requested TTC deadline
    and renders ttc.html. Returns a JSON error for missing/unsupported
    tasks.
    """
    valid, response = initRequest(request)
    if not valid: return response
    data = {}
    jeditaskid = -1
    if 'jeditaskid' in request.session['requestParams']:
        jeditaskid = int(request.session['requestParams']['jeditaskid'])
    if jeditaskid == -1:
        data = {"error": "no jeditaskid supplied"}
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    query = {'jeditaskid': jeditaskid}
    task = JediTasks.objects.filter(**query).values('jeditaskid', 'taskname', 'workinggroup', 'tasktype',
                                                    'processingtype', 'ttcrequested', 'starttime', 'endtime',
                                                    'creationdate', 'status')
    if len(task) == 0:
        data = {"error": ("jeditaskid " + str(jeditaskid) + " does not exist")}
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    taskrec = task[0]
    # Only production tasks with an explicit TTC request are supported.
    if taskrec['tasktype'] != 'prod' or taskrec['ttcrequested'] == None:
        data = {"error": "TTC for this type of task has not implemented yet"}
        return HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='text/html')
    taskrec['ttc'] = taskrec['ttcrequested']
    taskevents = GetEventsForTask.objects.filter(**query).values('jeditaskid', 'totev', 'totevrem')
    if len(taskevents) > 0:
        taskev = taskevents[0]
    # Task profile (per-job event progress) comes from a DB stored function.
    cur = connection.cursor()
    cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.GETTASKPROFILE('%s'))" % taskrec['jeditaskid'])
    taskprofiled = cur.fetchall()
    cur.close()
    keys = ['endtime', 'starttime', 'nevents', 'njob']
    # Seed the profile with a zero point at task start.
    taskprofile = [{'endtime': taskrec['starttime'], 'starttime': taskrec['starttime'], 'nevents': 0, 'njob': 0}]
    taskprofile = taskprofile + [dict(zip(keys, row)) for row in taskprofiled]
    # maxt: seconds from task start to the requested TTC deadline.
    maxt = (taskrec['ttc'] - taskrec['starttime']).days * 3600 * 24 + (taskrec['ttc'] - taskrec['starttime']).seconds
    neventsSum = 0
    for job in taskprofile:
        # ttccoldline: percent of the TTC budget remaining at this job's end time.
        job['ttccoldline'] = 100. - ((job['endtime'] - taskrec['starttime']).days * 3600 * 24 + (
            job['endtime'] - taskrec['starttime']).seconds) * 100 / float(maxt)
        job['endtime'] = job['endtime'].strftime("%Y-%m-%d %H:%M:%S")
        job['ttctime'] = job['endtime']
        job['starttime'] = job['starttime'].strftime("%Y-%m-%d %H:%M:%S")
        neventsSum += job['nevents']
        # tobedonepct: percent of total events still to be processed.
        job['tobedonepct'] = 100. - neventsSum * 100. / taskev['totev']
    # Append a final point at the TTC deadline itself.
    taskprofile.insert(len(taskprofile), {'endtime': taskprofile[len(taskprofile) - 1]['endtime'],
                                          'starttime': taskprofile[len(taskprofile) - 1]['starttime'],
                                          'ttctime': taskrec['ttc'].strftime("%Y-%m-%d %H:%M:%S"),
                                          'tobedonepct': taskprofile[len(taskprofile) - 1]['tobedonepct'],
                                          'ttccoldline': 0})
    progressForBar = []
    taskrec['percentage'] = ((neventsSum) * 100 / taskev['totev'])
    taskrec['percentageok'] = taskrec['percentage'] - 5
    if taskrec['status'] == 'running':
        # Elapsed wall time as a fraction of the creation->TTC budget, capped at 100.
        taskrec['ttcbasedpercentage'] = ((datetime.now() - taskrec['starttime']).days * 24 * 3600 + (
            datetime.now() - taskrec['starttime']).seconds) * 100 / (
                                            (taskrec['ttcrequested'] - taskrec['creationdate']).days * 24 * 3600 + (
                                                taskrec['ttcrequested'] - taskrec['creationdate']).seconds) if datetime.now() < \
                                                                                                               taskrec[
                                                                                                                   'ttc'] else 100
        progressForBar = [100, taskrec['percentage'], taskrec['ttcbasedpercentage']]
    data = {
        'request': request,
        'task': taskrec,
        'progressForBar': progressForBar,
        'profile': taskprofile,
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    response = render_to_response('ttc.html', data, RequestContext(request))
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
#@cache_page(60 * 20)
def workingGroups(request):
    """
    Working-group summary view: per-WG task summary plus per-WG job state
    counts (with failure percentage) over the recent window. Renders
    workingGroups.html; the json branch currently returns an empty list.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Here we try to get cached data
    data = getCacheEntry(request, "workingGroups")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('workingGroups.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    taskdays = 3
    # Oracle backend implies the ATLAS VO deployment; others get a longer window.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        VOMODE = 'atlas'
    else:
        VOMODE = ''
    if VOMODE != 'atlas':
        days = 30
    else:
        days = taskdays
    hours = days * 24
    query = setupView(request, hours=hours, limit=999999)
    query['workinggroup__isnull'] = False
    ## WG task summary
    tasksummary = wgTaskSummary(request, view='working group', taskdays=taskdays)
    ## WG job summary
    wgsummarydata = wgSummary(query)
    wgs = {}
    for rec in wgsummarydata:
        wg = rec['workinggroup']
        if wg == None: continue
        jobstatus = rec['jobstatus']
        count = rec['jobstatus__count']
        if wg not in wgs:
            wgs[wg] = {}
            wgs[wg]['name'] = wg
            wgs[wg]['count'] = 0
            wgs[wg]['states'] = {}
            wgs[wg]['statelist'] = []
            for state in statelist:
                wgs[wg]['states'][state] = {}
                wgs[wg]['states'][state]['name'] = state
                wgs[wg]['states'][state]['count'] = 0
        wgs[wg]['count'] += count
        wgs[wg]['states'][jobstatus]['count'] += count
    errthreshold = 15
    ## Convert dict to summary list
    wgkeys = wgs.keys()
    wgkeys.sort()
    wgsummary = []
    for wg in wgkeys:
        for state in statelist:
            wgs[wg]['statelist'].append(wgs[wg]['states'][state])
        # pctfail = failed / (finished + failed), as an integer percentage.
        if int(wgs[wg]['states']['finished']['count']) + int(wgs[wg]['states']['failed']['count']) > 0:
            wgs[wg]['pctfail'] = int(100. * float(wgs[wg]['states']['failed']['count']) / (
                wgs[wg]['states']['finished']['count'] + wgs[wg]['states']['failed']['count']))
        wgsummary.append(wgs[wg])
    if len(wgsummary) == 0: wgsummary = None
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        del request.session['TFIRST']
        del request.session['TLAST']
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'url': request.path,
            'xurl': xurl,
            'user': None,
            'wgsummary': wgsummary,
            'taskstates': taskstatedict,
            'tasksummary': tasksummary,
            'hours': hours,
            'days': days,
            'errthreshold': errthreshold,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        setCacheEntry(request, "workingGroups", json.dumps(data, cls=DateEncoder), 60 * 20)
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('workingGroups.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        del request.session['TFIRST']
        del request.session['TLAST']
        resp = []
        return HttpResponse(json.dumps(resp), content_type='text/html')
def datasetInfo(request):
    """
    Single-dataset detail view: looks the dataset up by name or id in
    JediDatasets, falls back to the legacy Datasets table (last 30 days),
    and renders datasetInfo.html with the record's columns (or JSON).
    """
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request, hours=365 * 24, limit=999999999)
    query = {}
    dsets = []
    dsrec = None
    colnames = []
    columns = []
    if 'datasetname' in request.session['requestParams']:
        dataset = request.session['requestParams']['datasetname']
        query['datasetname'] = request.session['requestParams']['datasetname']
    elif 'datasetid' in request.session['requestParams']:
        dataset = request.session['requestParams']['datasetid']
        query['datasetid'] = request.session['requestParams']['datasetid']
    else:
        dataset = None
    if 'jeditaskid' in request.session['requestParams']:
        query['jeditaskid'] = int(request.session['requestParams']['jeditaskid'])
    if dataset:
        dsets = JediDatasets.objects.filter(**query).values()
        if len(dsets) == 0:
            # Fall back to the legacy Datasets table over the last 30 days,
            # remapping its column names onto the JEDI-style keys.
            startdate = timezone.now() - timedelta(hours=30 * 24)
            startdate = startdate.strftime(defaultDatetimeFormat)
            enddate = timezone.now().strftime(defaultDatetimeFormat)
            query = {'modificationdate__range': [startdate, enddate]}
            if 'datasetname' in request.session['requestParams']:
                query['name'] = request.session['requestParams']['datasetname']
            elif 'datasetid' in request.session['requestParams']:
                query['vuid'] = request.session['requestParams']['datasetid']
            moredsets = Datasets.objects.filter(**query).values()
            if len(moredsets) > 0:
                dsets = moredsets
                for ds in dsets:
                    ds['datasetname'] = ds['name']
                    ds['creationtime'] = ds['creationdate']
                    ds['modificationtime'] = ds['modificationdate']
                    ds['nfiles'] = ds['numberfiles']
                    ds['datasetid'] = ds['vuid']
    if len(dsets) > 0:
        dsrec = dsets[0]
        dataset = dsrec['datasetname']
        colnames = dsrec.keys()
        colnames.sort()
        for k in colnames:
            val = dsrec[k]
            if dsrec[k] == None:
                val = ''
                # NOTE(review): this 'continue' skips null columns entirely,
                # which makes the val = '' assignment above dead code — confirm intent.
                continue
            pair = {'name': k, 'value': val}
            columns.append(pair)
    del request.session['TFIRST']
    del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'dsrec': dsrec,
            'datasetname': dataset,
            'columns': columns,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('datasetInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(json.dumps(dsrec), content_type='text/html')
def datasetList(request):
    """
    List JEDI datasets selected by the 'jeditaskid' and/or 'containername'
    request parameters, sorted case-insensitively by dataset name.

    Renders datasetList.html for browser requests; for json clients returns
    the matching dataset rows serialized as JSON.
    """
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request, hours=365 * 24, limit=999999999)
    query = {}
    dsets = []
    for par in ('jeditaskid', 'containername'):
        if par in request.session['requestParams']:
            query[par] = request.session['requestParams'][par]
    if len(query) > 0:
        dsets = JediDatasets.objects.filter(**query).values()
        dsets = sorted(dsets, key=lambda x: x['datasetname'].lower())
    del request.session['TFIRST']
    del request.session['TLAST']
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        data = {
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'datasets': dsets,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('datasetList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # Fix: this branch used to reference the undefined name 'dsrec'
        # (copy-paste from datasetInfo) and raised NameError. Return the
        # selected rows instead, with DateEncoder for datetime fields.
        return HttpResponse(json.dumps(list(dsets), cls=DateEncoder), content_type='text/html')
def fileInfo(request):
    """
    View showing detailed information about file(s) selected by filename/lfn,
    fileid or guid, optionally narrowed by pandaid, jeditaskid, scope and a
    creation-date window (default: last year). Renders fileInfo.html, or
    returns the records as JSON when requested.
    """
    # Table names differ between the Oracle production schema and other backends.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        JediDatasetsTableName = "ATLAS_PANDA.JEDI_DATASETS"
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
    else:
        JediDatasetsTableName = "JEDI_DATASETS"
        tmpTableName = "TMP_IDS1"
    random.seed()
    # Random key isolating this request's rows in the shared temporary ID table.
    transactionKey = random.randrange(1000000)
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request, hours=365 * 24, limit=999999999)
    query = {}
    files = []
    frec = None
    colnames = []
    columns = []
    # The file may be identified by any one of several alternative parameters.
    if 'filename' in request.session['requestParams']:
        file = request.session['requestParams']['filename']
        query['lfn'] = request.session['requestParams']['filename']
    elif 'lfn' in request.session['requestParams']:
        file = request.session['requestParams']['lfn']
        query['lfn'] = request.session['requestParams']['lfn']
    elif 'fileid' in request.session['requestParams']:
        file = request.session['requestParams']['fileid']
        query['fileid'] = request.session['requestParams']['fileid']
    elif 'guid' in request.session['requestParams']:
        file = request.session['requestParams']['guid']
        query['guid'] = request.session['requestParams']['guid']
    else:
        file = None
    # Creation-date window: date_from/date_to parameters, defaulting to the last year.
    startdate = None
    if 'date_from' in request.session['requestParams']:
        time_from_struct = time.strptime(request.session['requestParams']['date_from'], '%Y-%m-%d')
        startdate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
    if not startdate:
        startdate = timezone.now() - timedelta(hours=365 * 24)
    enddate = None
    if 'date_to' in request.session['requestParams']:
        time_from_struct = time.strptime(request.session['requestParams']['date_to'], '%Y-%m-%d')
        enddate = datetime.utcfromtimestamp(time.mktime(time_from_struct))
    if enddate is None:
        enddate = timezone.now()
    query['creationdate__range'] = [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)]
    if 'pandaid' in request.session['requestParams'] and request.session['requestParams']['pandaid'] != '':
        query['pandaid'] = request.session['requestParams']['pandaid']
    if 'jeditaskid' in request.session['requestParams'] and request.session['requestParams']['jeditaskid'] != '':
        query['jeditaskid'] = request.session['requestParams']['jeditaskid']
    if 'scope' in request.session['requestParams']:
        query['scope'] = request.session['requestParams']['scope']
    # Bug fix: the original indexed query['pandaid'] / query['jeditaskid']
    # directly, raising KeyError whenever those optional parameters were absent.
    if file or ('pandaid' in query) or ('jeditaskid' in query):
        files = JediDatasetContents.objects.filter(**query).values()
        if len(files) == 0:
            # Fall back to the PanDA file tables (live, then archived), which
            # use modificationtime instead of creationdate.
            del query['creationdate__range']
            query['modificationtime__range'] = [startdate.strftime(defaultDatetimeFormat),
                                                enddate.strftime(defaultDatetimeFormat)]
            morefiles = Filestable4.objects.filter(**query).values()
            if len(morefiles) == 0:
                morefiles = FilestableArch.objects.filter(**query).values()
            if len(morefiles) > 0:
                files = morefiles
                # Normalize old-table records to the JEDI field names the
                # template expects.
                for f in files:
                    f['creationdate'] = f['modificationtime']
                    f['fileid'] = f['row_id']
                    f['datasetname'] = f['dataset']
                    f['oldfiletable'] = 1
    # Resolve dataset names in bulk via the shared temp ID table
    # (avoids passing a huge IN list of dataset ids).
    new_cur = connection.cursor()
    executionData = []
    for frow in files:
        executionData.append((frow['datasetid'], transactionKey))
    insertQuery = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(insertQuery, executionData)
    new_cur.execute(
        "SELECT DATASETNAME,DATASETID FROM %s WHERE DATASETID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
            JediDatasetsTableName, tmpTableName, transactionKey))
    mrecs = dictfetchall(new_cur)
    mrecsDict = {}
    for mrec in mrecs:
        mrecsDict[mrec['DATASETID']] = mrec['DATASETNAME']
    for f in files:
        f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
        if 'datasetid' in f and f['datasetid'] in mrecsDict and mrecsDict[f['datasetid']]:
            f['datasetname'] = mrecsDict[f['datasetid']]
    if len(files) > 0:
        # Use the most recent attempt's record for the per-column detail view.
        files = sorted(files, key=lambda x: x['pandaid'], reverse=True)
        frec = files[0]
        file = frec['lfn']
        colnames = sorted(frec.keys())
        for k in colnames:
            val = frec[k]
            if frec[k] is None:
                # Columns with no value are omitted from the detail table.
                continue
            pair = {'name': k, 'value': val}
            columns.append(pair)
    del request.session['TFIRST']
    del request.session['TLAST']
    # Event numbers are stored 0-based; display them 1-based.
    for file_ in files:
        if 'startevent' in file_:
            if (file_['startevent'] != None):
                file_['startevent'] += 1
        if 'endevent' in file_:
            if (file_['endevent'] != None):
                file_['endevent'] += 1
    if ((len(files) > 0) and ('jeditaskid' in files[0]) and ('startevent' in files[0]) and (
            files[0]['jeditaskid'] != None)):
        files = sorted(files, key=lambda k: (-k['jeditaskid'], k['startevent']))
    # Plain HTML unless the client asked for JSON via Accept header or ?json.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'frec': frec,
            'files': files,
            'filename': file,
            'columns': columns,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        data.update(getContextVariables(request))
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('fileInfo.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        data = {
            'frec': frec,
            'files': files,
            'filename': file,
            'columns': columns,
        }
        return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
def fileList(request):
    """
    View listing the files of a JEDI dataset selected by 'datasetname' or
    'datasetid'. Supports sorting via sortby=<column>-<asc|desc> and a row
    limit (default 100). Renders fileList.html, or returns the rows as JSON
    when requested.
    """
    valid, response = initRequest(request)
    if not valid: return response
    setupView(request, hours=365 * 24, limit=999999999)
    query = {}
    files = []
    frec = None
    colnames = []
    columns = []
    datasetname = ''
    datasetid = 0
    #### NOTE: lookup by name is risky when the dataset name is not unique in the table
    if 'datasetname' in request.session['requestParams']:
        datasetname = request.session['requestParams']['datasetname']
        dsets = JediDatasets.objects.filter(datasetname=datasetname).values()
        if len(dsets) > 0:
            datasetid = dsets[0]['datasetid']
    elif 'datasetid' in request.session['requestParams']:
        datasetid = request.session['requestParams']['datasetid']
        dsets = JediDatasets.objects.filter(datasetid=datasetid).values()
        if len(dsets) > 0:
            datasetname = dsets[0]['datasetname']
    files = []
    limit = 100
    if 'limit' in request.session['requestParams']:
        limit = int(request.session['requestParams']['limit'])
    sortOrder = None
    reverse = None
    sortby = ''
    # Map the sortable display columns onto model fields. This replaces the old
    # copy-paste elif chain, which also contained a bug: 'fileid-desc' sorted by
    # 'jeditaskid' instead of 'fileid'.
    sortFieldMap = {
        'lfn': 'lfn', 'scope': 'scope', 'type': 'type', 'fsizemb': 'fsize',
        'nevents': 'nevents', 'jeditaskid': 'jeditaskid', 'fileid': 'fileid',
        'attemptnr': 'attemptnr', 'status': 'status',
        'creationdate': 'creationdate', 'pandaid': 'pandaid',
    }
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        sortColumn, _, sortDirection = sortby.partition('-')
        if sortColumn in sortFieldMap and sortDirection in ('asc', 'desc'):
            sortOrder = sortFieldMap[sortColumn]
            if sortDirection == 'desc':
                reverse = True
        else:
            # Unrecognized sortby values fall back to sorting by lfn ascending.
            sortOrder = 'lfn'
    if datasetid > 0:
        query['datasetid'] = datasetid
        # Fetch limit+1 rows so we can tell whether the limit was exceeded.
        if (reverse):
            files = JediDatasetContents.objects.filter(**query).values().order_by(sortOrder).reverse()[:limit + 1]
        else:
            files = JediDatasetContents.objects.filter(**query).values().order_by(sortOrder)[:limit + 1]
    if len(files) > limit:
        limitexceeded = True
    else:
        limitexceeded = False
    files = files[:limit]
    for f in files:
        f['fsizemb'] = "%0.2f" % (f['fsize'] / 1000000.)
    pandaids = []
    for f in files:
        pandaids.append(f['pandaid'])
    query = {}
    filesFromFileTable = []
    filesFromFileTableDict = {}
    query['pandaid__in'] = pandaids
    # Pull the dispatch/destination block info needed to build Rucio links.
    filesFromFileTable.extend(
        Filestable4.objects.filter(**query).values('fileid', 'dispatchdblock', 'scope', 'destinationdblock'))
    if len(filesFromFileTable) == 0:
        filesFromFileTable.extend(
            FilestableArch.objects.filter(**query).values('fileid', 'dispatchdblock', 'scope', 'destinationdblock'))
    if len(filesFromFileTable) > 0:
        for f in filesFromFileTable:
            filesFromFileTableDict[f['fileid']] = f
    ## Count the number of distinct files (by lfn) and attach a Rucio link to each row.
    filed = {}
    for f in files:
        filed[f['lfn']] = 1
        ruciolink = ""
        if f['fileid'] in filesFromFileTableDict:
            if len(filesFromFileTableDict[f['fileid']]['dispatchdblock']) > 0:
                ruciolink = 'https://rucio-ui.cern.ch/did?scope=panda&name=' + filesFromFileTableDict[f['fileid']][
                    'dispatchdblock']
            else:
                if len(filesFromFileTableDict[f['fileid']]['destinationdblock']) > 0:
                    ruciolink = 'https://rucio-ui.cern.ch/did?scope=' + filesFromFileTableDict[f['fileid']][
                        'scope'] + '&name=' + filesFromFileTableDict[f['fileid']]['destinationdblock']
        f['rucio'] = ruciolink
    nfiles = len(filed)
    del request.session['TFIRST']
    del request.session['TLAST']
    # Plain HTML unless the client asked for JSON via Accept header or ?json.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        xurl = extensibleURL(request)
        nosorturl = removeParam(xurl, 'sortby', mode='extensible')
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'files': files,
            'nfiles': nfiles,
            'nosorturl': nosorturl,
            'sortby': sortby,
            'limitexceeded': limitexceeded,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        data.update(getContextVariables(request))
        response = render_to_response('fileList.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # Use DateEncoder so datetime columns serialize instead of raising TypeError.
        return HttpResponse(json.dumps(list(files), cls=DateEncoder), content_type='text/html')
#@cache_page(60 * 20)
def workQueues(request):
    """
    View listing the JEDI work queues, filterable by any JediWorkQueue model
    field. Serves a cached copy when one exists; otherwise queries the table,
    renders workQueues.html (or JSON), and refreshes the cache.
    """
    valid, response = initRequest(request)
    # Serve straight from the page cache when a recent entry exists.
    data = getCacheEntry(request, "workQueues")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('workQueues.html', data, RequestContext(request))
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
    if not valid: return response
    setupView(request, hours=180 * 24, limit=9999999)
    query = {}
    # Accept any JediWorkQueue model field name as a filter parameter.
    for param in request.session['requestParams']:
        for field in JediWorkQueue._meta.get_fields():
            if param == field.name:
                query[param] = request.session['requestParams'][param]
    queues = []
    queues.extend(JediWorkQueue.objects.filter(**query).order_by('queue_type', 'queue_order').values())
    del request.session['TFIRST']
    del request.session['TLAST']
    # Plain HTML unless the client asked for JSON via Accept header or ?json.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'queues': queues,
            'xurl': extensibleURL(request),
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('workQueues.html', data, RequestContext(request))
        # Cache the context for 20 minutes so later hits take the fast path above.
        setCacheEntry(request, "workQueues", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(json.dumps(queues), content_type='text/html')
def stateNotUpdated(request, state='transferring', hoursSinceUpdate=36, values=standard_fields, count=False,
                    wildCardExtension='(1=1)'):
    """
    Find jobs stuck in *state* (default 'transferring') whose statechangetime
    is older than *hoursSinceUpdate* hours, across the three live job tables.

    When count is True, return (total, per-home-cloud counts, per-resource-cloud
    counts); otherwise return the matching job records restricted to *values*.
    """
    valid, response = initRequest(request)
    if not valid: return response
    query = setupView(request, opmode='notime', limit=99999999)
    # Request parameters may override the defaults.
    if 'jobstatus' in request.session['requestParams']: state = request.session['requestParams']['jobstatus']
    if 'transferringnotupdated' in request.session['requestParams']: hoursSinceUpdate = int(
        request.session['requestParams']['transferringnotupdated'])
    if 'statenotupdated' in request.session['requestParams']: hoursSinceUpdate = int(
        request.session['requestParams']['statenotupdated'])
    moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
    moddate = moddate.strftime(defaultDatetimeFormat)
    # mindate is currently unused: the __gte constraint below is commented out.
    mindate = timezone.now() - timedelta(hours=24 * 30)
    mindate = mindate.strftime(defaultDatetimeFormat)
    query['statechangetime__lte'] = moddate
    # query['statechangetime__gte'] = mindate
    query['jobstatus'] = state
    if count:
        # Aggregate per (cloud, computingsite, jobstatus) over active, defined
        # and waiting job tables.
        jobs = []
        jobs.extend(
            Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
                                                                                        'jobstatus').annotate(
                Count('jobstatus')))
        jobs.extend(
            Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
                                                                                         'jobstatus').annotate(
                Count('jobstatus')))
        jobs.extend(
            Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values('cloud', 'computingsite',
                                                                                        'jobstatus').annotate(
                Count('jobstatus')))
        ncount = 0
        perCloud = {}
        perRCloud = {}
        # Pre-seed every known cloud so zero-count clouds still appear.
        for cloud in cloudList:
            perCloud[cloud] = 0
            perRCloud[cloud] = 0
        for job in jobs:
            # perCloud: tally by the cloud the site belongs to (home cloud);
            # only sites with a known home cloud contribute here.
            site = job['computingsite']
            if site in homeCloud:
                cloud = homeCloud[site]
                if not cloud in perCloud:
                    perCloud[cloud] = 0
                perCloud[cloud] += job['jobstatus__count']
            # perRCloud: tally by the cloud recorded on the job record itself.
            cloud = job['cloud']
            if not cloud in perRCloud:
                perRCloud[cloud] = 0
            perRCloud[cloud] += job['jobstatus__count']
            ncount += job['jobstatus__count']
        # Convert the dicts into name-sorted lists for the templates.
        perCloudl = []
        for c in perCloud:
            pcd = {'name': c, 'count': perCloud[c]}
            perCloudl.append(pcd)
        perCloudl = sorted(perCloudl, key=lambda x: x['name'])
        perRCloudl = []
        for c in perRCloud:
            pcd = {'name': c, 'count': perRCloud[c]}
            perRCloudl.append(pcd)
        perRCloudl = sorted(perRCloudl, key=lambda x: x['name'])
        return ncount, perCloudl, perRCloudl
    else:
        jobs = []
        jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
        jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
        jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[wildCardExtension]).values(*values))
        return jobs
def taskNotUpdated(request, query, state='submitted', hoursSinceUpdate=36, values=None, count=False,
                   wildCardExtension='(1=1)'):
    """
    Find JEDI tasks stuck in *state* whose statechangetime is older than
    *hoursSinceUpdate* hours.

    When count is True, return (total, per-state count dict keyed by
    taskstatelist); otherwise return the matching task records.
    The *values* parameter is kept for interface compatibility but is unused.
    """
    valid, response = initRequest(request)
    if not valid: return response
    # Request parameters may override the defaults.
    if 'status' in request.session['requestParams']: state = request.session['requestParams']['status']
    if 'statenotupdated' in request.session['requestParams']: hoursSinceUpdate = int(
        request.session['requestParams']['statenotupdated'])
    moddate = timezone.now() - timedelta(hours=hoursSinceUpdate)
    moddate = moddate.strftime(defaultDatetimeFormat)
    query['statechangetime__lte'] = moddate
    query['status'] = state
    if count:
        tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values('name', 'status').annotate(
            Count('status'))
        # Pre-seed every known task state so zero-count states still appear.
        statecounts = {}
        for s in taskstatelist:
            statecounts[s] = {}
            statecounts[s]['count'] = 0
            statecounts[s]['name'] = s
        ncount = 0
        for task in tasks:
            state = task['status']
            statecounts[state]['count'] += task['status__count']
            # Bug fix: the original read job['status__count'] here — 'job' is
            # not defined in this function, so the count branch always raised
            # NameError.
            ncount += task['status__count']
        return ncount, statecounts
    else:
        tasks = JediTasks.objects.filter(**query).extra(where=[wildCardExtension]).values()
        return tasks
def getErrorDescription(job, mode='html', provideProcessedCodes=False):
    """
    Build a human-readable description of the error(s) recorded on *job*.

    If the job carries pilot metadata ('metastruct') with a non-zero exitCode,
    that verdict is returned directly. Otherwise every known error-code field
    (from the module-level errorCodes table) is inspected and described.

    :param mode: 'html' appends bold-tagged fragments; any other value keeps
                 only the last error as plain text.
    :param provideProcessedCodes: when True, also return the list of numeric
                 codes that were described.
    """
    txt = ''
    codesDescribed = []
    # Prefer the pilot metadata verdict when present and non-zero.
    if 'metastruct' in job and job['metastruct']['exitCode'] != 0:
        meta = job['metastruct']
        txt += "%s: %s" % (meta['exitAcronym'], meta['exitMsg'])
        if provideProcessedCodes:
            return txt, codesDescribed
        else:
            return txt
    for errcode in errorCodes.keys():
        errval = 0
        # 'has_key' was removed in Python 3; 'in' is the equivalent.
        if errcode in job:
            errval = job[errcode]
        if errval != 0 and errval != '0' and errval is not None and errval != '':
            try:
                errval = int(errval)
            except (ValueError, TypeError):
                pass  # non-numeric code: leave it as-is
            codesDescribed.append(errval)
            # Prefer the corresponding free-text diagnostic field when it exists.
            errdiag = errcode.replace('errorcode', 'errordiag')
            if errcode.find('errorcode') > 0:
                diagtxt = job[errdiag]
            else:
                diagtxt = ''
            if len(diagtxt) > 0:
                desc = diagtxt
            elif errval in errorCodes[errcode]:
                desc = errorCodes[errcode][errval]
            else:
                desc = "Unknown %s error code %s" % (errcode, errval)
            errname = errcode.replace('errorcode', '')
            errname = errname.replace('exitcode', '')
            if mode == 'html':
                txt += " <b>%s, %d:</b> %s" % (errname, errval, desc)
            else:
                # NOTE(review): non-html mode overwrites instead of appending,
                # so callers see only the last matching error — looks
                # intentional but worth confirming.
                txt = "%s, %d: %s" % (errname, errval, desc)
    if provideProcessedCodes:
        return txt, codesDescribed
    else:
        return txt
def getPilotCounts(view):
    """
    Summarize pilot activity per site over the last 3 hours for the given
    site-data flag: {site: {'count': getjob + updatejob, 'time': lastmod}}.
    """
    rows = Sitedata.objects.filter(flag=view, hours=3).values()
    summary = {}
    for row in rows:
        summary[row['site']] = {
            'count': row['getjob'] + row['updatejob'],
            'time': row['lastmod'],
        }
    return summary
def taskNameDict(jobs):
    """
    Return a dict mapping jeditaskid -> taskname for every JEDI task
    referenced by the given job records. Needed because the job models do not
    provide foreign keys to task records.
    """
    jeditaskids = {}
    for job in jobs:
        if 'jeditaskid' in job and job['jeditaskid'] and job['jeditaskid'] > 0:
            jeditaskids[job['jeditaskid']] = 1
    tasknamedict = {}
    if len(jeditaskids) > 0:
        tq = {'jeditaskid__in': list(jeditaskids.keys())}
        jeditasks = JediTasks.objects.filter(**tq).values('taskname', 'jeditaskid')
        for t in jeditasks:
            tasknamedict[t['jeditaskid']] = t['taskname']
    # The old-style 'taskid' collection and the commented-out Etask lookup were
    # dead code and have been removed.
    return tasknamedict
class DateEncoder(json.JSONEncoder):
    """
    JSON encoder that serializes datetime/date/time objects via isoformat()
    and falls back to str() for any other non-JSON-serializable object
    (instead of raising TypeError).
    """
    def default(self, obj):
        # Anything exposing isoformat() (datetime, date, time) serializes cleanly.
        if hasattr(obj, 'isoformat'):
            return obj.isoformat()
        # Best-effort fallback: stringify rather than fail. (The original had an
        # unreachable call to the base class default() after this if/else.)
        return str(obj)
def getFilePathForObjectStore(objectstore, filetype="logs"):
    """
    Return the base file path for *filetype* in the given object store
    specification, or "" when it cannot be determined.

    Two field formats are supported:
    - single store:   root://host/|eventservice^/atlas/eventservice|logs^/atlas/logs
      (first token is the base URL, later tokens are "<type>^<path>" suffixes)
    - multiple stores: eventservice^root://hostA//...|logs^root://hostB//...
      (each token is "<type>^<full url>")
    """
    basepath = ""
    # Which form of the schedconfig.objectstore field do we currently have?
    if objectstore != "":
        _objectstore = objectstore.split("|")
        if "^" in _objectstore[0]:
            # Multiple object stores: pick the token whose type matches.
            for obj in _objectstore:
                if obj[:len(filetype)] == filetype:
                    basepath = obj.split("^")[1]
                    break
        else:
            # Single object store: join the base URL with the matching suffix.
            _objectstore = objectstore.split("|")
            url = _objectstore[0]
            for obj in _objectstore:
                if obj[:len(filetype)] == filetype:
                    basepath = obj.split("^")[1]
                    break
            if basepath != "":
                # Avoid a double slash when concatenating URL and path.
                if url.endswith('/') and basepath.startswith('/'):
                    basepath = url + basepath[1:]
                else:
                    basepath = url + basepath
        if basepath == "":
            # Single-argument print() behaves identically on Python 2 and 3,
            # unlike the original py2-only print statements.
            print("Object store path could not be extracted using file type '%s' from objectstore='%s'" % (
                filetype, objectstore))
    else:
        print("Object store not defined in queuedata")
    return basepath
def buildGoogleFlowDiagram(request, jobs=None, tasks=None):
    """
    Set up the data structure for a Google Sankey ('flow') diagram over either
    *jobs* or *tasks*. Returns None unless the request carries a 'flow'
    parameter and at least one of the two lists is non-empty.
    """
    # Fix: the original used mutable default arguments (jobs=[], tasks=[]).
    if jobs is None:
        jobs = []
    if tasks is None:
        tasks = []
    if 'requestParams' not in request.session or 'flow' not in request.session['requestParams']: return None
    flowstruct = {}
    if len(jobs) > 0:
        flowstruct['maxweight'] = len(jobs)
        flowrows = buildGoogleJobFlow(jobs)
    elif len(tasks) > 0:
        flowstruct['maxweight'] = len(tasks)
        flowrows = buildGoogleTaskFlow(request, tasks)
    else:
        return None
    flowstruct['columns'] = [['string', 'From'], ['string', 'To'], ['number', 'Weight']]
    # Cap the row count to keep the chart payload manageable.
    flowstruct['rows'] = flowrows[:3000]
    return flowstruct
def buildGoogleJobFlow(jobs):
    """
    Build Sankey diagram rows for *jobs* along the chain:
    MCP cloud -> home cloud -> site -> error/status -> processing type.
    Sites/errors/processing types with a small share of jobs are folded into
    'Other ...' buckets.
    """
    cloudd = {}
    mcpcloudd = {}
    mcpshownd = {}
    errd = {}
    errshownd = {}
    sited = {}
    statd = {}
    errcountd = {}
    sitecountd = {}
    siteshownd = {}
    ptyped = {}
    ptypecountd = {}
    ptypeshownd = {}
    # First pass: tally the jobs along every diagram dimension.
    for job in jobs:
        errinfo = errorInfo(job, nchars=40, mode='string')
        jobstatus = job['jobstatus']
        # Non-failed states are labeled by the state name itself.
        for js in ('finished', 'holding', 'merging', 'running', 'cancelled', 'transferring', 'starting'):
            if jobstatus == js: errinfo = js
        if errinfo not in errcountd: errcountd[errinfo] = 0
        errcountd[errinfo] += 1
        cloud = job['homecloud']
        mcpcloud = job['cloud']
        ptype = job['processingtype']
        if ptype not in ptypecountd: ptypecountd[ptype] = 0
        ptypecountd[ptype] += 1
        site = job['computingsite']
        if site not in sitecountd: sitecountd[site] = 0
        sitecountd[site] += 1
        if cloud not in cloudd: cloudd[cloud] = {}
        if site not in cloudd[cloud]: cloudd[cloud][site] = 0
        cloudd[cloud][site] += 1
        if mcpcloud not in mcpcloudd: mcpcloudd[mcpcloud] = {}
        if cloud not in mcpcloudd[mcpcloud]: mcpcloudd[mcpcloud][cloud] = 0
        mcpcloudd[mcpcloud][cloud] += 1
        if jobstatus not in errd: errd[jobstatus] = {}
        if errinfo not in errd[jobstatus]: errd[jobstatus][errinfo] = 0
        errd[jobstatus][errinfo] += 1
        if site not in sited: sited[site] = {}
        if errinfo not in sited[site]: sited[site][errinfo] = 0
        sited[site][errinfo] += 1
        if jobstatus not in statd: statd[jobstatus] = {}
        if errinfo not in statd[jobstatus]: statd[jobstatus][errinfo] = 0
        statd[jobstatus][errinfo] += 1
        if ptype not in ptyped: ptyped[ptype] = {}
        if errinfo not in ptyped[ptype]: ptyped[ptype][errinfo] = 0
        ptyped[ptype][errinfo] += 1
    flowrows = []
    # MCP cloud -> home cloud edges.
    for mcpcloud in mcpcloudd:
        for cloud in mcpcloudd[mcpcloud]:
            n = mcpcloudd[mcpcloud][cloud]
            if float(n) / len(jobs) > 0.0:
                mcpshownd[mcpcloud] = 1
                flowrows.append(["%s MCP" % mcpcloud, cloud, n])
    othersited = {}
    othersiteErrd = {}
    # Home cloud -> site edges; sites holding <=1% of jobs go to 'Other sites'.
    for cloud in cloudd:
        if cloud not in mcpshownd: continue
        for e in cloudd[cloud]:
            n = cloudd[cloud][e]
            if float(sitecountd[e]) / len(jobs) > .01:
                siteshownd[e] = 1
                flowrows.append([cloud, e, n])
            else:
                flowrows.append([cloud, 'Other sites', n])
                othersited[e] = n
    # for jobstatus in errd:
    #     for errinfo in errd[jobstatus]:
    #         flowrows.append( [ errinfo, jobstatus, errd[jobstatus][errinfo] ] )
    # Errors holding more than 1% of jobs are shown individually.
    for e in errcountd:
        if float(errcountd[e]) / len(jobs) > .01:
            errshownd[e] = 1
    # Site -> error edges (small sites/errors folded into 'Other ...').
    for site in sited:
        nother = 0  # NOTE(review): never accumulated or read — apparently vestigial.
        for e in sited[site]:
            n = sited[site][e]
            if site in siteshownd:
                sitename = site
            else:
                sitename = "Other sites"
            if e in errshownd:
                errname = e
            else:
                errname = 'Other errors'
            flowrows.append([sitename, errname, n])
            # NOTE(review): accumulated for every site, not only 'Other sites';
            # only consumed by the commented-out block below.
            if errname not in othersiteErrd: othersiteErrd[errname] = 0
            othersiteErrd[errname] += n
    # for e in othersiteErrd:
    #     if e in errshownd:
    #         flowrows.append( [ 'Other sites', e, othersiteErrd[e] ] )
    # Error -> processing-type edges (types with <=5% of jobs folded together).
    for ptype in ptyped:
        if float(ptypecountd[ptype]) / len(jobs) > .05:
            ptypeshownd[ptype] = 1
            ptname = ptype
        else:
            ptname = "Other processing types"
        for e in ptyped[ptype]:
            n = ptyped[ptype][e]
            if e in errshownd:
                flowrows.append([e, ptname, n])
            else:
                flowrows.append(['Other errors', ptname, n])
    return flowrows
def buildGoogleTaskFlow(request, tasks):
    """
    Build Sankey diagram rows for *tasks* along the chain:
    processing type -> request -> task status -> substatus -> file status -> cloud.
    For analysis tasks the processing-type/request stages are skipped.
    Weights are file counts from each task's dsinfo.
    """
    analysis = False
    if 'requestParams' in request.session:
        analysis = 'tasktype' in request.session['requestParams'] and request.session['requestParams'][
            'tasktype'].startswith('anal')
    ptyped = {}
    reqd = {}
    statd = {}
    substatd = {}
    trfd = {}
    filestatd = {}
    cloudd = {}
    reqsized = {}
    reqokd = {}
    ## count the reqid's. Use only the biggest (in file count) if too many.
    for task in tasks:
        if not analysis and 'deftreqid' not in task: continue
        req = int(task['reqid'])
        dsinfo = task['dsinfo']
        nfiles = dsinfo['nfiles']
        if req not in reqsized: reqsized[req] = 0
        reqsized[req] += nfiles
        ## Veto requests that are all done etc.
        if task['superstatus'] != 'done': reqokd[req] = 1
    if not analysis:
        for req in reqsized:
            # de-prioritize requests not specifically OK'd for inclusion
            if req not in reqokd: reqsized[req] = 0
    # Keep at most nmaxreq requests, the largest (by file count) first.
    nmaxreq = 10
    if len(reqsized) > nmaxreq:
        reqkeys = reqsized.keys()
        reqsortl = sorted(reqkeys, key=reqsized.__getitem__, reverse=True)
        reqsortl = reqsortl[:nmaxreq - 1]
    else:
        reqsortl = reqsized.keys()
    # Second pass: tally file counts along every diagram dimension.
    for task in tasks:
        ptype = task['processingtype']
        # if 'jedireqid' not in task: continue
        req = int(task['reqid'])
        if not analysis and req not in reqsortl: continue
        stat = task['superstatus']
        substat = task['status']
        # trf = task['transpath']
        trf = task['taskname']
        cloud = task['cloud']
        if cloud == '': cloud = 'No cloud assigned'
        dsinfo = task['dsinfo']
        nfailed = dsinfo['nfilesfailed']
        nfinished = dsinfo['nfilesfinished']
        nfiles = dsinfo['nfiles']
        # Files neither finished nor failed count as pending.
        npending = nfiles - nfailed - nfinished
        if ptype not in ptyped: ptyped[ptype] = {}
        if req not in ptyped[ptype]: ptyped[ptype][req] = 0
        ptyped[ptype][req] += nfiles
        if req not in reqd: reqd[req] = {}
        if stat not in reqd[req]: reqd[req][stat] = 0
        reqd[req][stat] += nfiles
        if trf not in trfd: trfd[trf] = {}
        if stat not in trfd[trf]: trfd[trf][stat] = 0
        trfd[trf][stat] += nfiles
        if stat not in statd: statd[stat] = {}
        if substat not in statd[stat]: statd[stat][substat] = 0
        statd[stat][substat] += nfiles
        if substat not in substatd: substatd[substat] = {}
        if 'finished' not in substatd[substat]:
            # Initialize all three file-status buckets on first sight.
            for filestat in ('finished', 'failed', 'pending'):
                substatd[substat][filestat] = 0
        substatd[substat]['finished'] += nfinished
        substatd[substat]['failed'] += nfailed
        substatd[substat]['pending'] += npending
        if cloud not in cloudd: cloudd[cloud] = {}
        if 'finished' not in cloudd[cloud]:
            for filestat in ('finished', 'failed', 'pending'):
                cloudd[cloud][filestat] = 0
        cloudd[cloud]['finished'] += nfinished
        cloudd[cloud]['failed'] += nfailed
        cloudd[cloud]['pending'] += npending
    flowrows = []
    if analysis:
        ## Don't include request, task for analysis
        for trf in trfd:
            for stat in trfd[trf]:
                n = trfd[trf][stat]
                flowrows.append([trf, 'Task %s' % stat, n])
    else:
        for ptype in ptyped:
            for req in ptyped[ptype]:
                n = ptyped[ptype][req]
                flowrows.append([ptype, 'Request %s' % req, n])
        for req in reqd:
            for stat in reqd[req]:
                n = reqd[req][stat]
                flowrows.append(['Request %s' % req, 'Task %s' % stat, n])
    # The remaining stages are common to analysis and production diagrams.
    for stat in statd:
        for substat in statd[stat]:
            n = statd[stat][substat]
            flowrows.append(['Task %s' % stat, 'Substatus %s' % substat, n])
    for substat in substatd:
        for filestat in substatd[substat]:
            if filestat not in substatd[substat]: continue
            n = substatd[substat][filestat]
            flowrows.append(['Substatus %s' % substat, 'File status %s' % filestat, n])
    for cloud in cloudd:
        for filestat in cloudd[cloud]:
            if filestat not in cloudd[cloud]: continue
            n = cloudd[cloud][filestat]
            flowrows.append(['File status %s' % filestat, cloud, n])
    return flowrows
def dictfetchall(cursor):
    """Return all remaining rows of a DB cursor as a list of
    column-name -> value dicts."""
    colnames = [col[0] for col in cursor.description]
    return [dict(zip(colnames, row)) for row in cursor.fetchall()]
# This function builds a backend-dependent temporary-table query to avoid passing an enormous list of IDs to the metadata query.
# Raw cursors and (formerly) explicit transactions are used because of possible issues with Django connection pooling.
def addJobMetadata(jobs, require=False):
    """
    Attach pilot metadata as job['metastruct'] to failed jobs — or to all jobs
    when require=True — by bulk-querying the metadata table through the shared
    temporary ID table. Returns the (mutated) job list.
    """
    print 'adding metadata'
    pids = []
    for job in jobs:
        if (job['jobstatus'] == 'failed' or require): pids.append(job['pandaid'])
    # NOTE(review): 'query' is built but never used below; the temp-table path
    # replaced the ORM filter it was presumably written for.
    query = {}
    query['pandaid__in'] = pids
    mdict = {}
    ## Get job metadata
    random.seed()
    # Table names differ between the Oracle production schema and other backends.
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        metaTableName = "ATLAS_PANDA.METATABLE"
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
    else:
        metaTableName = "METATABLE"
        tmpTableName = "TMP_IDS1"
    # Random key isolating this request's rows in the shared temp table.
    transactionKey = random.randrange(1000000)
    # connection.enter_transaction_management()
    new_cur = connection.cursor()
    for id in pids:
        new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (
            tmpTableName, id, transactionKey))  # backend-dependent SQL
    # connection.commit()
    new_cur.execute(
        "SELECT METADATA,MODIFICATIONTIME,PANDAID FROM %s WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
            metaTableName, tmpTableName, transactionKey))
    mrecs = dictfetchall(new_cur)
    for m in mrecs:
        try:
            # Best-effort: METADATA may be an unreadable LOB; skip on failure.
            mdict[m['PANDAID']] = m['METADATA']
        except:
            pass
    for job in jobs:
        if job['pandaid'] in mdict:
            try:
                # .read() the LOB and parse the JSON payload; best-effort.
                job['metastruct'] = json.loads(mdict[job['pandaid']].read())
            except:
                pass
            # job['metadata'] = mdict[job['pandaid']]
    print 'added metadata'
    # Clean this request's rows out of the shared temp table.
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))
    # connection.commit()
    # connection.leave_transaction_management()
    return jobs
##self monitor
def g4exceptions(request):
    """
    Report the frequency of Geant4 exception messages among failed/holding
    jobs (exeerrorcode 68, 'G4 exception' in exeerrordiag) over the requested
    time window, optionally filtered by an 'amitag' parameter. Returns a JSON
    summary of {errorFrequency, errorJobs}.
    """
    valid, response = initRequest(request)
    setupView(request, hours=365 * 24, limit=999999999)
    if 'hours' in request.session['requestParams']:
        hours = int(request.session['requestParams']['hours'])
    else:
        hours = 3
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, wildCardExt=True)
    query['jobstatus__in'] = ['failed', 'holding']
    query['exeerrorcode'] = 68
    query['exeerrordiag__icontains'] = 'G4 exception'
    values = 'pandaid', 'atlasrelease', 'exeerrorcode', 'exeerrordiag', 'jobstatus', 'transformation'
    jobs = []
    jobs.extend(
        Jobsactive4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
            *values))
    jobs.extend(
        Jobsarchived4.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
            *values))
    # Query the long-term archive only when the window reaches back more than a day.
    if (((datetime.now() - datetime.strptime(query['modificationtime__range'][0], "%Y-%m-%d %H:%M:%S")).days > 1) or \
            ((datetime.now() - datetime.strptime(query['modificationtime__range'][1],
                                                 "%Y-%m-%d %H:%M:%S")).days > 1)):
        jobs.extend(
            Jobsarchived.objects.filter(**query).extra(where=[wildCardExtension])[:request.session['JOB_LIMIT']].values(
                *values))
    if 'amitag' in request.session['requestParams']:
        # Filter jobs by the --AMITag value embedded in their job parameters,
        # fetched in bulk through the shared temporary ID table.
        if dbaccess['default']['ENGINE'].find('oracle') >= 0:
            tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
        else:
            tmpTableName = "TMP_IDS1"
        transactionKey = random.randrange(1000000)
        new_cur = connection.cursor()
        for job in jobs:
            new_cur.execute("INSERT INTO %s(ID,TRANSACTIONKEY) VALUES (%i,%i)" % (
                tmpTableName, job['pandaid'], transactionKey))  # backend-dependent SQL
        new_cur.execute(
            "SELECT JOBPARAMETERS, PANDAID FROM ATLAS_PANDA.JOBPARAMSTABLE WHERE PANDAID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)" % (
                tmpTableName, transactionKey))
        mrecs = dictfetchall(new_cur)
        jobsToRemove = set()
        for rec in mrecs:
            parameters = rec['JOBPARAMETERS'].read()
            tagName = "--AMITag"
            startPos = parameters.find(tagName)
            acceptJob = False
            # Bug fix: the original sliced the tag value even when the marker
            # was absent (startPos == -1), reading from a bogus position.
            if startPos != -1:
                endPos = parameters.find(" ", startPos)
                AMITag = parameters[startPos + len(tagName) + 1:endPos]
                acceptJob = (AMITag == request.session['requestParams']['amitag'])
            if not acceptJob:
                jobsToRemove.add(rec['PANDAID'])
        # List comprehension instead of filter(): identical on Python 2 and
        # safe on Python 3 (addJobMetadata iterates the result twice).
        jobs = [x for x in jobs if x['pandaid'] not in jobsToRemove]
    jobs = addJobMetadata(jobs, True)
    errorFrequency = {}
    errorJobs = {}
    for job in jobs:
        # Robustness fix: addJobMetadata attaches 'metastruct' only when a
        # metadata row was found and parsed; skip jobs without it instead of
        # raising KeyError.
        if 'metastruct' not in job:
            continue
        if (job['metastruct']['executor'][0]['logfileReport']['countSummary']['FATAL'] > 0):
            message = job['metastruct']['executor'][0]['logfileReport']['details']['FATAL'][0]['message']
            # Extract the text between "G4Exception :" and "issued by :".
            exceptMess = message[message.find("G4Exception :") + 14: message.find("issued by :") - 1]
            if exceptMess not in errorFrequency:
                errorFrequency[exceptMess] = 1
            else:
                errorFrequency[exceptMess] += 1
            if exceptMess not in errorJobs:
                errorJobs[exceptMess] = []
            errorJobs[exceptMess].append(job['pandaid'])
    resp = {'errorFrequency': errorFrequency, 'errorJobs': errorJobs}
    del request.session['TFIRST']
    del request.session['TLAST']
    return HttpResponse(json.dumps(resp), content_type='text/plain')
def initSelfMonitor(request):
    """
    Record the start-of-request monitoring snapshot (timestamp, host CPU load,
    memory usage, client address, requested URL) into the session;
    endSelfMonitor() later completes and persists the record.
    """
    import psutil
    # The original assigned request.session['hostname'] to an unused local
    # ('server') — with a stray trailing comma that made it a 1-tuple; removed.
    # Prefer the originating client address when behind a proxy.
    if 'HTTP_X_FORWARDED_FOR' in request.META:
        remote = request.META['HTTP_X_FORWARDED_FOR']
    else:
        remote = request.META['REMOTE_ADDR']
    urlProto = request.META['wsgi.url_scheme']
    if 'HTTP_X_FORWARDED_PROTO' in request.META:
        urlProto = request.META['HTTP_X_FORWARDED_PROTO']
    urlProto = str(urlProto) + "://"
    try:
        urls = urlProto + request.META['SERVER_NAME'] + request.META['REQUEST_URI']
    except (KeyError, TypeError):
        urls = 'localhost'
    qtime = str(timezone.now())
    # cpu_percent(interval=1) blocks for one second while sampling the load.
    load = psutil.cpu_percent(interval=1)
    mem = psutil.virtual_memory().percent
    request.session["qtime"] = qtime
    request.session["load"] = load
    request.session["remote"] = remote
    request.session["mem"] = mem
    request.session["urls"] = urls
def endSelfMonitor(request):
    """Persist the monitoring record started by initSelfMonitor().

    Computes the request duration from the session timestamps and, when a
    'hostname' is present in the session, saves a RequestStat row.
    """
    qduration = str(timezone.now())
    request.session['qduration'] = qduration
    try:
        duration = (datetime.strptime(request.session['qduration'], "%Y-%m-%d %H:%M:%S.%f") - datetime.strptime(
            request.session['qtime'], "%Y-%m-%d %H:%M:%S.%f")).seconds
    except (KeyError, ValueError, TypeError):
        # qtime missing (initSelfMonitor never ran) or the timestamp does not
        # match the expected format (e.g. tz-aware suffix); was a bare except:
        duration = 0
    if 'hostname' in request.session:
        reqs = RequestStat(
            server=request.session['hostname'],
            qtime=request.session['qtime'],
            load=request.session['load'],
            mem=request.session['mem'],
            qduration=request.session['qduration'],
            duration=duration,
            remote=request.session['remote'],
            urls=request.session['urls'],
            description=' '
        )
        reqs.save()
@never_cache
def statpixel(request):
    """Serve a 1x1 transparent GIF and log the page visit.

    Records referrer URL, client IP (honouring X-Forwarded-For) and, when an
    ADFS login is present in the session, the BPUser id.
    """
    valid, response = initRequest(request)
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    # BUG FIX: direct indexing raised KeyError whenever the Referer header
    # was absent (direct hits, privacy-stripping clients); log an empty URL.
    url = request.META.get('HTTP_REFERER', '')
    service = 0
    userid = -1
    if ('ADFS_LOGIN' in request.session):
        userid = BPUser.objects.get(username=request.session['ADFS_LOGIN']).id
    Visits.objects.create(url=url, service=service, remote=ip, time=str(timezone.now()), userid=userid)
    # this is a transparent gif pixel
    pixel_ = "\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x80\x00\x00\xff\xff\xff\x00\x00\x00\x21\xf9\x04\x01\x00\x00\x00\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02\x44\x01\x00\x3b"
    return HttpResponse(pixel_, content_type='image/gif')
#@cache_page(60 * 20)
def globalshares(request):
    """Render the global-shares overview page (HTML) or dump the stats as JSON.

    NOTE(review): when a cache entry exists its plot data is rebuilt, but the
    early return below is commented out, so the view still recomputes
    everything from the database — confirm the cache path is intentionally
    disabled.
    """
    valid, response = initRequest(request)
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        # re-key the cached plot data as plain str -> int (Python 2 iteritems)
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData
        #response = render_to_response('globalshares.html', data, RequestContext(request))
        #patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        #endSelfMonitor(request)
        #return response
    if not valid: return response
    setupView(request, hours=180 * 24, limit=9999999)
    # gs: per-share HS06 stats; tablerows: flattened share tree for display
    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}#{'Upgrade':130049 , 'Reprocessing default':568841, 'Data Derivations': 202962, 'Event Index': 143 }
    for shareName, shareValue in gs.iteritems():
        # delta = currently executing HS06 minus the pledged amount
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        gsPlotData[str(shareName)] = int(shareValue['executing'])
    del request.session['TFIRST']
    del request.session['TLAST']
    # serve HTML unless the client asked for JSON.  NOTE(review): the
    # `in ('application/json')` clause is a substring test against a plain
    # string, not tuple membership — confirm this is intended.
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
        'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData':gsPlotData,
            'tablerows':tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('globalshares.html', data, RequestContext(request))
        setCacheEntry(request, "globalshares", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(json.dumps(gs), content_type='text/html')
# taken from https://raw.githubusercontent.com/PanDAWMS/panda-server/master/pandaserver/taskbuffer/OraDBProxy.py
# retrieve global shares
def get_shares(parents=''):
    """
    Fetch rows from ATLAS_PANDA.GLOBAL_SHARES.

    parents='' returns every share; parents=None returns only the top-level
    shares; a unicode share name returns that share's children; a list or
    tuple of names returns the children of all of them.  Returns the raw
    cursor rows (NAME, VALUE, PARENT, PRODSOURCELABEL, WORKINGGROUP,
    CAMPAIGN, PROCESSINGTYPE).
    """
    comment = ' /* DBProxy.get_shares */'
    methodName = comment.split(' ')[-2].split('.')[-1]
    sql = """
          SELECT NAME, VALUE, PARENT, PRODSOURCELABEL, WORKINGGROUP, CAMPAIGN, PROCESSINGTYPE
          FROM ATLAS_PANDA.GLOBAL_SHARES
          """
    bindings = None
    if parents == '':
        # all shares: no WHERE clause needed
        pass
    elif parents is None:
        # only shares without a parent
        sql += "WHERE parent IS NULL"
    elif type(parents) == unicode:
        # children of one named share
        bindings = {':parent': parents}
        sql += "WHERE parent = :parent"
    elif type(parents) in (list, tuple):
        # children of several named shares, bound as :parent0, :parent1, ...
        bindings = {}
        keys = []
        for idx, parent in enumerate(parents):
            key = ':parent{0}'.format(idx)
            bindings[key] = parent
            keys.append(key)
        sql += "WHERE parent IN ({0})".format(','.join(keys))
    db_cursor = connection.cursor()
    db_cursor.execute(sql, bindings)
    rows = db_cursor.fetchall()
    db_cursor.close()
    return rows
# Taken from the Panda Server https://github.com/PanDAWMS/panda-server/blob/master/pandaserver/taskbuffer/OraDBProxy.py#L18378 with minor modifications
def __load_branch(share):
    """
    Recursively construct the share subtree rooted at *share*.

    Builds a fresh GlobalShares.Share node for *share*, fetches its children
    from the database and attaches each child's subtree.
    """
    node = GlobalShares.Share(share.name, share.value, share.parent, share.prodsourcelabel,
                              share.workinggroup, share.campaign, share.processingtype)
    for row in get_shares(parents=share.name):
        # each row mirrors the Share constructor's positional arguments
        node.children.append(__load_branch(GlobalShares.Share(*row)))
    return node
# Taken from the Panda Server https://github.com/PanDAWMS/panda-server/blob/master/pandaserver/taskbuffer/OraDBProxy.py#L18378 with minor modifications
def __get_hs_leave_distribution():
    """
    Get the current HS06 distribution for running and queued jobs.

    Builds the global-share tree from the database, normalizes it, sums the
    HS06 of activated ('queued') and sent/running ('executing') jobs per
    share, and derives each leaf's pledged HS06 from its share percentage.
    Returns (hs_distribution_dict, table_rows).
    """
    EXECUTING = 'executing'
    QUEUED = 'queued'
    PLEDGED = 'pledged'
    IGNORE = 'ignore'
    comment = ' /* DBProxy.get_hs_leave_distribution */'
    # assemble the full share tree starting from the top-level shares
    tree = GlobalShares.Share('root', 100, None, None, None, None, None)
    shares_top_level = get_shares(parents=None)
    for (name, value, parent, prodsourcelabel, workinggroup, campaign, processingtype) in shares_top_level:
        share = GlobalShares.Share(name, value, parent, prodsourcelabel, workinggroup, campaign, processingtype)
        tree.children.append(__load_branch(share))
    tree.normalize()
    leave_shares = tree.get_leaves()
    # per-share HS06 summed into queued / executing / ignore buckets
    sql_hs_distribution = "SELECT gshare, jobstatus_grouped, SUM(HS) FROM (SELECT gshare, HS, CASE WHEN jobstatus IN('activated') THEN 'queued' WHEN jobstatus IN('sent', 'running') THEN 'executing' ELSE 'ignore' END jobstatus_grouped FROM ATLAS_PANDA.JOBS_SHARE_STATS JSS) GROUP BY gshare, jobstatus_grouped"
    cur = connection.cursor()
    cur.execute(sql_hs_distribution)
    hs_distribution_raw = cur.fetchall()
    cur.close()
    # get the hs distribution data into a dictionary structure
    hs_distribution_dict = {}
    hs_queued_total = 0
    hs_executing_total = 0
    hs_ignore_total = 0
    for hs_entry in hs_distribution_raw:
        gshare, status_group, hs = hs_entry
        hs_distribution_dict.setdefault(gshare, {PLEDGED: 0, QUEUED: 0, EXECUTING: 0})
        hs_distribution_dict[gshare][status_group] = hs
        # calculate totals
        if status_group == QUEUED:
            hs_queued_total += hs
        elif status_group == EXECUTING:
            hs_executing_total += hs
        else:
            hs_ignore_total += hs
    # Calculate the ideal HS06 distribution based on shares.
    for share_node in leave_shares:
        share_name, share_value = share_node.name, share_node.value
        hs_pledged_share = hs_executing_total * decimal.Decimal(str(share_value)) / decimal.Decimal(str(100.0))
        hs_distribution_dict.setdefault(share_name, {PLEDGED: 0, QUEUED: 0, EXECUTING: 0})
        # Pledged HS according to global share definitions
        hs_distribution_dict[share_name]['pledged'] = hs_pledged_share
    # annotate the tree with aggregated stats and flatten it for display
    getChildStat(tree, hs_distribution_dict, 0)
    rows = []
    stripTree(tree, rows)
    return hs_distribution_dict, rows
def stripTree(node, rows):
    """Depth-first flatten of the share tree into display rows.

    The root (level 0) is skipped.  Levels 1-3 get their "name [value%]"
    label placed in the matching 'levelN' column with the other two columns
    blanked; deeper levels carry only the numeric columns.
    """
    if node.level > 0:
        row = {}
        if 1 <= node.level <= 3:
            label = node.name + ' [' + ("%0.1f" % node.value) + '%]'
            row['level1'] = row['level2'] = row['level3'] = ''
            row['level%d' % node.level] = label
        row['executing'] = node.executing
        row['pledged'] = node.pledged
        row['delta'] = node.delta
        row['queued'] = node.queued
        row['ratio'] = node.ratio
        rows.append(row)
    for child in node.children:
        stripTree(child, rows)
def getChildStat(node, hs_distribution_dict, level):
    """Annotate *node* in place with executing/pledged/delta/queued totals.

    Leaf shares (present in *hs_distribution_dict*) take their numbers from
    the dictionary; inner nodes aggregate their children recursively.  Also
    records the node's depth (*level*) and the executing/pledged ratio in
    percent (None when nothing is pledged).
    """
    if node.name in hs_distribution_dict:
        entry = hs_distribution_dict[node.name]
        executing = entry['executing']
        pledged = entry['pledged']
        delta = executing - pledged
        queued = entry['queued']
    else:
        executing = pledged = delta = queued = 0
        for child in node.children:
            getChildStat(child, hs_distribution_dict, level + 1)
            executing += child.executing
            pledged += child.pledged
            delta += child.delta
            queued += child.queued
    node.executing = executing
    node.pledged = pledged
    node.delta = delta
    node.queued = queued
    node.level = level
    node.ratio = executing / pledged * 100 if pledged != 0 else None
|
Foorth/panda-bigmon-core
|
core/views.py
|
Python
|
apache-2.0
| 437,120
|
[
"VisIt"
] |
aa6d6396c8b039ab36e3a816af2f8b62df9e1cef07ddd50a1320bd9e36dd40f8
|
# backprop.py
# to implement backprob for the simple features
# c.f. https://github.com/mnielsen/neural-networks-and-deep-learning/blob/master/code/network.py
"""
from armor.learning import backprop as bp
reload(bp)
NN = bp.Network()
NN()
"""
########################
# imports
import numpy as np
########################
# sample data
# Toy dataset: three random feature vectors of 100 samples each.
X = np.random.random(100)
Y = np.random.random(100)
Z = np.random.random(100)
# Boolean labels: True where X + Y - Z < 0.1 (not included in sampleData).
W = (X+Y-Z<0.1)
# A single random 4-vector; used as the default argument of Network.forwardProp.
xyzw = np.random.random(4)
sampleData = [X, Y, Z]
########################
# functions
def sigmoid(x):
    """Logistic activation: maps any real x into (0, 1)."""
    denom = 1 + np.exp(-x)
    return 1. / denom

def neuron(weights, data, activationFunction=sigmoid):
    """Weighted sum of *data* passed through *activationFunction*."""
    w = np.array(weights)
    total = (w * data).sum()
    return activationFunction(total)
########################
# classes
class Network:
    """Minimal fully-connected feed-forward network.

    ``layers[i]`` holds the activations of layer i as a 1-D numpy array;
    ``weights[i]`` (shape ``(len(layers[i]), len(layers[i+1]))``) connects
    layer i to layer i+1.  Backprop/training are unimplemented stubs.
    """
    def __init__(self, shape=(3,4,4,1)):
        """Create zeroed layers and uniform-random weights for *shape*."""
        N = len(shape)
        self.layers = []
        self.weights= []
        for s in shape:
            self.layers.append(np.zeros(s))
        for i in range(N-1):
            self.weights.append(np.random.random((shape[i], shape[i+1])))
    def __call__(self, key=None):
        """Return attribute *key*, or a dict of layers and weights when key is None."""
        if key:
            returnValue = getattr(self, key)
        else:
            returnValue = {'layers' : self.layers,
                           'weights' : self.weights,
                           }
        return returnValue
    def randomise(self):
        """Re-draw every weight matrix uniformly in [0, 1)."""
        N = len(self.layers)
        for i in range(N-1):
            self.weights[i] = np.random.random(self.weights[i].shape)
    def forwardProp(self, data=xyzw, verbose=False):
        """Propagate one datum through the network.

        *data* packs the input activations first (as many entries as the
        input layer) followed by the target output values.  Returns a dict
        with the network 'outcome' and the 'err' (target - outcome).
        """
        n = len(self.layers[0])
        inData = data[:n] #single datum
        outData = data[n:]
        self.layers[0] = inData
        for i in range(1,len(self.layers)):
            data = self.layers[i-1]
            weights = self.weights[i-1]
            for j in range(len(self.layers[i])):
                self.layers[i][j] = neuron(weights=weights[:,j], data=data)
        outcome = self.layers[-1]
        # BUG FIX: the original computed np.array(outData) - outData, which is
        # identically zero; the error signal is target minus prediction.
        err = np.array(outData) - outcome
        return {'outcome' : outcome,
                'err' : err,
                }
    #mapReduce, blablabla
    def backProp(self):
        """Not implemented yet."""
        pass
    def train(self, data):
        """Not implemented yet."""
        pass
    def validate(self, data):
        """Not implemented yet."""
        pass
    def classify(self, data):
        """Not implemented yet."""
        pass
    def vectorise(self):
        # doesn't quite work because the first argument is fixed as self
        self.forwardProp = np.vectorize(self.forwardProp)
        self.backProp = np.vectorize(self.backProp)
|
yaukwankiu/armor
|
learning/backprop.py
|
Python
|
cc0-1.0
| 2,556
|
[
"NEURON"
] |
abf0a23638725e9dddaf0c91bdd3bfce45bb526f67a856ffae969d57893fe8c5
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Short range part of ECP under PBC
'''
import ctypes
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.gto import AS_ECPBAS_OFFSET, AS_NECPBAS
from pyscf.pbc.df import incore
from pyscf.pbc import gto as pgto
from mpi4pyscf.lib import logger
from mpi4pyscf.tools import mpi
comm = mpi.comm
rank = mpi.rank
@mpi.parallel_call
def ecp_int(cell, kpts=None):
    '''Scalar short-range ECP integrals for a PBC cell, distributed over MPI.

    Returns one (nao, nao) matrix per k-point (a single matrix when kpts is
    None or one k-point); real arrays at the Gamma point, complex otherwise.
    NOTE(review): only rank 0 holds the reduced result -- other ranks return
    their local partial matrix; confirm callers only use the root's value.
    '''
    # rank 0 broadcasts the serialized cell so all ranks work on the same object
    if rank == 0:
        comm.bcast(cell.dumps())
    else:
        cell = pgto.loads(comm.bcast(None))
    if kpts is None:
        kpts_lst = numpy.zeros((1,3))
    else:
        kpts_lst = numpy.reshape(kpts, (-1,3))
    ecpcell = gto.Mole()
    ecpcell._atm = cell._atm
    # append a fictitious s function to mimic the auxiliary index in pbc.incore.
    # ptr2last_env_idx to force PBCnr3c_fill_* function to copy the entire "env"
    ptr2last_env_idx = len(cell._env) - 1
    ecpbas = numpy.vstack([[0, 0, 1, 1, 0, ptr2last_env_idx, 0, 0],
                           cell._ecpbas]).astype(numpy.int32)
    ecpcell._bas = ecpbas
    ecpcell._env = cell._env
    # In pbc.incore _ecpbas is appended to two sets of cell._bas and the
    # fictitious s function.
    cell._env[AS_ECPBAS_OFFSET] = cell.nbas * 2 + 1
    cell._env[AS_NECPBAS] = len(cell._ecpbas)
    # pair each k-point with itself: only diagonal (ki == kj) blocks are needed
    kptij_lst = numpy.hstack((kpts_lst,kpts_lst)).reshape(-1,2,3)
    nkpts = len(kpts_lst)
    if abs(kpts_lst).sum() < 1e-9: # gamma_point
        dtype = numpy.double
    else:
        dtype = numpy.complex128
    ao_loc = cell.ao_loc_nr()
    nao = ao_loc[-1]
    mat = numpy.zeros((nkpts,nao,nao), dtype=dtype)
    intor = cell._add_suffix('ECPscalar')
    int3c = incore.wrap_int3c(cell, ecpcell, intor, kptij_lst=kptij_lst)
    # shls_slice of auxiliary index (0,1) corresponds to the fictitious s function
    tasks = [(i, i+1, j, j+1, 0, 1) # shls_slice
             for i in range(cell.nbas) for j in range(i+1)]
    # each rank fills the lower-triangular shell pairs it steals from the pool
    for shls_slice in mpi.work_stealing_partition(tasks):
        i0 = ao_loc[shls_slice[0]]
        i1 = ao_loc[shls_slice[1]]
        j0 = ao_loc[shls_slice[2]]
        j1 = ao_loc[shls_slice[3]]
        buf = numpy.empty((nkpts,i1-i0,j1-j0), dtype=dtype)
        mat[:,i0:i1,j0:j1] = int3c(shls_slice, buf)
    # sum the partial matrices across ranks; result lands on rank 0
    buf = mpi.reduce(mat)
    if rank == 0:
        mat = []
        for k, kpt in enumerate(kpts_lst):
            # only the lower triangle was computed; symmetrize via pack/unpack
            v = lib.unpack_tril(lib.pack_tril(buf[k]), lib.HERMITIAN)
            if abs(kpt).sum() < 1e-9: # gamma_point:
                v = v.real
            mat.append(v)
        # single-k input gets a bare matrix rather than a 1-element list
        if kpts is None or numpy.shape(kpts) == (3,):
            mat = mat[0]
    return mat
|
sunqm/mpi4pyscf
|
mpi4pyscf/pbc/gto/ecp.py
|
Python
|
gpl-3.0
| 2,650
|
[
"PySCF"
] |
366943d3b674dfd89973f28d46f8bd9fc641d1f8929e6531e5b27e36dff37347
|
#!/usr/bin/env python
# derive motifs from transcription factor binding data
import sys
import time
import optparse
import general
import numpy
import fasta
import metrn
import modencode
import bed
import os
import copy
import pdb
import re
import network
from Bio import Motif
# log the exact invocation and start time for provenance (Python 2 print statements)
print "Command:", " ".join(sys.argv)
print "Timestamp:", time.asctime(time.localtime())
""" define classes and functions of internal use """
""" define a function to build a fasta file from a bed file... """
def fastaGenerator(inputfile, coordfile, fastafile, genomefile, window=0, top="OFF"):
    """Build a FASTA file of peak sequences from a narrowPeak-style BED file.

    Reads *inputfile*, optionally recentres each peak on its summit with a
    +/- *window* flank, writes the peak coordinates (strongest-signal first,
    optionally truncated to *top*) to *coordfile*, then shells out to
    fastaFromBed to extract the sequences from *genomefile* into *fastafile*.
    """
    peak_dict, signal_dict = dict(), dict()
    for line in open(inputfile).read().split("\n"):
        if line == "":
            continue
        chrm, start, end, peak, score, strand, signal, pvalue, qvalue, point = line.strip().split("\t")
        middle = int(start) + (int(end)-int(start))/2   # peak midpoint (unused)
        point = int(start) + int(point)                 # absolute summit position
        if window != 0:
            start, end = point-window, point+window
        peak_dict[peak] = [chrm, start, end, peak, score, strand, signal]
        signal_dict[peak] = float(signal)
    # rank peaks by signal strength, strongest first:
    ranked = general.valuesort(signal_dict)
    ranked.reverse()
    coord_handle = open(coordfile, "w")
    if top != "OFF":
        ranked = ranked[:int(top)]
    for peak in ranked:
        coord_handle.write("\t".join(map(str, peak_dict[peak])) + "\n")
    coord_handle.close()
    # extract the sequences with BEDTools:
    os.system("fastaFromBed -fi " + genomefile + " -bed " + coordfile + " -fo " + fastafile + " -name")
""" define a function to build a BioPython Motif object from input motif lines... """
def motifGenerator(motiflines, format="ACGT"):
    """Parse whitespace-delimited score lines into a motif matrix.

    Each element of *motiflines* is one motif position with one score per
    base; returns a list of {base: score} dicts, one per position, keyed by
    the characters of *format* in column order.
    """
    motif = list()
    for motifline in motiflines:
        position = dict()
        # split() collapses any run of spaces/tabs and drops empty tokens --
        # the original chain of single-space replace() calls was a no-op and
        # broke on tab-separated matrices.
        for index, score in enumerate(motifline.strip().split()):
            position[format[index]] = float(score)
        motif.append(position)
    return motif
""" define a function to export a TRANSFAC motif file from input motif lines... """
def transfacGenerator(motiflines, name, species="", format="ACGT", zmax=2, start=0, mode="standard"):
    """Convert motif score lines into a list of TRANSFAC-format lines.

    In "standard" mode only the lines following a MEME
    "letter-probability matrix" marker are treated as scores; any other
    *mode* uses *motiflines* as-is.  *start* skips leading tokens on each
    score line, *zmax* zero-pads the position index.  The consensus base per
    position is the one whose score exceeds 0.5 ("N" otherwise).
    """
    transfaclines = list()
    transfaclines.append("ID " + name)
    transfaclines.append("BF " + species)
    transfaclines.append("P0\t" + "\t".join(format))
    position = 1
    consensus = ""
    # filter motif lines to grab just score lines:
    scorelines, record = list(), False
    if mode == "standard":
        for motifline in motiflines:
            if record:
                scorelines.append(motifline)
            if "letter-probability matrix" in motifline:
                record = True
    else:
        scorelines = motiflines
    # record scores into TRANSFAC format:
    for scoreline in scorelines:
        if general.clean(scoreline) != list() and not "URL" in scoreline:
            index = 0
            scores = list()
            conbase = "N"
            # split() handles any run of whitespace; the original chain of
            # single-space replace() calls was a no-op and broke on tabs.
            for score in scoreline.strip().split()[start:]:
                scores.append(float(score))
                if float(score) > 0.5:
                    conbase = format[index]
                index += 1
            transfaclines.append(str(position).zfill(zmax) + "\t" + "\t".join(map(str, scores)) + "\t" + conbase)
            consensus += conbase
            position += 1
    transfaclines.append("//")
    return transfaclines
def motifScanner(scanDict, orthologDict, cutoff=0.25, size=3, exclusions="OFF"):
    """Match each factor's motifs against its human orthologs by name prefix.

    A motif matches an ortholog when their lowercase names share the first
    *size* characters; matches scoring above *cutoff* count as successes.
    *exclusions* ("OFF" or a comma-separated token list) filters out both
    factors and motifs whose names contain a token.  Returns
    (successDict, matchDict, scannedFactors, matchedFactors, successFactors);
    only factors with an ortholog entry are counted as scanned.
    """
    def _excluded(label):
        # True when any exclusion token appears in *label*, either verbatim
        # or case-insensitively.
        if exclusions == "OFF":
            return False
        for token in exclusions.split(","):
            if token in label or token.lower() in label.lower():
                return True
        return False

    successDict, matchDict = dict(), dict()
    scannedFactors = matchedFactors = successFactors = 0
    for factor in scanDict:
        if factor.lower() not in orthologDict:
            continue
        if not _excluded(factor):
            matchScores = list()
            for ortholog in orthologDict[factor.lower()]["hs"]:
                for motif in scanDict[factor]:
                    if ortholog.lower()[:size] != motif.lower()[:size]:
                        continue
                    if _excluded(motif):
                        continue
                    score = float(scanDict[factor][motif])
                    matchDict.setdefault(factor, dict()).setdefault(ortholog, dict())[motif] = score
                    matchScores.append(score)
                    if score > cutoff:
                        successDict.setdefault(factor, dict()).setdefault(ortholog, dict())[motif] = score
        # tally results:
        if factor in matchDict:
            matchedFactors += 1
        if factor in successDict:
            successFactors += 1
        scannedFactors += 1
    # return results:
    return successDict, matchDict, scannedFactors, matchedFactors, successFactors
def main():
parser = optparse.OptionParser()
parser.add_option("--path", action = "store", type = "string", dest = "path", help = "Path from script to files")
parser.add_option("--mode", action = "store", type = "string", dest = "mode", help = "Analysis mode: Determines operations to execute.")
parser.add_option("--infile", action = "store", type = "string", dest = "infile", help = "Input file for analysis...", default="OFF")
parser.add_option("--organism", action = "store", type = "string", dest = "organism", help = "Target organism for operations...", default="OFF")
parser.add_option("--peaks", action = "store", type = "string", dest = "peaks", help = "Peaks to be used for analysis...")
parser.add_option("--name", action = "store", type = "string", dest = "name", help = "Motif database name...")
parser.add_option("--max", action = "store", type = "string", dest = "max", help = "Maximum number of peaks to consider", default="OFF")
parser.add_option("--window", action = "store", type = "string", dest = "window", help = "Window surrounding peak within which the search will be performed", default="OFF")
parser.add_option("--include", action = "store", type = "string", dest = "include", help = "Targets/regions to include!", default="OFF")
parser.add_option("--exclude", action = "store", type = "string", dest = "exclude", help = "Targets/regions to exclude!", default="OFF")
parser.add_option("--repeatMask", action = "store", type = "string", dest = "repeatMask", help = "Should repeat-masked genome be used?", default="OFF")
parser.add_option("--nuclear", action = "store", type = "string", dest = "nuclear", help = "Peaks are only nuclear?", default="ON")
parser.add_option("--target", action = "store", type = "string", dest = "target", help = "Define analysis targets", default="OFF")
parser.add_option("--parameters", action = "store", type = "string", dest = "parameters", help = "MEME parameters", default="OFF")
parser.add_option("--background", action = "store", type = "string", dest = "background", help = "Base background frequency", default="OFF")
parser.add_option("--sequence", action = "store", type = "string", dest = "sequence", help = "Sequence to search for...", default="OFF")
parser.add_option("--threads", action = "store", type = "string", dest = "threads", help = "Multiprocessing threads", default="1")
parser.add_option("--chunks", action = "store", type = "int", dest = "chunks", help = "", default=100)
parser.add_option("--qsub", action = "store", type = "string", dest = "qsub", help = "qsub configuration header", default="OFF")
parser.add_option("--server", action = "store", type = "string", dest = "server", help = "Are we on the server?", default="OFF")
parser.add_option("--job", action = "store", type = "string", dest = "job", help = "Job name for cluster", default="OFF")
(option, args) = parser.parse_args()
# import paths:
if option.server == "OFF":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_path.txt")
elif option.server == "ON":
path_dict = modencode.configBuild(option.path + "/input/" + "configure_server.txt")
# specify input and output paths:
inpath = path_dict["input"]
extraspath = path_dict["extras"]
pythonpath = path_dict["python"]
scriptspath = path_dict["scripts"]
downloadpath = path_dict["download"]
fastqpath = path_dict["fastq"]
bowtiepath = path_dict["bowtie"]
bwapath = path_dict["bwa"]
macspath = path_dict["macs"]
memepath = path_dict["meme"]
idrpath = path_dict["idr"]
igvpath = path_dict["igv"]
testpath = path_dict["test"]
processingpath = path_dict["processing"]
annotationspath = path_dict["annotations"]
peakspath = path_dict["peaks"]
gopath = path_dict["go"]
hotpath = path_dict["hot"]
qsubpath = path_dict["qsub"]
# standardize paths for analysis:
if not option.mode in ["database", "measures"]:
peakspath = peakspath + option.peaks + "/"
alignerpath = bwapath
indexpath = alignerpath + "index/"
alignmentpath = alignerpath + "alignment/"
qcfilterpath = alignerpath + "qcfilter/"
qcmergepath = alignerpath + "qcmerge/"
# import configuration dictionaries:
source_dict = modencode.configBuild(inpath + "configure_source.txt")
method_dict = modencode.configBuild(inpath + "configure_method.txt")
context_dict = modencode.configBuild(inpath + "configure_context.txt")
# define organism parameters:
if option.organism == "hs" or option.organism == "h.sapiens":
organismTag = "hs"
#organismIGV = "ce6"
elif option.organism == "mm" or option.organism == "m.musculus":
organismTag = "mm"
#organismIGV = "ce6"
elif option.organism == "ce" or option.organism == "c.elegans":
organismTag = "ce"
#organismIGV = "ce6"
elif option.organism == "dm" or option.organism == "d.melanogaster":
organismTag = "dm"
#organismIGV = "dm5"
# define organisms:
organismTags = ["hs","mm","ce","dm"]
# specify genome size file:
if option.nuclear == "ON":
chromosomes = metrn.chromosomes[organismTag]["nuclear"]
genome_file = option.path + "/input/" + metrn.reference[organismTag]["genome"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["nuclear_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
chromosome_path = option.path + "/fasta/" + metrn.reference[organismTag]["chromosome_path"]
else:
chromosomes = metrn.chromosomes[organismTag]["complete"]
genome_file = option.path + "/input/" + metrn.reference[organismTag]["genome"]
genome_size_file = option.path + "/input/" + metrn.reference[organismTag]["complete_sizes"]
genome_size_dict = general.build_config(genome_size_file, mode="single", separator="\t", spaceReplace=True)
chromosome_path = option.path + "/fasta/" + metrn.reference[organismTag]["chromosome_path"]
# define window and max peaks flags:
if not option.mode in ["database", "measures"]:
windowFlag = "w" + option.window
maxpeaksFlag = "m" + option.max
# prepare output folders:
if not option.mode in ["database", "measures"]:
coordpath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/coord/"
fastapath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/fasta/"
outputpath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/output/"
general.pathGenerator(coordpath)
general.pathGenerator(fastapath)
general.pathGenerator(outputpath)
# prepare database-dependent folders:
if option.mode in ["graphing","pairwise","scanning","ortholog"]:
graphingpath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/graphing/" + option.name + "/"
pairwisepath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/pairwise/" + option.name + "/"
scanningpath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/scanning/" + option.name + "/"
orthologpath = memepath + option.peaks + "/" + windowFlag + "/" + maxpeaksFlag + "/ortholog/" + option.name + "/"
general.pathGenerator(graphingpath)
general.pathGenerator(pairwisepath)
general.pathGenerator(scanningpath)
general.pathGenerator(orthologpath)
# frequency measures mode:
if option.mode == "measures":
# define inputs and outputs:
genomefile = genome_file
coordfile = annotationspath + option.infile
tempsfile = annotationspath + "memecucu_" + option.infile + "_sequence.coord"
fastafile = annotationspath + "memecucu_" + option.infile + "_sequence.fasta"
print
if option.exclude != "OFF":
print "Filtering:", option.exclude
coordlines = open(coordfile).readlines()
coordfile = str(tempsfile)
c_output = open(tempsfile, "w")
for coordline in coordlines:
process = True
coorditems = coordline.strip().split("\t")
for exclusion in option.exclude.split(","):
if exclusion in coorditems:
process = False
break
if process:
print >>c_output, "\t".join(coorditems)
c_output.close()
print "Building region sequences..."
command = "fastaFromBed -fi " + genomefile + " -bed " + coordfile + " -fo " + fastafile + " -name"
os.system(command)
print "Building nucleotide frequencies..."
frequencyDict = {"A":0, "C":0, "G":0, "T":0}
fastaDict = fasta.buildfile(fastafile)
for sequence in fastaDict:
for nucleotide in sorted(frequencyDict.keys()):
frequencyDict[nucleotide] += fastaDict[sequence].count(nucleotide)
for nucleotide in sorted(frequencyDict.keys()):
print nucleotide, float(frequencyDict[nucleotide])/sum(frequencyDict.values())
print
# remove temporary files:
command = "rm -rf " + fastafile
os.system(command)
if option.exclude != "OFF":
command = "rm -rf " + tempsfile
os.system(command)
# sequence generation mode:
elif option.mode == "sequence":
# specify repeat mask flag (not implemented):
if option.repeatMask == "ON":
rm_handle = "T"
elif option.repeatMask == "OFF":
rm_handle = "F"
# define max peaks to evaluate and window around peak:
max_peaks = option.max
window = 0
if option.max != "OFF":
max_peaks = int(option.max)
if option.window != "OFF":
window = int(option.window)
# load peak files:
peakfiles = os.listdir(peakspath)
# prepare a temporary background exclusion bed file:
if option.exclude != "OFF":
m_infile = memepath + option.exclude
m_tmpfile = memepath + option.exclude + ".tmp"
command = 'grep -v "feature" ' + m_infile + ' > ' + m_tmpfile
os.system(command)
# extract sequences from peak calls:
master_dict, factors, k = dict(), list(), 0
print
print "Preparing target sequence files (FASTA):"
for peakfile in peakfiles:
if "_peaks.bed":
k += 1
print k, peakfile
dataset = peakfile.replace("_peaks.bed","")
organism, strain, factor, context, institute, method = metrn.labelComponents(dataset)
inputfile = peakspath + peakfile
coordfile = coordpath + peakfile
fastafile = fastapath + "memecucu_w" + option.window + "_m" + option.max + "_rm" + rm_handle + "_" + dataset + ".fasta"
# exclude genomic regions if necessary:
#if option.exclude != "OFF":
#
# p_infile = temppath
# p_outfile = macspath + "filtered/" + tempfile.replace(".bed","") + "_" + option.exclude.replace(".bed","") + "_filtered.bed"
#
# command = "intersectBed -v -a " + p_infile + " -b " + m_tmpfile + " > " + p_outfile
# os.system(command)
#
# tempfile = tempfile.replace(".bed","") + "_" + option.exclude.replace(".bed","") + "_filtered.bed"
# temppath = macspath + "filtered/" + tempfile
# generate FASTA file:
fastaGenerator(inputfile, coordfile, fastafile, genomefile=genome_file, window=window, top=option.max)
print
# motif discovery mode:
elif option.mode == "discover":
# launch motif analysis:
print
print "Executing MEME analysis:"
fastafiles = os.listdir(fastapath)
for fastafile in fastafiles:
if "memecucu" in fastafile and ".fasta" in fastafile:
command = "meme " + fastapath + fastafile + " -oc " + outputpath + fastafile.replace(".fasta","") + " " + option.parameters
print command
os.system(command)
print
# directed search mode:
elif option.mode == "fraction":
# launch motif analysis:
print
print "Searching for motif matches:"
searchDict = dict()
fastafiles = os.listdir(fastapath)
for fastafile in fastafiles:
if "memecucu" in fastafile and ".fasta" in fastafile:
k, i = 0, 0
sequenceDict = fasta.buildfile(fastapath + fastafile, options=["take.first"], append="")
for feature in sequenceDict:
if option.sequence in sequenceDict[feature]:
i += 1
k += 1
searchDict[fastafile] = float(i)/k
searchHits = general.valuesort(searchDict)
searchHits.reverse()
for searchHit in searchHits[:10]:
print searchHit, round(searchDict[searchHit], 2)
print
# motif database mode:
elif option.mode == "database":
# output files (standard, TRANSFAC, MEME):
b_infile = extraspath + option.background
s_utfile = extraspath + option.infile.replace(".txt",".stn")
t_utfile = extraspath + option.infile.replace(".txt",".dat")
m_utfile = extraspath + option.infile.replace(".txt",".meme")
s_output = open(s_utfile, "w")
t_output = open(t_utfile, "w")
# define header:
if option.parameters != "OFF":
header = True
for line in option.parameters.split(";"):
print >>s_output, line
# export TRANSFAC headers:
transfacLines = ["VV memeCucu:database transfac file", "XX"]
print >>t_output, "\n".join(transfacLines)
print
print "Loading input motifs..."
k = 0
motifs = open(extraspath + option.infile).read().split(">")
for motif in motifs:
lines = motif.replace("\r","\n").split("\n")
lines = general.clean(lines)
if len(lines) > 5:
title = lines.pop(0)
names = "MOTIF XXX"
names = names.replace("XXX", title)
infos = "letter-probability matrix: alength= ALENGTH w= WWW nsites= NSITES E= EVALUE "
infos = infos.replace("ALENGTH", "4")
infos = infos.replace("WWW", str(len(lines)))
infos = infos.replace("NSITES", str(option.max))
infos = infos.replace("EVALUE", "0")
# export standard format file:
print >>s_output, names
print >>s_output, infos
motiflines = list()
for line in lines:
motifline = " " + " ".join(line.split(" ")[1:]) + " "
motiflines.append(motifline)
print >>s_output, motifline
print >>s_output, ""
# build TRANSFAC format:
transfacLines = transfacGenerator(motiflines, name=title, mode="other")
print >>t_output, "\n".join(transfacLines)
k += 1
print "Processed", k, "motifs."
# close output file:
s_output.close()
t_output.close()
# convert TRANSFAC file to MEME format:
if option.background == "OFF":
command = "transfac2meme -logodds" + t_utfile + " > " + m_utfile
else:
command = "transfac2meme -logodds -bg " + b_infile + " " + t_utfile + " > " + m_utfile
os.system(command)
print
# motif graphing mode:
elif option.mode == "graphing":
print
print "Graphing motif database..."
titles = list()
motifs = open(extraspath + option.infile).read().split("MOTIF")
header = motifs.pop(0)
for motif in motifs:
inlines = motif.split("\n")
motif = inlines.pop(0)
title = motif.strip().split(" ")[0]
if title in titles:
print "Error:", title, "already processed!"
pdb.set_trace()
# select motifs of interest to search for matches:
if option.parameters in motif or option.parameters == "OFF":
titles.append(title)
# build TRANSFAC format:
transfacFile = graphingpath + title.replace("/","-") + ".dat"
transfacLines = ["VV memeCucu:" + title, "XX"]
transfacLines.extend(transfacGenerator(inlines, name=motif))
t_output = open(transfacFile, "w")
print >>t_output, "\n".join(transfacLines)
t_output.close()
# draw motif logo:
graphingFile = graphingpath + title.replace("/","-") + ".eps"
command = "weblogo -f TRANSFACFILE -D transfac -o GRAPHINGFILE --errorbars NO --size medium --color-scheme classic --units bits --sequence-type dna --composition none --aspect-ratio 4 --show-xaxis NO --show-yaxis NO --fineprint ''"
os.system(command.replace("TRANSFACFILE", transfacFile).replace("GRAPHINGFILE",graphingFile))
reversedFile = graphingpath + title.replace("/","-") + "-revComp.eps"
command = "weblogo -f TRANSFACFILE -D transfac -o GRAPHINGFILE --errorbars NO --size medium --color-scheme classic --units bits --sequence-type dna --composition none --aspect-ratio 4 --show-xaxis NO --show-yaxis NO --fineprint '' --reverse --complement"
os.system(command.replace("TRANSFACFILE", transfacFile).replace("GRAPHINGFILE",reversedFile))
# remove TRANSFAC file:
command = "rm -rf " + transfacFile
os.system(command)
print "Motifs graphed:", len(titles)
print
# motif comparison mode:
elif option.mode == "pairwise":
print
print "Loading motif database..."
titles = list()
motifs = open(extraspath + option.infile).read().split("MOTIF")
header = motifs.pop(0)
for motif in motifs:
inlines = motif.split("\n")
motif = inlines.pop(0)
title = motif.strip().split(" ")[0]
if title in titles:
print "Error:", title, "already processed!"
pdb.set_trace()
# select motifs of interest to search for matches:
if option.parameters in motif or option.parameters == "OFF":
# initialize output path for target motif:
targetpath = pairwisepath + title + "/"
general.pathGenerator(targetpath)
# build motif (meme) file for target motif:
t_utfile = targetpath + "motif.meme"
t_output = open(t_utfile, "w")
print >>t_output, header
print >>t_output, "MOTIF " + title
for inline in inlines:
print >>t_output, inline.strip()
t_output.close()
# execute motif comparison:
print "Processing:", title
command = "tomtom -o OUTPUT TARGETMEME DATABASEMEME"
command = command.replace("OUTPUT", targetpath + "tomtom")
command = command.replace("TARGETMEME", t_utfile)
command = command.replace("DATABASEMEME", extraspath + option.infile)
os.system(command)
print
print
# motif scanning mode:
elif option.mode == "scanning":
# specify repeat mask flag (not implemented):
if option.repeatMask == "ON":
rm_handle = "T"
elif option.repeatMask == "OFF":
rm_handle = "F"
# define max peaks to evaluate and window around peak:
max_peaks = option.max
window = 0
if option.max != "OFF":
max_peaks = int(option.max)
if option.window != "OFF":
window = int(option.window)
# define target flag:
if option.target == "OFF":
targetFlag = "factor"
elif option.target == "ALL":
targetFlag = "motifs"
else:
targetFlag = option.target
# load peak files:
peakfiles = os.listdir(peakspath)
# initate scanning report file:
f_utfile = scanningpath + "memecucu_" + option.mode + "_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".txt"
s_utfile = scanningpath + "memecucu_" + option.mode + "_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".sum"
f_output = open(f_utfile, "w")
s_output = open(s_utfile, "w")
print >>f_output, "\t".join(["factor","motif","fraction"])
print >>s_output, "\t".join(["factor","motif","fraction"])
print
print "Loading motif database..."
results = dict()
titles = list()
motifs = open(extraspath + option.infile).read().split("MOTIF")
header = motifs.pop(0)
for motif in motifs:
inlines = motif.split("\n")
motif = inlines.pop(0)
title = motif.strip().split(" ")[0]
if title in titles:
print "Error:", title, "already processed!"
pdb.set_trace()
# select motifs of interest to search for matches:
if (option.parameters in motif and option.target != "OFF") or (option.parameters in motif and option.target =="OFF" and "disc" in title) or (option.parameters == "OFF"):
print "Processing:", title
# initialize output path for target motif:
targetpath = scanningpath + title + "/"
general.pathGenerator(targetpath)
# build TRANSFAC format:
transfacFile = targetpath + "motif.dat"
transfacLines = ["VV memeCucu:scanning transfac file", "XX"]
transfacLines.extend(transfacGenerator(inlines, name=motif))
t_output = open(transfacFile, "w")
print >>t_output, "\n".join(transfacLines)
t_output.close()
# convert TRANSFAC file to MEME format:
memeFile = targetpath + "motif.meme"
if option.background == "OFF":
command = "transfac2meme -logodds " + transfacFile + " > " + memeFile
else:
command = "transfac2meme -logodds -bg " + extraspath + option.background + " " + transfacFile + " > " + memeFile
os.system(command)
# find peak files for factor:
matchHit, matchDict, matchFiles = "OFF", dict(), list()
for peakfile in peakfiles:
matchFlag = False
organism, strain, factor, context, institute, method = metrn.labelComponents(peakfile)
if option.target == "OFF" and factor.lower() in motif.replace(" ","_").split("_"):
matchFlag = True
elif option.target != "OFF" and factor in option.target.split(","):
matchFlag = True
elif option.target == "ALL":
matchFlag = True
if matchFlag:
matchFiles.append(peakfile)
matchHit = str(factor)
if not matchHit in matchDict:
matchDict[matchHit] = list()
matchDict[matchHit].append(peakfile)
# check that matches have been found:
if matchHit == "OFF":
print "No matches found:", motif
print
pdb.set_trace()
# process matches:
if matchDict != dict() and (matchHit.lower() in title or option.target != "OFF"):
for matchHit in matchDict:
if not matchHit in results:
results[matchHit] = dict()
fractions = list()
for matchFile in matchDict[matchHit]:
print "Scanning:", matchHit, "(" + matchFile + ")"
# specify inputs and outputs:
dataset = matchFile.replace("_peaks.bed", "")
inputfile = peakspath + matchFile
coordfile = coordpath + matchFile
fastafile = fastapath + "memecucu_w" + option.window + "_m" + option.max + "_rm" + rm_handle + "_" + dataset + ".fasta"
# generate FASTA file:
fastaGenerator(inputfile, coordfile, fastafile, genomefile=genome_file, window=window, top=option.max)
# determine number of input sequences:
sequenceCount = len(open(coordfile).readlines())
threshold = str(sequenceCount/10)
# detect motif in sequences:
if option.background == "OFF":
command = "mast -ev " + threshold + " " + memeFile + " " + fastafile
else:
command = "mast -bfile " + extraspath + option.background + " -ev " + threshold + " " + memeFile + " " + fastafile
os.system(command)
# collect motif matches:
mastlines = open("mast_out/mast.txt").read().split("SECTION I: HIGH-SCORING SEQUENCES")[1].split("SECTION II")[0].split("-------- ------")[1].split("**************")[0]
mastlines = "".join(mastlines).strip().split("\n")
matches, fraction = len(mastlines), round(float(len(mastlines))/sequenceCount, 2)
fractions.append(fraction)
print "Matches:", matches, "(" + str(100*fraction) + "%)"
print
# remove mast output:
command = "rm -rf mast_out"
os.system(command)
# export summary file per motif:
if fractions != list():
results[matchHit][title] = max(fractions)
print title, matchHit, str(100*max(fractions)) + "%"
print >>f_output, "\t".join(map(str, [matchHit, title, max(fractions)]))
print
# export summary file per factor:
for factor in sorted(results.keys()):
motifs = general.valuesort(results[factor])
motifs.reverse()
print >>s_output, "\t".join(map(str, [factor, motifs[0], results[factor][motifs[0]]]))
# close output files:
f_output.close()
s_output.close()
print
# motif orthology mode:
elif option.mode == "ortholog":
# specify repeat mask flag (not implemented):
if option.repeatMask == "ON":
rm_handle = "T"
elif option.repeatMask == "OFF":
rm_handle = "F"
# define max peaks to evaluate and window around peak:
max_peaks = option.max
window = 0
if option.max != "OFF":
max_peaks = int(option.max)
if option.window != "OFF":
window = int(option.window)
# define target flag:
if option.target == "OFF":
targetFlag = "factor"
elif option.target == "ALL":
targetFlag = "motifs"
else:
targetFlag = option.target
print
print "Loading motif scan results..."
scanfile = scanningpath + "memecucu_scanning_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".txt"
bestfile = scanningpath + "memecucu_scanning_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".sum"
scanDict = general.build2(scanfile, i="factor", j="motif", x="fraction", mode="matrix")
bestDict = general.build2(bestfile, i="factor", j="motif", x="fraction", mode="matrix")
# define output files:
f_output = open(orthologpath + "memecucu_ortholog_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".txt", "w")
c_output = open(orthologpath + "memecucu_ortholog_" + targetFlag + "_w" + option.window + "_m" + option.max + "_rm" + rm_handle + ".cut", "w")
print >>f_output, "\t".join(["factor","ortholog","motif","fraction"])
print >>c_output, "\t".join(["fraction.cutoff","fraction.factors"])
print "Loading orthology tree..."
orthologDict = metrn.orthologMapper(extraspath + option.infile, species=option.organism, targets=option.parameters.split(","))
print "Scanning orthologous factors..."
for cutoff in general.drange(0.2, 0.9, 0.05):
successDict, matchDict, scannedFactors, matchedFactors, successFactors = motifScanner(scanDict, orthologDict, cutoff=cutoff, size=3, exclusions=option.exclude)
print >>c_output, "\t".join(map(str, [cutoff, round(float(successFactors)/matchedFactors, 3)]))
# find highest overlap ortholog and motif (per factor):
rankDict, infoDict = dict(), dict()
for factor in matchDict:
highScore, highMotif, highOrtholog = 0, False, False
for ortholog in matchDict[factor]:
for motif in matchDict[factor][ortholog]:
if matchDict[factor][ortholog][motif] > highScore:
highScore = float(matchDict[factor][ortholog][motif])
highOrtholog = str(ortholog)
highMotif = str(motif)
if highOrtholog:
rankDict[factor] = highScore
infoDict[factor] = [highOrtholog, highMotif, highScore]
# export highest overlap ortholog and motif (per factor):
rankFactors = general.valuesort(rankDict)
rankFactors.reverse()
for factor in rankFactors:
print >>f_output, "\t".join(map(str, [factor] + infoDict[factor]))
# close output files:
f_output.close()
c_output.close()
print
# Script entry point: run the analysis pipeline and report wall-clock
# completion time (Python 2 print-statement syntax).
if __name__ == "__main__":
    main()
    print "Completed:", time.asctime(time.localtime())
#weblogo -f HMBOX1_DBD.dat -D transfac -o test.eps --errorbars NO --size medium --color-scheme classic
|
claraya/meTRN
|
python/memeCucu.py
|
Python
|
mit
| 31,336
|
[
"BWA",
"Biopython",
"Bowtie"
] |
443e19c5c96b7937f981198c484ac5c6b8d3e7aa8a570d8498605a83071ec208
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2015 Stanford University and the Authors
#
# Authors: Jason Swails
# Contributors:
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this module originate from the ParmEd program, copyright (c) 2014
# Jason Swails, which is also distributed under the GNU Lesser General Public
# License
#
# Other portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
"""Load an md.Topology from CHARMM/XPLOR PSF files
"""
# Written by Jason Swails <jason.swails@gmail.com> 9/8/2014
# This code was mostly stolen and stripped down from ParmEd
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
from mdtraj.core import topology, element as elem
from mdtraj.formats import pdb
from mdtraj.utils.unit import unit_definitions as u
__all__ = ['load_psf']
##############################################################################
# Functions
##############################################################################
class PSFError(Exception):
    """Signals a malformed or otherwise unparsable PSF file."""
class _PSFEOF(Exception):
""" Raised when EOF is hit on the parsed PSF """
pass
def _convert(string, type, message):
"""Converts a string to the desired data type, making sure to raise PSFError
with the given message in the case of a failure.
Parameters
----------
string : str
String to convert
type : simple data type
Either int, float, or str
message : str
Message to assign to the PSFError if the conversion fails
Returns
-------
The converted string to the desired datatype
"""
try:
return type(string)
except ValueError:
raise PSFError('Could not convert %s [%s]' % (message, string))
def _parse_psf_section(psf):
    """Read one section of an open PSF file.

    Parameters
    ----------
    psf : open file object
        File positioned at the start of the section to parse.

    Returns
    -------
    (title, pointers, data)
        ``title`` is the upper-cased section label, ``pointers`` is the
        single integer pointer (or a tuple of them if several were present
        on the header line), and ``data`` is the section payload: raw text
        lines for NATOM/NTITLE sections, integers for everything else.

    Raises
    ------
    PSFError
        On any parse error.
    _PSFEOF
        When end-of-file is reached before a section header.
    """
    # Skip leading blank lines; a truly empty read means end-of-file.
    raw = psf.readline()
    while not raw.strip():
        if not raw:
            raise _PSFEOF('Unexpected EOF in PSF file')
        raw = psf.readline()

    # The header line looks like "<pointers...> !TITLE[:description]".
    if '!' not in raw:
        raise PSFError('Could not determine section title')
    bang = raw.index('!')
    fields = raw[:bang].split()
    title = raw[bang + 1:].strip().upper()
    # Strip any trailing description after the colon.
    colon = title.find(':')
    if colon != -1:
        title = title[:colon]

    if len(fields) == 1:
        pointers = _convert(fields[0], int, 'pointer')
    else:
        pointers = tuple(_convert(f, int, 'pointer') for f in fields)

    raw = psf.readline().strip()
    # The NNB section contains a spurious blank line right after the header.
    if title.startswith('NNB') and not raw:
        raw = psf.readline().strip()

    # NATOM and NTITLE payloads are kept as strings (NATOM lines are parsed
    # later by the caller); all other sections hold integer pointers.
    keep_text = title == 'NATOM' or title == 'NTITLE'
    data = []
    while raw:
        if keep_text:
            data.append(raw)
        else:
            data.extend(_convert(tok, int, 'PSF data') for tok in raw.split())
        raw = psf.readline().strip()
    return title, pointers, data
def load_psf(fname):
    """Load a CHARMM or XPLOR PSF file from disk

    Parameters
    ----------
    fname : str
        Path to the PSF file on disk

    Returns
    -------
    top : md.Topology
        The resulting topology as an md.Topology object

    Notes
    -----
    Only the bond and atom sections are read in, and all atoms are added to the
    same chain in the topology

    Raises
    ------
    PSFError if any parsing errors occur

    Examples
    --------
    >>> topology = md.load_psf('mysystem.psf')
    >>> # or
    >>> trajectory = md.load('trajectory.dcd', top='system.psf')
    """
    top = topology.Topology()
    with open(fname, 'r') as f:
        # A valid PSF starts with the literal token "PSF" on its first line.
        line = f.readline()
        if not line.startswith('PSF'):
            raise PSFError('Unrecognized PSF file.')
        # Store all of the sections and store them in a dict
        f.readline()
        psfsections = dict()
        while True:
            try:
                sec, ptr, data = _parse_psf_section(f)
            except _PSFEOF:
                break
            psfsections[sec] = (ptr, data)
            # We only have to parse up to the NBOND section
            if sec == 'NBOND': break
    # (resid, resname, segid) triple of the residue currently being built;
    # a change in any component starts a new residue below.
    prev_residue = (None, None, None)
    pdb.PDBTrajectoryFile._loadNameReplacementTables()
    natom = _convert(psfsections['NATOM'][0], int, 'natom')
    last_chain = None
    for i in range(natom):
        # NATOM line layout: index, segid, resid, resname, atom name,
        # atom type, charge, mass, ...
        words = psfsections['NATOM'][1][i].split()
        atid = _convert(words[0], int, 'atom index')
        if atid != i + 1:
            raise PSFError('Nonsequential atom indices detected!')
        segid = words[1]
        resid = _convert(words[2], int, 'residue number')
        rname = words[3]
        name = words[4]
        # attype = words[5]
        # charge = _convert(words[6], float, 'partial atomic charge')
        mass = _convert(words[7], float, 'atomic mass')
        # Each distinct segment id begins a new chain in the topology.
        if last_chain != segid:
            c = top.add_chain()
            last_chain = segid
        curr_residue = (resid, rname, segid)
        if prev_residue != curr_residue:
            prev_residue = curr_residue
            # Map residue name to its PDB-standard spelling when known.
            try:
                rname = pdb.PDBTrajectoryFile._residueNameReplacements[rname]
            except KeyError:
                pass
            r = top.add_residue(rname, c, resid, segid)
        # Map the atom name to its PDB-standard spelling when known.
        try:
            name = pdb.PDBTrajectoryFile._atomNameReplacements[rname][name]
        except KeyError:
            pass
        # Try to guess the element from the atom name for some of the common
        # ions using the names that CHARMM assigns to ions. If it's not one of
        # these 'weird' ion names, look up the element by mass. If the mass is
        # 0, assume a lone pair
        upper = name.upper()
        if upper.startswith('CLA'):
            element = elem.chlorine
        elif upper.startswith('SOD'):
            element = elem.sodium
        elif upper.startswith('POT'):
            element = elem.potassium
        elif upper == 'CAL':
            element = elem.calcium
        elif mass == 0:
            element = elem.virtual
        else:
            element = elem.Element.getByMass(mass*u.dalton)
        top.add_atom(name, element, r)
    # Add bonds to the topology
    atoms = list(top.atoms)
    bond_data = psfsections['NBOND'][1]
    nbond = _convert(psfsections['NBOND'][0], int, 'number of bonds')
    # The NBOND payload is a flat list of 1-based atom-index pairs.
    if len(bond_data) != nbond * 2:
        raise PSFError('Got %d indexes for %d bonds' % (len(bond_data), nbond))
    for i in range(nbond):
        i2 = i * 2
        top.add_bond(atoms[bond_data[i2]-1], atoms[bond_data[i2+1]-1])
    return top
|
swails/mdtraj
|
mdtraj/formats/psf.py
|
Python
|
lgpl-2.1
| 9,741
|
[
"CHARMM",
"Dalton",
"MDTraj",
"OpenMM"
] |
56933c6c1da7311b9a1d8b848cd79ba2e21017774355e77e61b417c47f3c0077
|
# Copyright (C) 2012,2013,2017(H)
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
# Copyright (C) 2019
# Max Planck Computing and Data Facility
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
espressopp.standard_system.Default
**********************************
.. py:method:: espressopp.standard_system.Default(box, rc = 1.12246, skin = 0.3, dt = 0.005, temperature = None)
:param box:
:param real rc:
:param real skin:
:param real dt:
:param temperature:
:type box:
:type temperature:
Return default system and integrator, no interactions, no particles are set
if tempearture is != None then Langevin thermostat is set to temperature (gamma is 1.0)
"""
import espressopp
import mpi4py.MPI as MPI
def Default(box, rc=1.12246, skin=0.3, dt=0.005, temperature=None, halfCellInt = 1):
    """Return a default (system, integrator) pair with no interactions and no particles.

    If *temperature* is not None, a Langevin thermostat with gamma 1.0 is
    attached to the integrator at that temperature.
    """
    # Core system: RNG, orthorhombic boundary conditions, verlet-list skin.
    system = espressopp.System()
    system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
    system.skin = skin
    # Derive the MPI node grid and the cell grid from box, cutoff and skin.
    nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size,box,rc,skin)
    cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin, halfCellInt)
    system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid, halfCellInt)
    print "nodeGrid: ",nodeGrid, " cellGrid: ",cellGrid, "half cell: ", halfCellInt
    # Velocity-Verlet integrator with the requested timestep.
    integrator = espressopp.integrator.VelocityVerlet(system)
    integrator.dt = dt
    # Optionally thermostat the system (gamma fixed at 1.0).
    if (temperature != None):
        thermostat = espressopp.integrator.LangevinThermostat(system)
        thermostat.gamma = 1.0
        thermostat.temperature = temperature
        integrator.addExtension(thermostat)
    return system, integrator
|
govarguz/espressopp
|
src/standard_system/Default.py
|
Python
|
gpl-3.0
| 2,476
|
[
"ESPResSo"
] |
3c119af754783a62060a42123012aa86e4696ae7e4946ad6ec17cabb921105fb
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import os
import re
import sys
import uuid
import shutil
import inspect
import subprocess
from psi4.driver import qcdb
from psi4.driver import p4util
from psi4.driver.molutil import *
from psi4.driver.p4util.exceptions import *
# never import driver, wrappers, or aliases into this file
P4C4_INFO = {}
def run_cfour(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Stanton and Gauss's CFOUR code.
Also processes results back into Psi4 format.
This function is not called directly but is instead called by
:py:func:`~psi4.energy` or :py:func:`~psi4.optimize` when a Cfour
method is requested (through *name* argument). In order to function
correctly, the Cfour executable ``xcfour`` must be present in
:envvar:`PATH` or :envvar:`PSIPATH`.
.. hlist::
:columns: 1
* Many :ref:`PSI Variables <apdx:cfour_psivar>` extracted from the Cfour output
* Python dictionary of associated file constants accessible as ``P4C4_INFO['zmat']``, ``P4C4_INFO['output']``, ``P4C4_INFO['grd']``, *etc.*
:type name: str
:param name: ``'c4-scf'`` || ``'c4-ccsd(t)'`` || ``'cfour'`` || etc.
First argument, usually unlabeled. Indicates the computational
method to be applied to the system.
:type keep: :ref:`boolean <op_py_boolean>`
:param keep: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether to delete the Cfour scratch directory upon
completion of the Cfour job.
:type path: str
:param path:
Indicates path to Cfour scratch directory (with respect to Psi4
scratch directory). Otherwise, the default is a subdirectory
within the Psi4 scratch directory.
If specified, GENBAS and/or ZMAT within will be used.
:type genbas: str
:param genbas:
Indicates that contents should be used for GENBAS file.
GENBAS is a complicated topic. It is quite unnecessary if the
molecule is from a molecule {...} block and basis is set through
|Psifours| BASIS keyword. In that case, a GENBAS is written from
LibMints and all is well. Otherwise, a GENBAS is looked for in
the usual places: PSIPATH, PATH, PSIDATADIR/basis. If path kwarg is
specified, also looks there preferentially for a GENBAS. Can
also specify GENBAS within an input file through a string and
setting the genbas kwarg. Note that due to the input parser's
aggression, blank lines need to be replaced by the text blankline.
"""
lowername = name.lower()
internal_p4c4_info = {}
return_wfn = kwargs.pop('return_wfn', False)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
optstash = p4util.OptionsState(
['CFOUR', 'TRANSLATE_PSI4'])
# Determine calling function and hence dertype
calledby = inspect.stack()[1][3]
dertype = ['energy', 'gradient', 'hessian'].index(calledby)
#print('I am %s called by %s called by %s.\n' %
# (inspect.stack()[0][3], inspect.stack()[1][3], inspect.stack()[2][3]))
# Save submission directory
current_directory = os.getcwd()
# Move into job scratch directory
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path())
# Construct and move into cfour subdirectory of job scratch directory
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir)
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) +
':' + os.environ.get('PATH') +
':' + core.get_datadir() + '/basis',
'GENBAS_PATH': core.get_datadir() + '/basis',
'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
'MKL_NUM_THREADS': os.environ.get('MKL_NUM_THREADS'),
'OMP_NUM_THREADS': os.environ.get('OMP_NUM_THREADS'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
if 'path' in kwargs:
lenv['PATH'] = kwargs['path'] + ':' + lenv['PATH']
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
# Load the GENBAS file
genbas_path = qcdb.search_file('GENBAS', lenv['GENBAS_PATH'])
if genbas_path:
try:
shutil.copy2(genbas_path, psioh.get_default_path() + cfour_tmpdir)
except shutil.Error: # should only fail if src and dest equivalent
pass
core.print_out("\n GENBAS loaded from %s\n" % (genbas_path))
core.print_out(" CFOUR to be run from %s\n" % (psioh.get_default_path() + cfour_tmpdir))
else:
message = """
GENBAS file for CFOUR interface not found. Either:
[1] Supply a GENBAS by placing it in PATH or PSIPATH
[1a] Use cfour {} block with molecule and basis directives.
[1b] Use molecule {} block and CFOUR_BASIS keyword.
[2] Allow Psi4's internal basis sets to convert to GENBAS
[2a] Use molecule {} block and BASIS keyword.
"""
core.print_out(message)
core.print_out(' Search path that was tried:\n')
core.print_out(lenv['PATH'].replace(':', ', '))
# Generate the ZMAT input file in scratch
if 'path' in kwargs and os.path.isfile('ZMAT'):
core.print_out(" ZMAT loaded from %s\n" % (psioh.get_default_path() + kwargs['path'] + '/ZMAT'))
else:
with open('ZMAT', 'w') as cfour_infile:
cfour_infile.write(write_zmat(lowername, dertype, molecule))
internal_p4c4_info['zmat'] = open('ZMAT', 'r').read()
#core.print_out('\n====== Begin ZMAT input for CFOUR ======\n')
#core.print_out(open('ZMAT', 'r').read())
#core.print_out('======= End ZMAT input for CFOUR =======\n\n')
#print('\n====== Begin ZMAT input for CFOUR ======')
#print(open('ZMAT', 'r').read())
#print('======= End ZMAT input for CFOUR =======\n')
if 'genbas' in kwargs:
with open('GENBAS', 'w') as cfour_basfile:
cfour_basfile.write(kwargs['genbas'].replace('\nblankline\n', '\n\n'))
core.print_out(' GENBAS loaded from kwargs string\n')
# Close psi4 output file and reopen with filehandle
print('output in', current_directory + '/' + core.outfile_name())
pathfill = '' if os.path.isabs(core.outfile_name()) else current_directory + os.path.sep
# Handle threading
# OMP_NUM_THREADS from env is in lenv from above
# threads from psi4 -n (core.get_num_threads()) is ignored
# CFOUR_OMP_NUM_THREADS psi4 option takes precedence, handled below
if core.has_option_changed('CFOUR', 'CFOUR_OMP_NUM_THREADS'):
lenv['OMP_NUM_THREADS'] = str(core.get_option('CFOUR', 'CFOUR_OMP_NUM_THREADS'))
#print("""\n\n<<<<< RUNNING CFOUR ... >>>>>\n\n""")
# Call executable xcfour, directing cfour output to the psi4 output file
cfour_executable = kwargs['c4exec'] if 'c4exec' in kwargs else 'xcfour'
try:
retcode = subprocess.Popen([cfour_executable], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
message = ('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
data = data.decode('utf-8')
if not data:
break
core.print_out(data)
c4out += data
internal_p4c4_info['output'] = c4out
c4files = {}
core.print_out('\n')
for item in ['GRD', 'FCMFINAL', 'DIPOL']:
try:
with open(psioh.get_default_path() + cfour_tmpdir + '/' + item, 'r') as handle:
c4files[item] = handle.read()
core.print_out(' CFOUR scratch file %s has been read\n' % (item))
core.print_out('%s\n' % c4files[item])
internal_p4c4_info[item.lower()] = c4files[item]
except IOError:
pass
core.print_out('\n')
if molecule.name() == 'blank_molecule_psi4_yo':
qcdbmolecule = None
else:
molecule.update_geometry()
qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
qcdbmolecule.update_geometry()
# c4mol, if it exists, is dinky, just a clue to geometry of cfour results
psivar, c4grad, c4mol = qcdb.cfour.harvest(qcdbmolecule, c4out, **c4files)
# Absorb results into psi4 data structures
for key in psivar.keys():
core.set_variable(key.upper(), float(psivar[key]))
if qcdbmolecule is None and c4mol is not None:
molecule = geometry(c4mol.create_psi4_string_from_molecule(), name='blank_molecule_psi4_yo')
molecule.update_geometry()
# This case arises when no Molecule going into calc (cfour {} block) but want
# to know the orientation at which grad, properties, etc. are returned (c4mol).
# c4mol is dinky, w/o chg, mult, dummies and retains name
# blank_molecule_psi4_yo so as to not interfere with future cfour {} blocks
if c4grad is not None:
mat = core.Matrix.from_list(c4grad)
core.set_gradient(mat)
#print ' <<< [3] C4-GRD-GRAD >>>'
#mat.print()
# exit(1)
# # Things needed core.so module to do
# collect c4out string
# read GRD
# read FCMFINAL
# see if theres an active molecule
# # Things delegatable to qcdb
# parsing c4out
# reading GRD and FCMFINAL strings
# reconciling p4 and c4 molecules (orient)
# reconciling c4out and GRD and FCMFINAL results
# transforming frame of results back to p4
# # Things run_cfour needs to have back
# psivar
# qcdb.Molecule of c4?
# coordinates?
# gradient in p4 frame
# # Process the cfour output
# psivar, c4coord, c4grad = qcdb.cfour.cfour_harvest(c4out)
# for key in psivar.keys():
# core.set_variable(key.upper(), float(psivar[key]))
#
# # Awful Hack - Go Away TODO
# if c4grad:
# molecule = core.get_active_molecule()
# molecule.update_geometry()
#
# if molecule.name() == 'blank_molecule_psi4_yo':
# p4grad = c4grad
# p4coord = c4coord
# else:
# qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
# #p4grad = qcdbmolecule.deorient_array_from_cfour(c4coord, c4grad)
# #p4coord = qcdbmolecule.deorient_array_from_cfour(c4coord, c4coord)
#
# with open(psioh.get_default_path() + cfour_tmpdir + '/GRD', 'r') as cfour_grdfile:
# c4outgrd = cfour_grdfile.read()
# print('GRD\n',c4outgrd)
# c4coordGRD, c4gradGRD = qcdb.cfour.cfour_harvest_files(qcdbmolecule, grd=c4outgrd)
#
# p4mat = core.Matrix.from_list(p4grad)
# core.set_gradient(p4mat)
# print(' <<< P4 PSIVAR >>>')
# for item in psivar:
# print(' %30s %16.8f' % (item, psivar[item]))
#print(' <<< P4 COORD >>>')
#for item in p4coord:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print(' <<< P4 GRAD >>>')
# for item in c4grad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# Clean up cfour scratch directory unless user instructs otherwise
keep = yes.match(str(kwargs['keep'])) if 'keep' in kwargs else False
os.chdir('..')
try:
if keep or ('path' in kwargs):
core.print_out('\n CFOUR scratch files have been kept in %s\n' % (psioh.get_default_path() + cfour_tmpdir))
else:
shutil.rmtree(cfour_tmpdir)
except OSError as e:
print('Unable to remove CFOUR temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory and reopen output file
os.chdir(current_directory)
core.print_out('\n')
p4util.banner(' Cfour %s %s Results ' % (name.lower(), calledby.capitalize()))
core.print_variables()
if c4grad is not None:
core.get_gradient().print_out()
core.print_out('\n')
p4util.banner(' Cfour %s %s Results ' % (name.lower(), calledby.capitalize()))
core.print_variables()
if c4grad is not None:
core.get_gradient().print_out()
# Quit if Cfour threw error
if 'CFOUR ERROR CODE' in core.variables():
raise ValidationError("""Cfour exited abnormally.""")
P4C4_INFO.clear()
P4C4_INFO.update(internal_p4c4_info)
optstash.restore()
# new skeleton wavefunction w/mol, highest-SCF basis (just to choose one), & not energy
# Feb 2017 hack. Could get proper basis in skel wfn even if not through p4 basis kw
gobas = core.get_global_option('BASIS') if core.get_global_option('BASIS') else 'sto-3g'
basis = core.BasisSet.build(molecule, "ORBITAL", gobas)
if basis.has_ECP():
raise ValidationError("""ECPs not hooked up for Cfour""")
wfn = core.Wavefunction(molecule, basis)
optstash.restore()
if dertype == 0:
finalquantity = psivar['CURRENT ENERGY']
elif dertype == 1:
finalquantity = core.get_gradient()
wfn.set_gradient(finalquantity)
if finalquantity.rows(0) < 20:
core.print_out('CURRENT GRADIENT')
finalquantity.print_out()
elif dertype == 2:
pass
#finalquantity = finalhessian
#wfn.set_hessian(finalquantity)
#if finalquantity.rows(0) < 20:
# core.print_out('CURRENT HESSIAN')
# finalquantity.print_out()
return wfn
def cfour_list():
    """Return the list of method names that Cfour can handle through
    :py:func:`~driver.energy`."""
    available_methods = qcdb.cfour.cfour_list()
    return available_methods
def cfour_gradient_list():
    """Return the list of method names for which Cfour provides analytic
    :py:func:`~driver.gradient` computations."""
    available_methods = qcdb.cfour.cfour_gradient_list()
    return available_methods
def cfour_hessian_list():
    """Form list of Cfour analytic :py:func:`~driver.hessian` arguments."""
    return qcdb.cfour.cfour_hessian_list()
def cfour_psivar_list():
    """Return a dictionary of :ref:`PSI Variables <apdx:cfour_psivar>`
    set by Cfour methods."""
    variable_map = qcdb.cfour.cfour_psivar_list()
    return variable_map
def write_zmat(name, dertype, molecule):
    """Return a string with the contents of a Cfour ZMAT input file.

    Gathers geometry from the active *molecule*, keywords from current
    Psi4 option settings, and any literal text from the ``cfour {...}``
    block.

    :param name: lowercase method name used to select the Cfour model chemistry
    :param dertype: derivative level (0 = energy, 1 = gradient, 2 = hessian)
    :param molecule: active Psi4 molecule (may be the dummy blank molecule
        when geometry is supplied the Cfour way)
    :raises ValidationError: if the assembled ZMAT does not contain exactly
        one ``*CFOUR(...)`` keyword block, or if the basis uses ECPs
    """
    # Handle memory: skip the directive when Psi4's memory equals the
    # Cfour default (524 MB), so Cfour's own default applies.
    mem = int(0.000001 * core.get_memory())
    if mem == 524:
        memcmd, memkw = '', {}
    else:
        memcmd, memkw = qcdb.cfour.muster_memory(mem)

    # Handle molecule and basis set
    if molecule.name() == 'blank_molecule_psi4_yo':
        # No Psi4 molecule: geometry/basis come from the cfour {...} block.
        molcmd, molkw = '', {}
        bascmd, baskw = '', {}
        core.set_local_option('CFOUR', 'TRANSLATE_PSI4', False)
    else:
        molecule.update_geometry()
        qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
        qcdbmolecule.tagline = molecule.name()
        molcmd, molkw = qcdbmolecule.format_molecule_for_cfour()

        if core.get_global_option('BASIS') == '':
            bascmd, baskw = '', {}
        else:
            user_pg = molecule.schoenflies_symbol()
            molecule.reset_point_group('c1')  # need basis printed for *every* atom
            qbs = core.BasisSet.build(molecule, "BASIS", core.get_global_option('BASIS'))
            if qbs.has_ECP():
                raise ValidationError("""ECPs not hooked up for Cfour""")
            with open('GENBAS', 'w') as cfour_basfile:
                cfour_basfile.write(qbs.genbas())
            core.print_out(' GENBAS loaded from Psi4 LibMints for basis %s\n' % (core.get_global_option('BASIS')))
            molecule.reset_point_group(user_pg)
            molecule.update_geometry()
            bascmd, baskw = qcdbmolecule.format_basis_for_cfour(qbs.has_puream())

    # Handle psi4 keywords implying cfour keyword values
    if core.get_option('CFOUR', 'TRANSLATE_PSI4'):
        psicmd, psikw = qcdb.cfour.muster_psi4options(p4util.prepare_options_for_modules(changedOnly=True))
    else:
        psicmd, psikw = '', {}

    # Handle calc type and quantum chemical method.
    # (The original issued this call twice, redundantly; once suffices.)
    mdccmd, mdckw = qcdb.cfour.muster_modelchem(name, dertype)

    # Handle driver vs input/default keyword reconciliation
    userkw = p4util.prepare_options_for_modules()
    for kwdict in (memkw, molkw, baskw, psikw, mdckw):
        userkw = qcdb.options.reconcile_options(userkw, kwdict)

    # Handle conversion of psi4 keyword structure into cfour format
    optcmd = qcdb.options.prepare_options_for_cfour(userkw)

    # Handle text to be passed untouched to cfour
    litcmd = core.get_global_option('LITERAL_CFOUR')

    # Assemble ZMAT pieces and sanity-check that exactly one keyword
    # block (*CFOUR/*ACES2/*CRAPS) made it into the deck.
    zmat = memcmd + molcmd + optcmd + mdccmd + psicmd + bascmd + litcmd
    if len(re.findall(r'^\*(ACES2|CFOUR|CRAPS)\(', zmat, re.MULTILINE)) != 1:
        core.print_out('\n Faulty ZMAT constructed:\n%s' % (zmat))
        raise ValidationError("""
Multiple *CFOUR(...) blocks in input. This usually arises
because molecule or options are specified both the psi4 way through
molecule {...} and set ... and the cfour way through cfour {...}.""")

    return zmat
|
psi4/psi4
|
psi4/driver/procrouting/interface_cfour.py
|
Python
|
lgpl-3.0
| 19,073
|
[
"CFOUR",
"Psi4"
] |
27b1924ff3346f8d78b93976acdd2c208443ce01cf873a2ac0a11776718a60e6
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""
A package for converting cubes to and from specific file formats.
"""
from iris.io.format_picker import (
FileExtension,
FormatAgent,
FormatSpecification,
LeadingLine,
MagicNumber,
UriProtocol,
)
from . import abf, name, netcdf, nimrod, pp, um
__all__ = ["FORMAT_AGENT"]

# Single module-level registry used by Iris I/O to sniff the file format
# of a given URI.
FORMAT_AGENT = FormatAgent()
FORMAT_AGENT.__doc__ = (
    "The FORMAT_AGENT is responsible for identifying the "
    "format of a given URI. New formats can be added "
    "with the **add_spec** method."
)

#
# PP files.
#
# Big-endian PP: leading 4-byte record length 0x100.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Post Processing file (PP)",
        MagicNumber(4),
        0x00000100,
        pp.load_cubes,
        priority=5,
        constraint_aware_handler=True,
    )
)

# Little-endian PP: the same record length, byte-swapped.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Post Processing file (PP) little-endian",
        MagicNumber(4),
        0x00010000,
        pp.load_cubes_little_endian,
        priority=3,
        constraint_aware_handler=True,
    )
)

#
# GRIB files.
#
def _load_grib(*args, **kwargs):
try:
from iris_grib import load_cubes
except ImportError:
raise RuntimeError(
"Unable to load GRIB file - "
'"iris_grib" package is not installed.'
)
return load_cubes(*args, **kwargs)
# NB. Because this is such a "fuzzy" check, we give this a very low
# priority to avoid collateral damage from false positives.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "GRIB",
        MagicNumber(100),
        lambda header_bytes: b"GRIB" in header_bytes,
        _load_grib,
        priority=1,
    )
)

#
# netCDF files.
#
# Classic netCDF: magic bytes "CDF\x01".
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF",
        MagicNumber(4),
        0x43444601,
        netcdf.load_cubes,
        priority=5,
        constraint_aware_handler=True,
    )
)

# 64-bit offset netCDF: magic bytes "CDF\x02".
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF 64 bit offset format",
        MagicNumber(4),
        0x43444602,
        netcdf.load_cubes,
        priority=5,
        constraint_aware_handler=True,
    )
)

# This covers both v4 and v4 classic model.
# (The magic number is the 8-byte HDF5 file signature.)
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NetCDF_v4",
        MagicNumber(8),
        0x894844460D0A1A0A,
        netcdf.load_cubes,
        priority=5,
        constraint_aware_handler=True,
    )
)

# OPeNDAP URLs (http/https) are handed straight to the netCDF loader.
_nc_dap = FormatSpecification(
    "NetCDF OPeNDAP",
    UriProtocol(),
    lambda protocol: protocol in ["http", "https"],
    netcdf.load_cubes,
    priority=6,
    constraint_aware_handler=True,
)
FORMAT_AGENT.add_spec(_nc_dap)
del _nc_dap

#
# UM Fieldsfiles.
#
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) pre v3.1",
        MagicNumber(8),
        0x000000000000000F,
        um.load_cubes,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) post v5.2",
        MagicNumber(8),
        0x0000000000000014,
        um.load_cubes,
        priority=4,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) ancillary",
        MagicNumber(8),
        0xFFFFFFFFFFFF8000,
        um.load_cubes,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) converted " "with ieee to 32 bit",
        MagicNumber(4),
        0x00000014,
        um.load_cubes_32bit_ieee,
        priority=3,
        constraint_aware_handler=True,
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "UM Fieldsfile (FF) ancillary " "converted with ieee to 32 bit",
        MagicNumber(4),
        0xFFFF8000,
        um.load_cubes_32bit_ieee,
        priority=3,
        constraint_aware_handler=True,
    )
)

#
# NIMROD files.
#
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NIMROD", MagicNumber(4), 0x00000200, nimrod.load_cubes, priority=3
    )
)

#
# NAME files.
#
# Sniffed by leading text rather than a binary magic number.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "NAME III",
        LeadingLine(),
        lambda line: line.lstrip().startswith(b"NAME III"),
        name.load_cubes,
        priority=5,
    )
)

#
# ABF/ABL
#
# Identified purely by file extension.
FORMAT_AGENT.add_spec(
    FormatSpecification(
        "ABF", FileExtension(), ".abf", abf.load_cubes, priority=3
    )
)

FORMAT_AGENT.add_spec(
    FormatSpecification(
        "ABL", FileExtension(), ".abl", abf.load_cubes, priority=3
    )
)
|
bjlittle/iris
|
lib/iris/fileformats/__init__.py
|
Python
|
lgpl-3.0
| 4,659
|
[
"NetCDF"
] |
0ebe294d0ed175c28fed8e4b1a56193441274e69f898fcef53398a1175126d6d
|
# -*- coding: utf-8 -*-
"""
One-class classifer.
For more information see:
Kathryn Hempstalk, Eibe Frank, Ian H. Witten: One-Class Classification by Combining Density and Class Probability Estimation. In: Proceedings of the 12th European Conference on Principles and Practice of Knowledge Discovery in Databases and 19th European Conference on Machine Learning, ECMLPKDD2008, Berlin, 505--519, 2008.
@author: Kat
"""
from sklearn.base import BaseEstimator
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedKFold
import numpy as np
from generators import abstract,gaussian,discrete
from scipy import stats
import math
class OneClassClassifier(BaseEstimator):
    """One-class classifier combining density and class probability estimation.

    Artificial instances are drawn from per-attribute reference
    distributions; a binary base classifier is then trained to separate
    real (target) instances from the generated ones, and its class
    probabilities are combined with the reference density via Bayes' rule.

    See: Hempstalk, Frank, Witten, "One-Class Classification by Combining
    Density and Class Probability Estimation", ECML PKDD 2008.
    """

    def __init__(self, base_classifier=DecisionTreeClassifier(),
                 contamination=0.1, proportion_generated=0.5,
                 cv_folds=10,
                 density_only=False, random_state=0, discrete_threshold=-1):
        # NOTE(review): the default base_classifier instance is shared by
        # every OneClassClassifier built with the default argument --
        # conventional for sklearn-style code but worth knowing.
        self.base_classifier = base_classifier
        self.contamination = contamination
        self.proportion_generated = proportion_generated
        self.density_only = density_only
        self.random_state = random_state
        self.cv_folds = cv_folds
        self.discrete_threshold = discrete_threshold
        self.threshold = 0.5

    def _build_generator(self, values):
        """Choose a reference generator for one attribute column.

        Uses a discrete generator when discrete_threshold > 1 and the
        column has fewer distinct values than the threshold; otherwise a
        Gaussian (or a constant dummy if the column has zero variance).
        """
        if self.discrete_threshold > 1:
            discrete_gen = discrete.DiscreteGenerator(values)
            if discrete_gen.total_keys < self.discrete_threshold:
                return discrete_gen
        mean = np.mean(values)
        stddev = np.std(values)
        if stddev == 0:
            return abstract.DummyGenerator(mean)
        return gaussian.GaussianGenerator(mean, stddev, self.random_state)

    def fit(self, X, y=None):
        """Fit the one-class model on target-class data X.

        :param X: array of shape (n_samples, n_features), target class only
        :param y: ignored; present for scikit-learn API compatibility
        :return: self (scikit-learn convention; the original returned None)
        """
        # One reference generator per attribute.
        # (Py3 fix: the original used the Python-2-only xrange.)
        self.generators = [self._build_generator(X[:, col])
                           for col in range(X.shape[1])]

        # Generate artificial rows so they make up proportion_generated
        # of the combined data set.
        total_instances = len(X) / (1 - self.proportion_generated)
        generated_len = int(total_instances - len(X))
        generated = [[gen.generate() for gen in self.generators]
                     for _ in range(generated_len)]

        # Work out the threshold of prob(X|C) using cross validation.
        skf = StratifiedKFold(n_splits=self.cv_folds,
                              random_state=self.random_state, shuffle=True)
        newX = np.vstack((X, generated))
        # Bug fix: there are generated_len artificial rows, not len(X);
        # the original np.zeros(len(X)) produced a label vector of the
        # wrong length whenever proportion_generated != 0.5.
        newY = np.hstack((np.ones(len(X)), np.zeros(generated_len)))
        thresholds = [None] * self.cv_folds
        for i, (train_indices, test_indices) in enumerate(skf.split(newX, newY)):
            # Bug fix: the original tested `~self.density_only`; bitwise
            # NOT of a bool is always truthy, so the guard never skipped
            # training. `not` expresses the intended "only train if you
            # need to".
            if not self.density_only:
                self.base_classifier.fit(newX[train_indices], newY[train_indices])
            probabilities = self._get_probabilities(newX[test_indices])
            thresholds[i] = stats.scoreatpercentile(probabilities, 100 * self.contamination)
        self.threshold = np.mean(thresholds)

        # Retrain on all the data.
        if not self.density_only:
            self.base_classifier.fit(newX, newY)
        return self

    def _get_log_probabilities(self, X):
        """Return log P(x | target class) for each row of X."""
        if self.density_only:
            return np.array([self._log_prob_x_given_a(x) for x in X])
        base_classifier_probs = self.base_classifier.predict_proba(X)[:, 1]
        return np.array([self._log_prob_x_given_c(x, base_classifier_probs[i])
                         for i, x in enumerate(X)])

    def _get_probabilities(self, X):
        """Map log-probabilities to class-membership probabilities in [0, 1]."""
        log_probs = self._get_log_probabilities(X)
        probabilities = [None] * len(X)
        for i, prob in enumerate(log_probs):
            if prob == 0:
                prob_outlier = 1
            else:
                prob_outlier = 1 / (1 + math.exp(prob - self.threshold))
            probabilities[i] = 1 - prob_outlier
        return np.array(probabilities)

    def _log_prob_x_given_a(self, x):
        """Log-density of one instance under the per-attribute generators."""
        return sum(self.generators[col].get_log_probability(x[col])
                   for col in range(x.shape[0]))

    def _log_prob_x_given_c(self, x, prob_c_given_x):
        """Combine reference density and classifier output via Bayes' rule."""
        prob_c = 1 - self.proportion_generated
        log_prob_x_given_a = self._log_prob_x_given_a(x)
        if self.density_only:
            return log_prob_x_given_a
        # Cover edge cases where the classifier is fully certain.
        if prob_c_given_x == 1:
            return log_prob_x_given_a
        if prob_c_given_x == 0:
            return 0
        # Finally, calculate the combined log probability.
        top = math.log(1 - prob_c) + math.log(prob_c_given_x)
        bottom = math.log(prob_c) + math.log(1 - prob_c_given_x)
        return (top - bottom) + log_prob_x_given_a

    def predict(self, X):
        """Return a list with 1 for inliers and -1 for outliers."""
        probs = self._get_probabilities(X)
        return [1 if prob >= self.threshold else -1 for prob in probs]

    def decision_function(self, X):
        """Higher values indicate greater confidence of class membership."""
        return self._get_probabilities(X)
|
drkatnz/CombinedOneClass
|
oneclass/oneclass.py
|
Python
|
mit
| 6,201
|
[
"Gaussian"
] |
754016e77174bf92ae4fb5342bc8ec8e7b31a7936635634a70f5ef88063b277a
|
import unittest
import pysal
import numpy as np
from pysal.spreg import error_sp as SP
from scipy import sparse
class TestBaseGMError(unittest.TestCase):
    """Regression tests for spreg.error_sp.BaseGM_Error with sparse X."""

    def setUp(self):
        # Columbus sample data: HOVAL regressed on INC and CRIME with a
        # row-standardized rook-contiguity weights matrix.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # All expected values below are pinned regression fixtures.
        reg = SP.BaseGM_Error(self.y, self.X, self.w.sparse)
        betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas,betas,6)
        u = np.array([ 27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n,6)
        k = 3
        self.assertAlmostEqual(reg.k,k,6)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        e = np.array([ 31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm,vm,6)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2,sig2,5)
class TestGMError(unittest.TestCase):
    """Regression tests for the user-facing spreg.error_sp.GM_Error class
    (constant added internally, inference statistics exposed)."""

    def setUp(self):
        # Same Columbus data as TestBaseGMError, but without the manual
        # constant column: GM_Error adds it itself.
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # All expected values below are pinned regression fixtures.
        reg = SP.GM_Error(self.y, self.X, self.w)
        betas = np.array([[ 47.94371455], [ 0.70598088], [ -0.55571746], [ 0.37230161]])
        np.testing.assert_array_almost_equal(reg.betas,betas,6)
        u = np.array([ 27.4739775])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n,6)
        k = 3
        self.assertAlmostEqual(reg.k,k,6)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        e = np.array([ 31.89620319])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
        predy = np.array([ 52.9930255])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[ 1.51884943e+02, -5.37622793e+00, -1.86970286e+00], [ -5.37622793e+00, 2.48972661e-01, 5.26564244e-02], [ -1.86970286e+00, 5.26564244e-02, 3.18930650e-02]])
        np.testing.assert_array_almost_equal(reg.vm,vm,6)
        sig2 = 191.73716465732355
        self.assertAlmostEqual(reg.sig2,sig2,5)
        # Inference extras only available on the user class.
        pr2 = 0.3495097406012179
        self.assertAlmostEqual(reg.pr2,pr2)
        std_err = np.array([ 12.32416094, 0.4989716 , 0.1785863 ])
        np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
        z_stat = np.array([[ 3.89022140e+00, 1.00152805e-04], [ 1.41487186e+00, 1.57106070e-01], [ -3.11175868e+00, 1.85976455e-03]])
        np.testing.assert_array_almost_equal(reg.z_stat,z_stat,6)
class TestBaseGMEndogError(unittest.TestCase):
    """Regression tests for spreg.error_sp.BaseGM_Endog_Error: spatial
    error model with CRIME endogenous and DISCBD as its instrument."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = sparse.csr_matrix(self.X)
        # Endogenous variable and its instrument.
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # All expected values below are pinned regression fixtures.
        reg = SP.BaseGM_Endog_Error(self.y, self.X, self.yd, self.q, self.w.sparse)
        betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
        np.testing.assert_array_almost_equal(reg.betas,betas,6)
        u = np.array([ 26.55951566])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        e = np.array([ 31.23925425])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
        predy = np.array([ 53.9074875])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n)
        k = 3
        self.assertAlmostEqual(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        yend = np.array([ 15.72598])
        np.testing.assert_array_almost_equal(reg.yend[0],yend,6)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0],z,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        #std_y
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        #vm
        vm = np.array([[ 529.15644447, -15.78333817, -8.38016887],
        [ -15.78333817, 0.54023465, 0.2311196 ],
        [ -8.38016887, 0.2311196 , 0.14497647]])
        np.testing.assert_array_almost_equal(reg.vm,vm,5)
        sig2 = 192.50040382591442
        self.assertAlmostEqual(reg.sig2,sig2,5)
class TestGMEndogError(unittest.TestCase):
    """Regression tests for the user-facing spreg.error_sp.GM_Endog_Error
    class (constant added internally, inference statistics exposed)."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        # Endogenous variable and its instrument.
        yd = []
        yd.append(db.by_col("CRIME"))
        self.yd = np.array(yd).T
        q = []
        q.append(db.by_col("DISCBD"))
        self.q = np.array(q).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # All expected values below are pinned regression fixtures.
        reg = SP.GM_Endog_Error(self.y, self.X, self.yd, self.q, self.w)
        betas = np.array([[ 55.36095292], [ 0.46411479], [ -0.66883535], [ 0.38989939]])
        np.testing.assert_array_almost_equal(reg.betas,betas,6)
        u = np.array([ 26.55951566])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        e = np.array([ 31.23925425])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e,6)
        predy = np.array([ 53.9074875])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n)
        k = 3
        self.assertAlmostEqual(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        yend = np.array([ 15.72598])
        np.testing.assert_array_almost_equal(reg.yend[0],yend,6)
        z = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0],z,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[ 529.15644447, -15.78333817, -8.38016887],
        [ -15.78333817, 0.54023465, 0.2311196 ],
        [ -8.38016887, 0.2311196 , 0.14497647]])
        np.testing.assert_array_almost_equal(reg.vm,vm,5)
        pr2 = 0.346472557570858
        self.assertAlmostEqual(reg.pr2,pr2)
        sig2 = 192.50040382591442
        self.assertAlmostEqual(reg.sig2,sig2,5)
        std_err = np.array([ 23.003401 , 0.73500657, 0.38075777])
        np.testing.assert_array_almost_equal(reg.std_err,std_err,6)
        z_stat = np.array([[ 2.40664208, 0.01609994], [ 0.63144305, 0.52775088], [-1.75659016, 0.07898769]])
        np.testing.assert_array_almost_equal(reg.z_stat,z_stat,6)
class TestBaseGMCombo(unittest.TestCase):
    """Regression tests for spreg.error_sp.BaseGM_Combo: combined spatial
    lag + spatial error model, with the lag instruments built manually via
    set_endog."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        # Build the spatially lagged dependent variable and its
        # instruments, then add the constant before converting to sparse.
        yd2, q2 = pysal.spreg.utils.set_endog(self.y, self.X, self.w, None, None, 1, True)
        self.X = np.hstack((np.ones(self.y.shape),self.X))
        self.X = sparse.csr_matrix(self.X)
        reg = SP.BaseGM_Combo(self.y, self.X, yend=yd2, q=q2, w=self.w.sparse)
        # All expected values below are pinned regression fixtures.
        betas = np.array([[ 57.61123461],[ 0.73441314], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
        np.testing.assert_array_almost_equal(reg.betas,betas,5)
        u = np.array([ 25.57932637])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        e_filtered = np.array([ 31.65374945])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e_filtered,5)
        predy = np.array([ 54.88767663])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n)
        k = 4
        self.assertAlmostEqual(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        yend = np.array([ 35.4585005])
        np.testing.assert_array_almost_equal(reg.yend[0],yend,6)
        z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0],z,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[ 5.22438894e+02, -6.07257246e+00, -1.91428892e+00, -8.97134337e+00], [ -6.07257246e+00, 2.38012836e-01, 4.70160750e-02, 2.80964005e-02], [ -1.91428911e+00, 4.70160773e-02, 3.20924154e-02, 3.14968682e-03], [ -8.97134237e+00, 2.80964005e-02, 3.14968682e-03, 2.15753890e-01]])
        np.testing.assert_array_almost_equal(reg.vm,vm,4)
        sig2 = 181.78650186468832
        self.assertAlmostEqual(reg.sig2,sig2,4)
class TestGMCombo(unittest.TestCase):
    """Regression tests for the user-facing spreg.error_sp.GM_Combo class
    (lag instruments and constant handled internally)."""

    def setUp(self):
        db=pysal.open(pysal.examples.get_path("columbus.dbf"),"r")
        y = np.array(db.by_col("HOVAL"))
        self.y = np.reshape(y, (49,1))
        X = []
        X.append(db.by_col("INC"))
        X.append(db.by_col("CRIME"))
        self.X = np.array(X).T
        self.X = sparse.csr_matrix(self.X)
        self.w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
        self.w.transform = 'r'

    def test_model(self):
        # Only spatial lag
        # All expected values below are pinned regression fixtures.
        reg = SP.GM_Combo(self.y, self.X, w=self.w)
        e_reduced = np.array([ 28.18617481])
        np.testing.assert_array_almost_equal(reg.e_pred[0],e_reduced,6)
        predy_e = np.array([ 52.28082782])
        np.testing.assert_array_almost_equal(reg.predy_e[0],predy_e,6)
        betas = np.array([[ 57.61123515],[ 0.73441313], [ -0.59459416], [ -0.21762921], [ 0.54732051]])
        np.testing.assert_array_almost_equal(reg.betas,betas,6)
        u = np.array([ 25.57932637])
        np.testing.assert_array_almost_equal(reg.u[0],u,6)
        e_filtered = np.array([ 31.65374945])
        np.testing.assert_array_almost_equal(reg.e_filtered[0],e_filtered,5)
        predy = np.array([ 54.88767685])
        np.testing.assert_array_almost_equal(reg.predy[0],predy,6)
        n = 49
        self.assertAlmostEqual(reg.n,n)
        k = 4
        self.assertAlmostEqual(reg.k,k)
        y = np.array([ 80.467003])
        np.testing.assert_array_almost_equal(reg.y[0],y,6)
        x = np.array([ 1. , 19.531 , 15.72598])
        np.testing.assert_array_almost_equal(reg.x.toarray()[0],x,6)
        yend = np.array([ 35.4585005])
        np.testing.assert_array_almost_equal(reg.yend[0],yend,6)
        z = np.array([ 1. , 19.531 , 15.72598 , 35.4585005])
        np.testing.assert_array_almost_equal(reg.z.toarray()[0],z,6)
        my = 38.43622446938776
        self.assertAlmostEqual(reg.mean_y,my)
        sy = 18.466069465206047
        self.assertAlmostEqual(reg.std_y,sy)
        vm = np.array([[ 5.22438894e+02, -6.07257246e+00, -1.91428892e+00, -8.97134337e+00], [ -6.07257218e+00, 2.38012839e-01, 4.70160773e-02, 2.80964005e-02], [ -1.91428911e+00, 4.70160773e-02, 3.20924154e-02, 3.14968682e-03], [ -8.97134237e+00, 2.80964005e-02, 3.14968682e-03, 2.15753890e-01]])
        np.testing.assert_array_almost_equal(reg.vm,vm,4)
        sig2 = 181.78650186468832
        self.assertAlmostEqual(reg.sig2,sig2,4)
        pr2 = 0.3018280166937799
        self.assertAlmostEqual(reg.pr2,pr2)
        pr2_e = 0.3561355587000738
        self.assertAlmostEqual(reg.pr2_e,pr2_e)
        std_err = np.array([ 22.85692222, 0.48786559, 0.17914356, 0.46449318])
        np.testing.assert_array_almost_equal(reg.std_err,std_err,5)
        z_stat = np.array([[ 2.52051597e+00, 1.17182922e-02], [ 1.50535954e+00, 1.32231664e-01], [ -3.31909311e+00, 9.03103123e-04], [ -4.68530506e-01, 6.39405261e-01]])
        np.testing.assert_array_almost_equal(reg.z_stat,z_stat,6)
if __name__ == '__main__':
    # Temporarily suppress scientific notation in numpy printing while the
    # tests run.  unittest.main() normally exits the process via SystemExit,
    # so the restore must live in a finally block -- in the original it was
    # placed after the call and never executed.
    start_suppress = np.get_printoptions()['suppress']
    np.set_printoptions(suppress=True)
    try:
        unittest.main()
    finally:
        np.set_printoptions(suppress=start_suppress)
|
AlanZatarain/pysal
|
pysal/spreg/tests/test_error_sp_sparse.py
|
Python
|
bsd-3-clause
| 15,081
|
[
"COLUMBUS"
] |
3ba2b290e52977d643a07a19d5a5e82767fdd32d9d91514d05cfab3620622d60
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the Pipeline class."""
from __future__ import absolute_import
import copy
import logging
import platform
import unittest
from builtins import object
from builtins import range
from collections import defaultdict
import mock
import apache_beam as beam
from apache_beam import typehints
from apache_beam.coders import BytesCoder
from apache_beam.io import Read
from apache_beam.metrics import Metrics
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineOptions
from apache_beam.pipeline import PipelineVisitor
from apache_beam.pipeline import PTransformOverride
from apache_beam.pvalue import AsSingleton
from apache_beam.runners.dataflow.native_io.iobase import NativeSource
from apache_beam.runners.direct.evaluation_context import _ExecutionContext
from apache_beam.runners.direct.transform_evaluator import _GroupByKeyOnlyEvaluator
from apache_beam.runners.direct.transform_evaluator import _TransformEvaluator
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import CombineGlobally
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Map
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.transforms import WindowInto
from apache_beam.transforms.userstate import BagStateSpec
from apache_beam.transforms.window import SlidingWindows
from apache_beam.transforms.window import TimestampedValue
from apache_beam.utils.timestamp import MIN_TIMESTAMP
# TODO(BEAM-1555): Test is failing on the service, with FakeSource.
# from nose.plugins.attrib import attr
class FakeSource(NativeSource):
  """Fake source returning a fixed list of values."""
  class _Reader(object):
    """Context-managed reader that replays the stored values."""
    def __init__(self, vals):
      self._vals = vals
      # Incremented once per emitted element; queried by the metrics tests.
      self._output_counter = Metrics.counter('main', 'outputs')
    def __enter__(self):
      return self
    def __exit__(self, unused_type, unused_value, unused_traceback):
      pass
    def __iter__(self):
      for value in self._vals:
        self._output_counter.inc()
        yield value
  def __init__(self, vals):
    self._vals = vals
  def reader(self):
    """Return a fresh reader over the stored values."""
    return FakeSource._Reader(self._vals)
class FakeUnboundedSource(NativeSource):
  """Fake unbounded source. Does not work at runtime"""
  def is_bounded(self):
    # Marks every PCollection read from this source as unbounded.
    return False
  def reader(self):
    # Graph-construction only; never meant to be executed.
    return None
class DoubleParDo(beam.PTransform):
  """Composite transform that doubles every element."""
  def expand(self, input):
    doubled = input | 'Inner' >> beam.Map(lambda element: element * 2)
    return doubled
  def to_runner_api_parameter(self, context):
    # Serialize by pickling so override tests can round-trip this transform
    # through the runner API.
    return self.to_runner_api_pickled(context)
class TripleParDo(beam.PTransform):
  """Composite transform that triples every element."""
  def expand(self, input):
    # Keeping labels the same intentionally to make sure that there is no label
    # conflict due to replacement.
    tripled = input | 'Inner' >> beam.Map(lambda element: element * 3)
    return tripled
class ToStringParDo(beam.PTransform):
  """Composite transform that stringifies every element."""
  def expand(self, input):
    # copy.copy() defeats automatic inference of a str output type, so the
    # type-hint override tests control the declared output type themselves.
    return input | 'Inner' >> beam.Map(lambda element: copy.copy(str(element)))
class PipelineTest(unittest.TestCase):
  """Tests for core Pipeline graph construction and execution."""
  @staticmethod
  def custom_callable(pcoll):
    """Standalone callable usable where a PTransform is expected."""
    return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  # Some of these tests designate a runner by name, others supply a runner.
  # This variation is just to verify that both means of runner specification
  # work and is not related to other aspects of the tests.
  class CustomTransform(PTransform):
    """Minimal composite transform: adds 1 to every element."""
    def expand(self, pcoll):
      return pcoll | '+1' >> FlatMap(lambda x: [x + 1])
  class Visitor(PipelineVisitor):
    """Records every value and composite transform seen during a visit."""
    def __init__(self, visited):
      self.visited = visited  # PValues seen, in visit order
      self.enter_composite = []  # composite nodes, entry order
      self.leave_composite = []  # composite nodes, exit order
    def visit_value(self, value, _):
      self.visited.append(value)
    def enter_composite_transform(self, transform_node):
      self.enter_composite.append(transform_node)
    def leave_composite_transform(self, transform_node):
      self.leave_composite.append(transform_node)
  def test_create(self):
    """Create works from a list and from a one-shot iterator."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'label1' >> Create([1, 2, 3])
    assert_that(pcoll, equal_to([1, 2, 3]))
    # Test if initial value is an iterator object.
    pcoll2 = pipeline | 'label2' >> Create(iter((4, 5, 6)))
    pcoll3 = pcoll2 | 'do' >> FlatMap(lambda x: [x + 10])
    assert_that(pcoll3, equal_to([14, 15, 16]), label='pcoll3')
    pipeline.run()
  def test_flatmap_builtin(self):
    """FlatMap accepts a builtin callable (set) as well as lambdas."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'label1' >> Create([1, 2, 3])
    assert_that(pcoll, equal_to([1, 2, 3]))
    pcoll2 = pcoll | 'do' >> FlatMap(lambda x: [x + 10])
    assert_that(pcoll2, equal_to([11, 12, 13]), label='pcoll2')
    pcoll3 = pcoll2 | 'm1' >> Map(lambda x: [x, 12])
    assert_that(pcoll3,
                equal_to([[11, 12], [12, 12], [13, 12]]), label='pcoll3')
    pcoll4 = pcoll3 | 'do2' >> FlatMap(set)
    assert_that(pcoll4, equal_to([11, 12, 12, 12, 13]), label='pcoll4')
    pipeline.run()
  def test_maptuple_builtin(self):
    """MapTuple unpacks tuples and mixes static and deferred side inputs."""
    pipeline = TestPipeline()
    pcoll = pipeline | Create([('e1', 'e2')])
    side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
    side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
    # A test function with a tuple input, an auxiliary parameter,
    # and some side inputs.
    fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
        e1, e2, t, s1, s2)
    assert_that(pcoll | 'NoSides' >> beam.core.MapTuple(fn),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, None, None)]),
                label='NoSidesCheck')
    assert_that(pcoll | 'StaticSides' >> beam.core.MapTuple(fn, 's1', 's2'),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
                label='StaticSidesCheck')
    assert_that(pcoll | 'DynamicSides' >> beam.core.MapTuple(fn, side1, side2),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, 's1', 's2')]),
                label='DynamicSidesCheck')
    assert_that(pcoll | 'MixedSides' >> beam.core.MapTuple(fn, s2=side2),
                equal_to([('e1', 'e2', MIN_TIMESTAMP, None, 's2')]),
                label='MixedSidesCheck')
    pipeline.run()
  def test_flatmaptuple_builtin(self):
    """FlatMapTuple flattens the returned tuple into individual elements."""
    pipeline = TestPipeline()
    pcoll = pipeline | Create([('e1', 'e2')])
    side1 = beam.pvalue.AsSingleton(pipeline | 'side1' >> Create(['s1']))
    side2 = beam.pvalue.AsSingleton(pipeline | 'side2' >> Create(['s2']))
    # A test function with a tuple input, an auxiliary parameter,
    # and some side inputs.
    fn = lambda e1, e2, t=DoFn.TimestampParam, s1=None, s2=None: (
        e1, e2, t, s1, s2)
    assert_that(pcoll | 'NoSides' >> beam.core.FlatMapTuple(fn),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, None, None]),
                label='NoSidesCheck')
    assert_that(pcoll | 'StaticSides' >> beam.core.FlatMapTuple(fn, 's1', 's2'),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
                label='StaticSidesCheck')
    assert_that(pcoll
                | 'DynamicSides' >> beam.core.FlatMapTuple(fn, side1, side2),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, 's1', 's2']),
                label='DynamicSidesCheck')
    assert_that(pcoll | 'MixedSides' >> beam.core.FlatMapTuple(fn, s2=side2),
                equal_to(['e1', 'e2', MIN_TIMESTAMP, None, 's2']),
                label='MixedSidesCheck')
    pipeline.run()
  def test_create_singleton_pcollection(self):
    """A single list element stays one (list-valued) element."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'label' >> Create([[1, 2, 3]])
    assert_that(pcoll, equal_to([[1, 2, 3]]))
    pipeline.run()
  # TODO(BEAM-1555): Test is failing on the service, with FakeSource.
  # @attr('ValidatesRunner')
  def test_metrics_in_fake_source(self):
    """The 'outputs' counter incremented in FakeSource._Reader is queryable."""
    pipeline = TestPipeline()
    pcoll = pipeline | Read(FakeSource([1, 2, 3, 4, 5, 6]))
    assert_that(pcoll, equal_to([1, 2, 3, 4, 5, 6]))
    res = pipeline.run()
    metric_results = res.metrics().query()
    outputs_counter = metric_results['counters'][0]
    self.assertEqual(outputs_counter.key.step, 'Read')
    self.assertEqual(outputs_counter.key.metric.name, 'outputs')
    self.assertEqual(outputs_counter.committed, 6)
  def test_fake_read(self):
    """Reading from a NativeSource yields its stored values."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'read' >> Read(FakeSource([1, 2, 3]))
    assert_that(pcoll, equal_to([1, 2, 3]))
    pipeline.run()
  def test_visit_entire_graph(self):
    """A PipelineVisitor sees every PCollection and composite transform."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll' >> Create([1, 2, 3])
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    pcoll4 = pcoll2 | 'do3' >> FlatMap(lambda x: [x + 1])
    transform = PipelineTest.CustomTransform()
    pcoll5 = pcoll4 | transform
    visitor = PipelineTest.Visitor(visited=[])
    pipeline.visit(visitor)
    # Every PCollection is visited, and composites are entered and left in
    # matching pairs.
    self.assertEqual(set([pcoll1, pcoll2, pcoll3, pcoll4, pcoll5]),
                     set(visitor.visited))
    self.assertEqual(set(visitor.enter_composite),
                     set(visitor.leave_composite))
    self.assertEqual(3, len(visitor.enter_composite))
    self.assertEqual(visitor.enter_composite[2].transform, transform)
    self.assertEqual(visitor.leave_composite[1].transform, transform)
  def test_apply_custom_transform(self):
    """A composite PTransform can be applied directly with |."""
    pipeline = TestPipeline()
    pcoll = pipeline | 'pcoll' >> Create([1, 2, 3])
    result = pcoll | PipelineTest.CustomTransform()
    assert_that(result, equal_to([2, 3, 4]))
    pipeline.run()
  def test_reuse_custom_transform_instance(self):
    """Applying the same unlabeled transform instance twice is an error."""
    pipeline = Pipeline()
    pcoll1 = pipeline | 'pcoll1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pcoll2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    pcoll1 | transform
    with self.assertRaises(RuntimeError) as cm:
      pipeline.apply(transform, pcoll2)
    self.assertEqual(
        cm.exception.args[0],
        'Transform "CustomTransform" does not have a stable unique label. '
        'This will prevent updating of pipelines. '
        'To apply a transform with a specified label write '
        'pvalue | "label" >> transform')
  def test_reuse_cloned_custom_transform_instance(self):
    """Re-applying under a new label clones the transform, so it is legal."""
    pipeline = TestPipeline()
    pcoll1 = pipeline | 'pc1' >> Create([1, 2, 3])
    pcoll2 = pipeline | 'pc2' >> Create([4, 5, 6])
    transform = PipelineTest.CustomTransform()
    result1 = pcoll1 | transform
    result2 = pcoll2 | 'new_label' >> transform
    assert_that(result1, equal_to([2, 3, 4]), label='r1')
    assert_that(result2, equal_to([5, 6, 7]), label='r2')
    pipeline.run()
  def test_transform_no_super_init(self):
    """A PTransform that skips super().__init__ still works when applied."""
    class AddSuffix(PTransform):
      def __init__(self, suffix):
        # No call to super(...).__init__
        self.suffix = suffix
      def expand(self, pcoll):
        return pcoll | Map(lambda x: x + self.suffix)
    self.assertEqual(
        ['a-x', 'b-x', 'c-x'],
        sorted(['a', 'b', 'c'] | 'AddSuffix' >> AddSuffix('-x')))
  @unittest.skip("Fails on some platforms with new urllib3.")
  def test_memory_usage(self):
    """Memory use must not grow in proportion to the number of chained Maps."""
    try:
      import resource
    except ImportError:
      # Skip the test if resource module is not available (e.g. non-Unix os).
      self.skipTest('resource module not available.')
    if platform.mac_ver()[0]:
      # Skip the test on macos, depending on version it returns ru_maxrss in
      # different units.
      self.skipTest('ru_maxrss is not in standard units.')
    def get_memory_usage_in_bytes():
      # ru_maxrss is converted from KiB to bytes here (macOS is skipped above).
      return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2 ** 10)
    def check_memory(value, memory_threshold):
      memory_usage = get_memory_usage_in_bytes()
      if memory_usage > memory_threshold:
        raise RuntimeError(
            'High memory usage: %d > %d' % (memory_usage, memory_threshold))
      return value
    len_elements = 1000000
    num_elements = 10
    num_maps = 100
    # TODO(robertwb): reduce memory usage of FnApiRunner so that this test
    # passes.
    pipeline = TestPipeline(runner='BundleBasedDirectRunner')
    # Consumed memory should not be proportional to the number of maps.
    memory_threshold = (
        get_memory_usage_in_bytes() + (5 * len_elements * num_elements))
    # Plus small additional slack for memory fluctuations during the test.
    memory_threshold += 10 * (2 ** 20)
    biglist = pipeline | 'oom:create' >> Create(
        ['x' * len_elements] * num_elements)
    for i in range(num_maps):
      biglist = biglist | ('oom:addone-%d' % i) >> Map(lambda x: x + 'y')
    result = biglist | 'oom:check' >> Map(check_memory, memory_threshold)
    assert_that(result, equal_to(
        ['x' * len_elements + 'y' * num_maps] * num_elements))
    pipeline.run()
  def test_aggregator_empty_input(self):
    """CombineGlobally without defaults yields nothing for empty input."""
    actual = [] | CombineGlobally(max).without_defaults()
    self.assertEqual(actual, [])
  def test_pipeline_as_context(self):
    """Exceptions raised while running propagate out of the with-block."""
    def raise_exception(exn):
      raise exn
    with self.assertRaises(ValueError):
      with Pipeline() as p:
        # pylint: disable=expression-not-assigned
        p | Create([ValueError('msg')]) | Map(raise_exception)
  # TODO(BEAM-1894).
  # def test_eager_pipeline(self):
  #   p = Pipeline('EagerRunner')
  #   self.assertEqual([1, 4, 9], p | Create([1, 2, 3]) | Map(lambda x: x*x))
  @mock.patch(
      'apache_beam.runners.direct.direct_runner._get_transform_overrides')
  def test_ptransform_overrides(self, file_system_override_mock):
    """A runner-provided override replaces DoubleParDo with TripleParDo."""
    class MyParDoOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        if isinstance(ptransform, DoubleParDo):
          return TripleParDo()
        raise ValueError('Unsupported type of transform: %r' % ptransform)
    def get_overrides(unused_pipeline_options):
      return [MyParDoOverride()]
    file_system_override_mock.side_effect = get_overrides
    # Specify DirectRunner as it's the one patched above.
    with Pipeline(runner='BundleBasedDirectRunner') as p:
      # [3, 6, 9] proves the replacement (tripling) transform actually ran.
      pcoll = p | beam.Create([1, 2, 3]) | 'Multiply' >> DoubleParDo()
      assert_that(pcoll, equal_to([3, 6, 9]))
  def test_ptransform_override_type_hints(self):
    """replace_all propagates the replacement transform's output type hints."""
    class NoTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        return ToStringParDo()
    class WithTypeHintOverride(PTransformOverride):
      def matches(self, applied_ptransform):
        return isinstance(applied_ptransform.transform, DoubleParDo)
      def get_replacement_transform(self, ptransform):
        return (ToStringParDo()
                .with_input_types(int)
                .with_output_types(str))
    for override, expected_type in [(NoTypeHintOverride(), typehints.Any),
                                    (WithTypeHintOverride(), str)]:
      p = TestPipeline()
      pcoll = (p
               | beam.Create([1, 2, 3])
               | 'Operate' >> DoubleParDo()
               | 'NoOp' >> beam.Map(lambda x: x))
      p.replace_all([override])
      self.assertEqual(pcoll.producer.inputs[0].element_type, expected_type)
  def test_kv_ptransform_honor_type_hints(self):
    """Explicit with_output_types wins where inference yields Any."""
    # The return type of this DoFn cannot be inferred by the default
    # Beam type inference
    class StatefulDoFn(DoFn):
      BYTES_STATE = BagStateSpec('bytes', BytesCoder())
      def return_recursive(self, count):
        if count == 0:
          return ["some string"]
        else:
          # NOTE(review): the recursive result is dropped (no `return`) —
          # appears deliberate to defeat type inference; confirm.
          self.return_recursive(count-1)
      def process(self, element, counter=DoFn.StateParam(BYTES_STATE)):
        return self.return_recursive(1)
    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()))
    p.run()
    self.assertEqual(pcoll.element_type, typehints.Any)
    p = TestPipeline()
    pcoll = (p
             | beam.Create([(1, 1), (2, 2), (3, 3)])
             | beam.GroupByKey()
             | beam.ParDo(StatefulDoFn()).with_output_types(str))
    p.run()
    self.assertEqual(pcoll.element_type, str)
def test_track_pcoll_unbounded(self):
pipeline = TestPipeline()
pcoll1 = pipeline | 'read' >> Read(FakeUnboundedSource())
pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
self.assertIs(pcoll1.is_bounded, False)
self.assertIs(pcoll1.is_bounded, False)
self.assertIs(pcoll3.is_bounded, False)
  def test_track_pcoll_bounded(self):
    """Boundedness of Create output propagates through FlatMaps."""
    pipeline = TestPipeline()
    pcoll1 = pipeline | 'label1' >> Create([1, 2, 3])
    pcoll2 = pcoll1 | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll3 = pcoll2 | 'do2' >> FlatMap(lambda x: [x + 1])
    self.assertIs(pcoll1.is_bounded, True)
    self.assertIs(pcoll2.is_bounded, True)
    self.assertIs(pcoll3.is_bounded, True)
  def test_track_pcoll_bounded_flatten(self):
    """Flatten of all-bounded inputs is bounded."""
    pipeline = TestPipeline()
    pcoll1_a = pipeline | 'label_a' >> Create([1, 2, 3])
    pcoll2_a = pcoll1_a | 'do_a' >> FlatMap(lambda x: [x + 1])
    pcoll1_b = pipeline | 'label_b' >> Create([1, 2, 3])
    pcoll2_b = pcoll1_b | 'do_b' >> FlatMap(lambda x: [x + 1])
    merged = (pcoll2_a, pcoll2_b) | beam.Flatten()
    self.assertIs(pcoll1_a.is_bounded, True)
    self.assertIs(pcoll2_a.is_bounded, True)
    self.assertIs(pcoll1_b.is_bounded, True)
    self.assertIs(pcoll2_b.is_bounded, True)
    self.assertIs(merged.is_bounded, True)
  def test_track_pcoll_unbounded_flatten(self):
    """Flatten with any unbounded input is unbounded."""
    pipeline = TestPipeline()
    pcoll1_bounded = pipeline | 'label1' >> Create([1, 2, 3])
    pcoll2_bounded = pcoll1_bounded | 'do1' >> FlatMap(lambda x: [x + 1])
    pcoll1_unbounded = pipeline | 'read' >> Read(FakeUnboundedSource())
    pcoll2_unbounded = pcoll1_unbounded | 'do2' >> FlatMap(lambda x: [x + 1])
    merged = (pcoll2_bounded, pcoll2_unbounded) | beam.Flatten()
    self.assertIs(pcoll1_bounded.is_bounded, True)
    self.assertIs(pcoll2_bounded.is_bounded, True)
    self.assertIs(pcoll1_unbounded.is_bounded, False)
    self.assertIs(pcoll2_unbounded.is_bounded, False)
    self.assertIs(merged.is_bounded, False)
class DoFnTest(unittest.TestCase):
  """Tests for DoFn parameter binding: side inputs, window, timestamp."""
  def test_element(self):
    class TestDoFn(DoFn):
      def process(self, element):
        yield element + 10
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([11, 12]))
    pipeline.run()
  def test_side_input_no_tag(self):
    """Side input bound as an ordinary keyword parameter."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()
  def test_side_input_tagged(self):
    """Side input bound via the DoFn.SideInputParam default tag."""
    class TestDoFn(DoFn):
      def process(self, element, prefix, suffix=DoFn.SideInputParam):
        return ['%s-%s-%s' % (prefix, element, suffix)]
    pipeline = TestPipeline()
    words_list = ['aa', 'bb', 'cc']
    words = pipeline | 'SomeWords' >> Create(words_list)
    prefix = 'zyx'
    suffix = pipeline | 'SomeString' >> Create(['xyz'])  # side in
    result = words | 'DecorateWordsDoFnNoTag' >> ParDo(
        TestDoFn(), prefix, suffix=AsSingleton(suffix))
    assert_that(result, equal_to(['zyx-%s-xyz' % x for x in words_list]))
    pipeline.run()
  def test_window_param(self):
    """DoFn.WindowParam exposes the element's window bounds."""
    class TestDoFn(DoFn):
      def process(self, element, window=DoFn.WindowParam):
        yield (element, (float(window.start), float(window.end)))
    pipeline = TestPipeline()
    pcoll = (pipeline
             | Create([1, 7])
             | Map(lambda x: TimestampedValue(x, x))
             | WindowInto(windowfn=SlidingWindows(10, 5))
             | ParDo(TestDoFn()))
    assert_that(pcoll, equal_to([(1, (-5, 5)), (1, (0, 10)),
                                 (7, (0, 10)), (7, (5, 15))]))
    pcoll2 = pcoll | 'Again' >> ParDo(TestDoFn())
    assert_that(
        pcoll2,
        equal_to([
            ((1, (-5, 5)), (-5, 5)), ((1, (0, 10)), (0, 10)),
            ((7, (0, 10)), (0, 10)), ((7, (5, 15)), (5, 15))]),
        label='doubled windows')
    pipeline.run()
  def test_timestamp_param(self):
    """DoFn.TimestampParam defaults to MIN_TIMESTAMP for unstamped input."""
    class TestDoFn(DoFn):
      def process(self, element, timestamp=DoFn.TimestampParam):
        yield timestamp
    pipeline = TestPipeline()
    pcoll = pipeline | 'Create' >> Create([1, 2]) | 'Do' >> ParDo(TestDoFn())
    assert_that(pcoll, equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
    pipeline.run()
  def test_timestamp_param_map(self):
    with TestPipeline() as p:
      assert_that(
          p | Create([1, 2]) | beam.Map(lambda _, t=DoFn.TimestampParam: t),
          equal_to([MIN_TIMESTAMP, MIN_TIMESTAMP]))
class Bacon(PipelineOptions):
  """Options subclass contributing the --slices flag."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--slices', type=int)
class Eggs(PipelineOptions):
  """Options subclass contributing the --style flag."""
  @classmethod
  def _add_argparse_args(cls, parser):
    parser.add_argument('--style', default='scrambled')
class Breakfast(Bacon, Eggs):
  """Combined options view: inherits both --slices and --style."""
  pass
class PipelineOptionsTest(unittest.TestCase):
  """Tests for PipelineOptions flag parsing and view_as behavior."""
  def test_flag_parsing(self):
    options = Breakfast(['--slices=3', '--style=sunny side up', '--ignored'])
    self.assertEqual(3, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_keyword_parsing(self):
    # Keyword arguments take precedence over command-line flags.
    options = Breakfast(
        ['--slices=3', '--style=sunny side up', '--ignored'],
        slices=10)
    self.assertEqual(10, options.slices)
    self.assertEqual('sunny side up', options.style)
  def test_attribute_setting(self):
    options = Breakfast(slices=10)
    self.assertEqual(10, options.slices)
    options.slices = 20
    self.assertEqual(20, options.slices)
  def test_view_as(self):
    # Writes through one view are observable through another view.
    generic_options = PipelineOptions(['--slices=3'])
    self.assertEqual(3, generic_options.view_as(Bacon).slices)
    self.assertEqual(3, generic_options.view_as(Breakfast).slices)
    generic_options.view_as(Breakfast).slices = 10
    self.assertEqual(10, generic_options.view_as(Bacon).slices)
    with self.assertRaises(AttributeError):
      generic_options.slices  # pylint: disable=pointless-statement
    with self.assertRaises(AttributeError):
      generic_options.view_as(Eggs).slices  # pylint: disable=expression-not-assigned
  def test_defaults(self):
    options = Breakfast(['--slices=3'])
    self.assertEqual(3, options.slices)
    self.assertEqual('scrambled', options.style)
  def test_dir(self):
    options = Breakfast()
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'slices', 'style',
             'view_as', 'display_data']),
        set([attr for attr in dir(options) if not attr.startswith('_') and
             attr != 'next']))
    self.assertEqual(
        set(['from_dictionary', 'get_all_options', 'style', 'view_as',
             'display_data']),
        set([attr for attr in dir(options.view_as(Eggs))
             if not attr.startswith('_') and attr != 'next']))
class RunnerApiTest(unittest.TestCase):
  """Round-trip tests through the runner API representation."""
  def test_parent_pointer(self):
    """Parent pointers in the transform tree survive a runner-API round trip."""
    class MyPTransform(beam.PTransform):
      def expand(self, p):
        self.p = p
        return p | beam.Create([None])
    p = beam.Pipeline()
    p | MyPTransform()  # pylint: disable=expression-not-assigned
    p = Pipeline.from_runner_api(
        Pipeline.to_runner_api(p, use_fake_coders=True), None, None)
    self.assertIsNotNone(p.transforms_stack[0].parts[0].parent)
    self.assertEqual(p.transforms_stack[0].parts[0].parent,
                     p.transforms_stack[0])
class DirectRunnerRetryTests(unittest.TestCase):
  """Tests for the DirectRunner's bundle retry and state rollback behavior."""
  def test_retry_fork_graph(self):
    # TODO(BEAM-3642): The FnApiRunner currently does not currently support
    # retries.
    p = beam.Pipeline(runner='BundleBasedDirectRunner')
    # TODO(mariagh): Remove the use of globals from the test.
    global count_b, count_c  # pylint: disable=global-variable-undefined
    count_b, count_c = 0, 0
    def f_b(x):
      global count_b  # pylint: disable=global-variable-undefined
      count_b += 1
      raise Exception('exception in f_b')
    def f_c(x):
      global count_c  # pylint: disable=global-variable-undefined
      count_c += 1
      raise Exception('exception in f_c')
    names = p | 'CreateNodeA' >> beam.Create(['Ann', 'Joe'])
    fork_b = names | 'SendToB' >> beam.Map(f_b)  # pylint: disable=unused-variable
    fork_c = names | 'SendToC' >> beam.Map(f_c)  # pylint: disable=unused-variable
    with self.assertRaises(Exception):
      p.run().wait_until_finish()
    # Both failing branches must be attempted the same number of times.
    assert count_b == count_c == 4
  def test_no_partial_writeouts(self):
    """State written by a failed bundle must not leak into committed state."""
    class TestTransformEvaluator(_TransformEvaluator):
      def __init__(self):
        self._execution_context = _ExecutionContext(None, {})
      def start_bundle(self):
        self.step_context = self._execution_context.get_step_context()
      def process_element(self, element):
        k, v = element
        state = self.step_context.get_keyed_state(k)
        state.add_state(None, _GroupByKeyOnlyEvaluator.ELEMENTS_TAG, v)
    # Create instance and add key/value, key/value2
    evaluator = TestTransformEvaluator()
    evaluator.start_bundle()
    self.assertIsNone(evaluator.step_context.existing_keyed_state.get('key'))
    self.assertIsNone(evaluator.step_context.partial_keyed_state.get('key'))
    evaluator.process_element(['key', 'value'])
    # Writes land only in partial (uncommitted) state.
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value']}})
    evaluator.process_element(['key', 'value2'])
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value', 'value2']}})
    # Simulate an exception (redo key/value)
    evaluator._execution_context.reset()
    evaluator.start_bundle()
    evaluator.process_element(['key', 'value'])
    # After the reset, partial state reflects only the redone element.
    self.assertEqual(
        evaluator.step_context.existing_keyed_state['key'].state,
        defaultdict(lambda: defaultdict(list)))
    self.assertEqual(
        evaluator.step_context.partial_keyed_state['key'].state,
        {None: {'elements':['value']}})
if __name__ == '__main__':
  # Verbose logging aids debugging of runner behavior in these tests.
  logging.getLogger().setLevel(logging.DEBUG)
  unittest.main()
|
markflyhigh/incubator-beam
|
sdks/python/apache_beam/pipeline_test.py
|
Python
|
apache-2.0
| 27,825
|
[
"VisIt"
] |
139200c8042829fae231a0e51b504707bff78ce076aead6b3b3b214813f877f9
|
from taichi._lib import core as _ti_core
class CuptiMetric:
    """A class to add CUPTI metric for :class:`~taichi.profiler.kernel_profiler.KernelProfiler`.
    This class is designed to add user selected CUPTI metrics.
    Only available for the CUDA backend now, i.e. you need ``ti.init(kernel_profiler=True, arch=ti.cuda)``.
    For usage of this class, see examples in func :func:`~taichi.profiler.set_kernel_profiler_metrics` and :func:`~taichi.profiler.collect_kernel_profiler_metrics`.
    Args:
        name (str): name of metric that collected by CUPTI toolkit. used by :func:`~taichi.profiler.set_kernel_profiler_metrics` and :func:`~taichi.profiler.collect_kernel_profiler_metrics`.
        header (str): column header of this metric, used by :func:`~taichi.profiler.print_kernel_profiler_info`.
        val_format (str): format for print metric value (and unit of this value), used by :func:`~taichi.profiler.print_kernel_profiler_info`.
        scale (float): scale of metric value, used by :func:`~taichi.profiler.print_kernel_profiler_info`.
    Example::
        >>> import taichi as ti
        >>> ti.init(kernel_profiler=True, arch=ti.cuda)
        >>> num_elements = 128*1024*1024
        >>> x = ti.field(ti.f32, shape=num_elements)
        >>> y = ti.field(ti.f32, shape=())
        >>> y[None] = 0
        >>> @ti.kernel
        >>> def reduction():
        >>>     for i in x:
        >>>         y[None] += x[i]
        >>> global_op_atom = ti.profiler.CuptiMetric(
        >>>     name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
        >>>     header=' global.atom ',
        >>>     val_format='    {:8.0f} ')
        >>> # add and set user defined metrics
        >>> profiling_metrics = ti.profiler.get_predefined_cupti_metrics('global_access') + [global_op_atom]
        >>> ti.profiler.set_kernel_profiler_metrics(profiling_metrics)
        >>> for i in range(16):
        >>>     reduction()
        >>> ti.profiler.print_kernel_profiler_info('trace')
    Note:
        For details about using CUPTI in Taichi, please visit https://docs.taichi.graphics/docs/lang/articles/misc/profiler#advanced-mode.
    """
    def __init__(self,
                 name='',
                 header='unnamed_header',
                 val_format='    {:8.0f} ',
                 scale=1.0):
        self.name = name  # CUPTI metric identifier string
        self.header = header  # column header for printed reports
        self.val_format = val_format  # value format string (includes unit)
        self.scale = scale  # multiplier applied before printing
# Global Memory Metrics
dram_utilization = CuptiMetric(
name='dram__throughput.avg.pct_of_peak_sustained_elapsed',
header=' global.uti ',
val_format=' {:6.2f} % ')
dram_bytes_sum = CuptiMetric(name='dram__bytes.sum',
header=' global.R&W ',
val_format='{:9.3f} MB ',
scale=1.0 / 1024 / 1024)
dram_bytes_throughput = CuptiMetric(name='dram__bytes.sum.per_second',
header=' global.R&W/s ',
val_format='{:8.3f} GB/s ',
scale=1.0 / 1024 / 1024 / 1024)
dram_bytes_read = CuptiMetric(name='dram__bytes_read.sum',
header=' global.R ',
val_format='{:8.3f} MB ',
scale=1.0 / 1024 / 1024)
dram_read_throughput = CuptiMetric(name='dram__bytes_read.sum.per_second',
header=' global.R/s ',
val_format='{:8.3f} GB/s ',
scale=1.0 / 1024 / 1024 / 1024)
dram_bytes_write = CuptiMetric(name='dram__bytes_write.sum',
header=' global.W ',
val_format='{:8.3f} MB ',
scale=1.0 / 1024 / 1024)
dram_write_throughput = CuptiMetric(name='dram__bytes_write.sum.per_second',
header=' global.W/s ',
val_format='{:8.3f} GB/s ',
scale=1.0 / 1024 / 1024 / 1024)
# Shared Memory Metrics
shared_utilization = CuptiMetric(
name=
'l1tex__data_pipe_lsu_wavefronts_mem_shared.avg.pct_of_peak_sustained_elapsed',
header=' uti.shared ',
val_format=' {:6.2f} % ')
shared_transactions_load = CuptiMetric(
name='l1tex__data_pipe_lsu_wavefronts_mem_shared_op_ld.sum',
header=' shared.trans.W ',
val_format=' {:10.0f} ')
shared_transactions_store = CuptiMetric(
name='l1tex__data_pipe_lsu_wavefronts_mem_shared_op_st.sum',
header=' shared.trans.R ',
val_format=' {:10.0f} ')
shared_bank_conflicts_store = CuptiMetric(
name='l1tex__data_bank_conflicts_pipe_lsu_mem_shared_op_st.sum',
header=' bank.conflict.W ',
val_format=' {:10.0f} ')
shared_bank_conflicts_load = CuptiMetric(
name='l1tex__data_bank_conflicts_pipe_lsu_mem_shared_op_ld.sum',
header=' bank.conflict.R ',
val_format=' {:10.0f} ')
# Atomic Metrics
global_op_atom = CuptiMetric(
name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_atom.sum',
header=' global.atom ',
val_format=' {:8.0f} ')
global_op_reduction = CuptiMetric(
name='l1tex__t_set_accesses_pipe_lsu_mem_global_op_red.sum',
header=' global.red ',
val_format=' {:8.0f} ')
# Hardware Utilization Metrics
sm_throughput = CuptiMetric(
name='sm__throughput.avg.pct_of_peak_sustained_elapsed',
header=' core.uti ',
val_format=' {:6.2f} % ')
dram_throughput = CuptiMetric(
name='gpu__dram_throughput.avg.pct_of_peak_sustained_elapsed',
header=' mem.uti ',
val_format=' {:6.2f} % ')
l1tex_throughput = CuptiMetric(
name='l1tex__throughput.avg.pct_of_peak_sustained_elapsed',
header=' L1.uti ',
val_format=' {:6.2f} % ')
l2_throughput = CuptiMetric(
name='lts__throughput.avg.pct_of_peak_sustained_elapsed',
header=' L2.uti ',
val_format=' {:6.2f} % ')
# Misc Metrics
l1_hit_rate = CuptiMetric(name='l1tex__t_sector_hit_rate.pct',
header=' L1.hit ',
val_format=' {:6.2f} % ')
l2_hit_rate = CuptiMetric(name='lts__t_sector_hit_rate.pct',
header=' L2.hit ',
val_format=' {:6.2f} % ')
achieved_occupancy = CuptiMetric(
name='sm__warps_active.avg.pct_of_peak_sustained_active',
header=' occupancy',
val_format=' {:6.0f} ')
# metric suite: global load & store
global_access = [
dram_bytes_sum,
dram_bytes_throughput,
dram_bytes_read,
dram_read_throughput,
dram_bytes_write,
dram_write_throughput,
]
# metric suite: shared load & store
shared_access = [
shared_transactions_load,
shared_transactions_store,
shared_bank_conflicts_store,
shared_bank_conflicts_load,
]
# metric suite: atomic access
atomic_access = [
global_op_atom,
global_op_reduction,
]
# metric suite: cache hit rate
cache_hit_rate = [
l1_hit_rate,
l2_hit_rate,
]
# metric suite: device throughput
device_utilization = [
sm_throughput,
dram_throughput,
shared_utilization,
l1tex_throughput,
l2_throughput,
]
# Predefined metrics suites
predefined_cupti_metrics = {
'global_access': global_access,
'shared_access': shared_access,
'atomic_access': atomic_access,
'cache_hit_rate': cache_hit_rate,
'device_utilization': device_utilization,
}
def get_predefined_cupti_metrics(name=''):
    """Look up a predefined CUPTI metric suite by name.

    Returns the list of :class:`CuptiMetric` registered under ``name``, or
    ``None`` (after warning with the valid choices) when ``name`` is unknown.
    """
    suite = predefined_cupti_metrics.get(name)
    if suite is None:
        _ti_core.warn("Valid Taichi predefined metrics list (str):")
        for valid_name in predefined_cupti_metrics:
            _ti_core.warn(f"    '{valid_name}'")
    return suite
# Default metrics list
# Used when the caller does not select any metrics explicitly.
default_cupti_metrics = [dram_bytes_sum]
__all__ = ['CuptiMetric', 'get_predefined_cupti_metrics']
|
yuanming-hu/taichi
|
python/taichi/profiler/kernel_metrics.py
|
Python
|
mit
| 7,879
|
[
"VisIt"
] |
d48a7031b0e647f7d12b14267006f7cbd832d915137ee05dabad7ba70c9a774c
|
#!/usr/bin/env python
# File created on 07 Jul 2012
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
from os.path import join
from qiime.make_otu_table import make_otu_table
from qiime.parse import parse_observation_metadata
from qiime.parallel.pick_otus import ParallelPickOtus
from qiime.util import write_biom_table
class ParallelDatabaseMapper(ParallelPickOtus):
    """Base class for parallel mapping of reads against a reference database."""
    _script_name = 'map_reads_to_reference.py'
    _job_prefix = 'RMAP'
    def _call_cleanup(self,
                      input_fp,
                      output_dir,
                      params,
                      job_prefix,
                      poll_directly,
                      suppress_submit_jobs):
        """ Called as the last step in __call__.

        When polling directly, builds the final BIOM observation table from
        the merged observation map; otherwise does nothing.
        """
        if poll_directly:
            if params['observation_metadata_fp'] is not None:
                # NOTE(review): file handles opened here are never explicitly
                # closed — consider context managers; confirm before changing.
                observation_metadata = \
                    parse_observation_metadata(
                        open(params['observation_metadata_fp'], 'U'))
            else:
                observation_metadata = None
            biom_fp = join(output_dir, 'observation_table.biom')
            biom_table = make_otu_table(
                open(join(output_dir, 'observation_map.txt'), 'U'),
                observation_metadata)
            write_biom_table(biom_table, biom_fp)
        else:
            # can't construct the final biom file if not polling
            # directly as the final observation map won't have been created yet
            pass
class ParallelDatabaseMapperUsearch(ParallelDatabaseMapper):
    """Parallel database mapper that delegates each chunk to usearch."""

    def _get_job_commands(self,
                          fasta_fps,
                          output_dir,
                          params,
                          job_prefix,
                          working_dir,
                          command_prefix='/bin/bash; ',
                          command_suffix='; exit'):
        """Build one shell command per input fasta chunk.

        Returns:
            (commands, result_filepaths): the shell command strings and the
            final locations of each run's output files after renaming.
        """
        out_filenames = ['observation_map.txt',
                         'out.uc',
                         'out.bl6',
                         'observation_table.biom']
        # Create lists to store the results
        commands = []
        result_filepaths = []
        # Iterate over the input files
        for i, fasta_fp in enumerate(fasta_fps):
            # Each run ends with moving the output file from the tmp dir to
            # the output_dir. Build the command to perform the move here.
            run_output_dir = join(working_dir, str(i))
            tmp_output_dir = join(working_dir, str(i), 'tmp')
            rename_command, current_result_filepaths = self._get_rename_command(
                out_filenames,
                tmp_output_dir,
                run_output_dir)
            result_filepaths += current_result_filepaths
            command = \
                '%s %s -i %s -r %s -m usearch -o %s --min_percent_id %s --max_accepts %d --max_rejects %d --queryalnfract %f --targetalnfract %f --evalue %e %s %s' %\
                (command_prefix,
                 self._script_name,
                 fasta_fp,
                 params['refseqs_fp'],
                 tmp_output_dir,
                 params['min_percent_id'],
                 params['max_accepts'],
                 params['max_rejects'],
                 params['queryalnfract'],
                 params['targetalnfract'],
                 params['evalue'],
                 rename_command,
                 command_suffix)
            commands.append(command)
        return commands, result_filepaths

    def _write_merge_map_file(self,
                              input_file_basename,
                              job_result_filepaths,
                              params,
                              output_dir,
                              merge_map_filepath):
        """Write the tab-separated merge map: per-job inputs -> merged output.

        Each line lists the per-job files of one kind followed by the file
        they should be merged into.
        """
        observation_fps = []
        uc_fps = []
        blast6_fps = []
        out_filepaths = [
            '%s/observation_map.txt' % output_dir,
            '%s/out.uc' % output_dir,
            '%s/out.bl6' % output_dir]
        in_filepaths = [observation_fps, uc_fps, blast6_fps]
        # Bucket each job result by suffix. Fixed: the original mixed a
        # standalone `if` with an `if/elif/else` chain, so observation_map
        # files also flowed into the second chain's else branch; a single
        # elif chain makes the dispatch mutually exclusive.
        for fp in job_result_filepaths:
            if fp.endswith('observation_map.txt'):
                observation_fps.append(fp)
            elif fp.endswith('.uc'):
                uc_fps.append(fp)
            elif fp.endswith('.bl6'):
                blast6_fps.append(fp)
            else:
                pass
        # Context manager guarantees the merge map is flushed and closed.
        with open(merge_map_filepath, 'w') as f:
            for in_files, out_file in\
                    zip(in_filepaths, out_filepaths):
                f.write('\t'.join(in_files + [out_file]))
                f.write('\n')
class ParallelDatabaseMapperBlat(ParallelDatabaseMapper):
    """Parallel read mapping against a reference database using blat."""

    def _get_job_commands(self,
                          fasta_fps,
                          output_dir,
                          params,
                          job_prefix,
                          working_dir,
                          command_prefix='/bin/bash; ',
                          command_suffix='; exit'):
        """Build one map_reads_to_reference.py shell command per input fasta.

        Returns (commands, result_filepaths): the commands to submit and the
        per-job output files expected after each job's rename step.
        """
        out_filenames = ['observation_map.txt',
                         'out.bl9',
                         'observation_table.log',
                         'observation_table.biom']
        # Create lists to store the results
        commands = []
        result_filepaths = []
        # Iterate over the input files
        for i, fasta_fp in enumerate(fasta_fps):
            # Each run ends with moving the output file from the tmp dir to
            # the output_dir. Build the command to perform the move here.
            run_output_dir = join(working_dir, str(i))
            tmp_output_dir = join(working_dir, str(i), 'tmp')
            rename_command, current_result_filepaths = self._get_rename_command(
                out_filenames,
                tmp_output_dir,
                run_output_dir)
            result_filepaths += current_result_filepaths
            command = \
                '%s %s -i %s -r %s -m blat -o %s --min_percent_id %s --evalue %e %s %s' %\
                (command_prefix,
                 self._script_name,
                 fasta_fp,
                 params['refseqs_fp'],
                 tmp_output_dir,
                 params['min_percent_id'],
                 params['evalue'],
                 rename_command,
                 command_suffix)
            commands.append(command)
        return commands, result_filepaths

    def _write_merge_map_file(self,
                              input_file_basename,
                              job_result_filepaths,
                              params,
                              output_dir,
                              merge_map_filepath):
        """Write a tab-separated map of per-job output files to merged outputs.

        Each output line lists the per-job files of one kind followed by the
        single merged file they should be combined into.
        """
        f = open(merge_map_filepath, 'w')
        observation_fps = []
        log_fps = []
        blast9_fps = []
        out_filepaths = [
            '%s/observation_map.txt' % output_dir,
            '%s/observation_table.log' % output_dir,
            '%s/out.bl9' % output_dir]
        in_filepaths = [observation_fps, log_fps, blast9_fps]
        for fp in job_result_filepaths:
            # observation maps are matched by a separate `if` (not elif) so
            # the .log/.bl9 chain below never sees them match either suffix
            if fp.endswith('observation_map.txt'):
                observation_fps.append(fp)
            if fp.endswith('.log'):
                log_fps.append(fp)
            elif fp.endswith('.bl9'):
                blast9_fps.append(fp)
            else:
                # e.g. the per-job biom tables are intentionally not merged
                pass
        for in_files, out_file in\
                zip(in_filepaths, out_filepaths):
            f.write('\t'.join(in_files + [out_file]))
            f.write('\n')
        f.close()
class ParallelDatabaseMapperBwaShort(ParallelDatabaseMapper):
    """Parallel read mapping against a reference database using bwa-short."""

    def _get_job_commands(self,
                          fasta_fps,
                          output_dir,
                          params,
                          job_prefix,
                          working_dir,
                          command_prefix='/bin/bash; ',
                          command_suffix='; exit'):
        """Build one map_reads_to_reference.py shell command per input fasta.

        Returns (commands, result_filepaths): the commands to submit and the
        per-job output files expected after each job's rename step.
        """
        out_filenames = ['observation_map.txt',
                         'bwa_raw_out.sam',
                         'bwa_raw_out.sai',
                         'observation_table.log',
                         'observation_table.biom']
        # Create lists to store the results
        commands = []
        result_filepaths = []
        # --max_diff is only forwarded when the caller provided a value
        if params['max_diff'] is not None:
            max_diff_str = "--max_diff %s" % params['max_diff']
        else:
            max_diff_str = ""
        # Iterate over the input files
        for i, fasta_fp in enumerate(fasta_fps):
            # Each run ends with moving the output file from the tmp dir to
            # the output_dir. Build the command to perform the move here.
            run_output_dir = join(working_dir, str(i))
            tmp_output_dir = join(working_dir, str(i), 'tmp')
            rename_command, current_result_filepaths = self._get_rename_command(
                out_filenames,
                tmp_output_dir,
                run_output_dir)
            result_filepaths += current_result_filepaths
            command = \
                '%s %s -i %s -r %s -m bwa-short -o %s %s %s %s' %\
                (command_prefix,
                 self._script_name,
                 fasta_fp,
                 params['refseqs_fp'],
                 tmp_output_dir,
                 max_diff_str,
                 rename_command,
                 command_suffix)
            commands.append(command)
        return commands, result_filepaths

    def _write_merge_map_file(self,
                              input_file_basename,
                              job_result_filepaths,
                              params,
                              output_dir,
                              merge_map_filepath):
        """Write a tab-separated map of per-job output files to merged outputs.

        Each output line lists the per-job files of one kind followed by the
        single merged file they should be combined into.
        """
        f = open(merge_map_filepath, 'w')
        observation_fps = []
        log_fps = []
        sam_fps = []
        out_filepaths = [
            '%s/observation_map.txt' % output_dir,
            '%s/observation_table.log' % output_dir,
            '%s/bwa_raw_out.sam' % output_dir]
        in_filepaths = [observation_fps, log_fps, sam_fps]
        for fp in job_result_filepaths:
            # observation maps are matched by a separate `if` (not elif) so
            # the .log/.sam chain below never sees them match either suffix
            if fp.endswith('observation_map.txt'):
                observation_fps.append(fp)
            if fp.endswith('.log'):
                log_fps.append(fp)
            elif fp.endswith('.sam'):
                sam_fps.append(fp)
            else:
                # e.g. .sai and biom outputs are intentionally not merged
                pass
        for in_files, out_file in\
                zip(in_filepaths, out_filepaths):
            f.write('\t'.join(in_files + [out_file]))
            f.write('\n')
        f.close()
|
wasade/qiime
|
qiime/parallel/map_reads_to_reference.py
|
Python
|
gpl-2.0
| 10,794
|
[
"BWA"
] |
6a8391a935d9ca6a9b7f08911bd58cb444eadbd804ba54aeb4857b776a949149
|
#!/usr/bin/python
import vtk
import vtk.util.numpy_support
import numpy
import os
import sys
SRC_DIR = "./x-data/"
def captureImage(window, timestamp):
    """Grab the current render window contents and save them as a PNG."""
    grabber = vtk.vtkWindowToImageFilter()
    grabber.SetInput(window)
    grabber.Update()
    png = vtk.vtkPNGWriter()
    png.SetFileName("./temperature-{}.png".format(timestamp))
    png.SetInput(grabber.GetOutput())
    png.Write()
def addPlot(chart, reader, name):
    """Add a line plot of the "T" point array versus y-coordinate to chart."""
    grid = reader.GetOutput()
    ycoords = vtk.vtkFloatArray()
    ycoords.SetName("Coords")
    for idx in range(grid.GetNumberOfPoints()):
        _, y, _ = grid.GetPoint(idx)
        ycoords.InsertNextValue(y)
    temperature = grid.GetPointData().GetArray("T")
    # rename the scalar array so the plot legend shows `name`
    temperature.SetName(name)
    table = vtk.vtkTable()
    table.AddColumn(ycoords)
    table.AddColumn(temperature)
    plot = chart.AddPlot(vtk.vtkChart.LINE)
    plot.SetInput(table, 0, 1)
if __name__ == "__main__":
dirname, filename = os.path.split(sys.argv[1])
_, _, k = filename.partition("-")
l,_,_ = k.partition(".")
view = vtk.vtkContextView()
view.GetRenderer().SetBackground(1.0, 1.0, 1.0)
view.GetRenderWindow().SetSize(800, 600)
chart = vtk.vtkChartXY()
view.GetScene().AddItem(chart)
chart.SetTitle("time = {}".format(int(l)))
chart.GetAxis(0).SetTitle("Temperature")
chart.GetAxis(1).SetTitle("Coordinate")
reader = vtk.vtkXMLStructuredGridReader()
reader.SetFileName(sys.argv[1])
reader.Update()
addPlot(chart, reader, "time = {}".format(int(l)))
# chart.SetShowLegend(True)
view.GetRenderWindow().SetMultiSamples(0)
view.GetRenderWindow().Render()
captureImage(view.GetRenderWindow(), l)
|
alyupa/multiphase-flow-modeling
|
visualisation/ztemp-single.py
|
Python
|
mit
| 1,924
|
[
"VTK"
] |
820d0fb91e7cbcc17a653884147cadf6586b44b5abfa5ebbb39dce13b7358168
|
import os
import sys
import math
import argparse
import config
import shutil
from turkic.cli import handler, importparser, Command, LoadCommand
from turkic.database import session
import sqlalchemy
import random
from vision import Box
from vision import ffmpeg
import vision.visualize
import vision.track.interpolation
import turkic.models
from models import *
import cStringIO
import Image, ImageDraw, ImageFont
import qa
import merge
import parsedatetime.parsedatetime
import datetime, time
import vision.pascal
import itertools
from xml.etree import ElementTree
@handler("Decompresses an entire video into frames")
class extract(Command):
def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument("video")
parser.add_argument("output")
parser.add_argument("--width", default=720, type=int)
parser.add_argument("--height", default=480, type=int)
parser.add_argument("--no-resize",
action="store_true", default = False)
parser.add_argument("--no-cleanup",
action="store_true", default=False)
return parser
def __call__(self, args):
try:
os.makedirs(args.output)
except:
pass
sequence = ffmpeg.extract(args.video)
try:
for frame, image in enumerate(sequence):
if frame % 100 == 0:
print ("Decoding frames {0} to {1}"
.format(frame, frame + 100))
if not args.no_resize:
image.thumbnail((args.width, args.height), Image.BILINEAR)
path = Video.getframepath(frame, args.output)
try:
image.save(path)
except IOError:
os.makedirs(os.path.dirname(path))
image.save(path)
except:
if not args.no_cleanup:
print "Aborted. Cleaning up..."
shutil.rmtree(args.output)
raise
@handler("Formats existing frames ")
class formatframes(Command):
def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument("video")
parser.add_argument("output")
parser.add_argument("--extension", default="jpg")
parser.add_argument("--no-cleanup",
action="store_true", default=False)
return parser
def __call__(self, args):
try:
os.makedirs(args.output)
except:
pass
extension = ".{0}".format(args.extension)
files = os.listdir(args.video)
files = (x for x in files if x.endswith(extension))
files = [(int(x.split(".")[0]), x) for x in files]
files.sort()
files = [(x, y) for x, (_, y) in enumerate(files)]
if not files:
print "No files ending with {0}".format(extension)
return
for frame, file in files:
path = Video.getframepath(frame, args.output)
file = os.path.join(args.video, file)
try:
os.link(file, path)
except OSError:
os.makedirs(os.path.dirname(path))
os.link(file, path)
print "Formatted {0} frames".format(len(files))
@handler("Imports a set of video frames")
class load(LoadCommand):
def setup(self):
parser = argparse.ArgumentParser(parents = [importparser])
parser.add_argument("slug")
parser.add_argument("location")
parser.add_argument("labels", nargs="+")
parser.add_argument("--length", type=int, default = 300)
parser.add_argument("--overlap", type=int, default = 20)
parser.add_argument("--skip", type=int, default = 0)
parser.add_argument("--per-object-bonus", type=float)
parser.add_argument("--completion-bonus", type=float)
parser.add_argument("--use-frames", default = None)
parser.add_argument("--start-frame", type = int, default = 0)
parser.add_argument("--stop-frame", type = int, default = None)
parser.add_argument("--train-with")
parser.add_argument("--for-training", action="store_true")
parser.add_argument("--for-training-start", type=int)
parser.add_argument("--for-training-stop", type=int)
parser.add_argument("--for-training-overlap", type=float, default=0.25)
parser.add_argument("--for-training-tolerance", type=float, default=0.2)
parser.add_argument("--for-training-mistakes", type=int, default=0)
parser.add_argument("--for-training-data", default = None)
# added by Menglong Zhu <06-27-2012>
parser.add_argument("--blow-radius", default = 0)
parser.add_argument("--action", default = None)
parser.add_argument("--pose", default = None)
return parser
def title(self, args):
return "Sports Video annotation"
def description(self, args):
return "Draw circle around moving human joints in a video."
def cost(self, args):
return 0.05
def duration(self, args):
return 7200 * 3
def keywords(self, args):
return "video, annotation, sports"
def __call__(self, args, group):
print "Checking integrity..."
# read first frame to get sizes
path = Video.getframepath(0, args.location)
try:
im = Image.open(path)
except IOError:
print "Cannot read {0}".format(path)
return
width, height = im.size
print "Searching for last frame..."
# search for last frame
toplevel = max(int(x)
for x in os.listdir(args.location))
secondlevel = max(int(x)
for x in os.listdir("{0}/{1}".format(args.location, toplevel)))
maxframes = max(int(os.path.splitext(x)[0])
for x in os.listdir("{0}/{1}/{2}"
.format(args.location, toplevel, secondlevel))) + 1
print "Found {0} frames.".format(maxframes)
# can we read the last frame?
path = Video.getframepath(maxframes - 1, args.location)
try:
im = Image.open(path)
except IOError:
print "Cannot read {0}".format(path)
return
# check last frame sizes
if im.size[0] != width and im.size[1] != height:
print "First frame dimensions differs from last frame"
return
if session.query(Video).filter(Video.slug == args.slug).count():
print "Video {0} already exists!".format(args.slug)
return
if args.train_with:
if args.for_training:
print "A training video cannot require training"
return
print "Looking for training video..."
trainer = session.query(Video)
trainer = trainer.filter(Video.slug == args.train_with)
if not trainer.count():
print ("Training video {0} does not exist!"
.format(args.train_with))
return
trainer = trainer.one()
else:
trainer = None
# create video
video = Video(slug = args.slug,
location = os.path.realpath(args.location),
width = width,
height = height,
totalframes = maxframes,
skip = args.skip,
perobjectbonus = args.per_object_bonus,
completionbonus = args.completion_bonus,
trainwith = trainer,
isfortraining = args.for_training,
blowradius = args.blow_radius,
action = args.action,
pose = args.pose)
if args.for_training:
video.trainvalidator = qa.tolerable(args.for_training_overlap,
args.for_training_tolerance,
args.for_training_mistakes)
print "Training validator is {0}".format(video.trainvalidator)
session.add(video)
print "Binding labels and attributes..."
# create labels and attributes
labelcache = {}
attributecache = {}
lastlabel = None
for labeltext in args.labels:
if labeltext[0] == "~":
if lastlabel is None:
print "Cannot assign an attribute without a label!"
return
labeltext = labeltext[1:]
attribute = Attribute(text = labeltext)
session.add(attribute)
lastlabel.attributes.append(attribute)
attributecache[labeltext] = attribute
else:
label = Label(text = labeltext)
session.add(label)
video.labels.append(label)
labelcache[labeltext] = label
lastlabel = label
print "Creating symbolic link..."
symlink = "public/frames/{0}".format(video.slug)
try:
os.remove(symlink)
except:
pass
os.symlink(video.location, symlink)
print "Creating segments..."
# create shots and jobs
if args.for_training:
segment = Segment(video = video)
if args.for_training_start:
segment.start = args.for_training_start
if segment.start < 0:
segment.start = 0
else:
segment.start = 0
if args.for_training_stop:
segment.stop = args.for_training_stop
if segment.stop > video.totalframes - 1:
segment.stop = video.totalframes - 1
else:
segment.stop = video.totalframes - 1
job = Job(segment = segment, group = group, ready = False)
session.add(segment)
session.add(job)
elif args.use_frames:
with open(args.use_frames) as useframes:
for line in useframes:
ustart, ustop = line.split()
ustart, ustop = int(ustart), int(ustop)
validlength = float(ustop - ustart)
numsegments = math.ceil(validlength / args.length)
segmentlength = math.ceil(validlength / numsegments)
for start in range(ustart, ustop, int(segmentlength)):
stop = min(start + segmentlength + args.overlap + 1,
ustop)
segment = Segment(start = start,
stop = stop,
video = video)
job = Job(segment = segment, group = group)
session.add(segment)
session.add(job)
else:
startframe = args.start_frame
stopframe = args.stop_frame
if not stopframe:
stopframe = video.totalframes - 1
for start in range(startframe, stopframe, args.length):
stop = min(start + args.length + args.overlap + 1,
stopframe)
segment = Segment(start = start,
stop = stop,
video = video)
job = Job(segment = segment, group = group)
session.add(segment)
session.add(job)
if args.per_object_bonus:
group.schedules.append(
PerObjectBonus(amount = args.per_object_bonus))
if args.completion_bonus:
group.schedules.append(
CompletionBonus(amount = args.completion_bonus))
session.add(group)
if args.for_training and args.for_training_data:
print ("Loading training ground truth annotations from {0}"
.format(args.for_training_data))
with open(args.for_training_data, "r") as file:
pathcache = {}
for line in file:
(id, xtl, ytl, xbr, ybr,
frame, outside, occluded, generated,
label) = line.split(" ")
if int(generated):
continue
if id not in pathcache:
print "Imported new path {0}".format(id)
label = labelcache[label.strip()[1:-1]]
pathcache[id] = Path(job = job, label = label)
box = Box(path = pathcache[id])
box.xtl = int(xtl)
box.ytl = int(ytl)
box.xbr = int(xbr)
box.ybr = int(ybr)
box.frame = int(frame)
box.outside = int(outside)
box.occluded = int(occluded)
pathcache[id].boxes.append(box)
session.commit()
if args.for_training:
if args.for_training and args.for_training_data:
print "Video and ground truth loaded."
else:
print "Video loaded and ready for ground truth:"
print ""
print "\t{0}".format(job.offlineurl(config.localhost))
print ""
print "Visit this URL to provide training with ground truth."
else:
print "Video loaded and ready for publication."
@handler("Deletes an already imported video")
class delete(Command):
def setup(self):
parser = argparse.ArgumentParser()
parser.add_argument("slug")
parser.add_argument("--force", action="store_true", default=False)
return parser
def __call__(self, args):
video = session.query(Video).filter(Video.slug == args.slug)
if not video.count():
print "Video {0} does not exist!".format(args.slug)
return
video = video.one()
query = session.query(Path)
query = query.join(Job)
query = query.join(Segment)
query = query.filter(Segment.video == video)
numpaths = query.count()
if numpaths and not args.force:
print ("Video has {0} paths. Use --force to delete."
.format(numpaths))
return
for segment in video.segments:
for job in segment.jobs:
if job.published and not job.completed:
hitid = job.disable()
print "Disabled {0}".format(hitid)
session.delete(video)
session.commit()
print "Deleted video and associated data."
class DumpCommand(Command):
    """Shared machinery for commands that export annotation data.

    Provides the common argument parser (`parent`) and getdata(), which
    loads a video's tracklets — optionally merged across segments and
    filtered by worker — and linearly interpolates their boxes.
    """
    # parser shared via parents=[...] by all dump-style subcommands
    parent = argparse.ArgumentParser(add_help=False)
    parent.add_argument("slug")
    parent.add_argument("--merge", "-m", action="store_true", default=False)
    parent.add_argument("--merge-threshold", "-t",
        type=float, default = 0.5)
    parent.add_argument("--worker", "-w", nargs = "*", default = None)

    class Tracklet(object):
        """One labeled track: its source paths, boxes, and contributing
        workers."""

        def __init__(self, label, paths, boxes, workers):
            self.label = label
            self.paths = paths
            # keep boxes in chronological order
            self.boxes = sorted(boxes, key = lambda x: x.frame)
            self.workers = workers

        def bind(self):
            # attach the per-path attribute annotations onto the boxes
            for path in self.paths:
                self.boxes = Path.bindattributes(path.attributes, self.boxes)

    def getdata(self, args):
        """Return (video, tracklets) for args.slug; exits if slug unknown."""
        response = []
        video = session.query(Video).filter(Video.slug == args.slug)
        if video.count() == 0:
            print "Video {0} does not exist!".format(args.slug)
            raise SystemExit()
        video = video.one()
        if args.merge:
            # merge overlapping paths across segment boundaries
            for boxes, paths in merge.merge(video.segments,
                                            threshold = args.merge_threshold):
                workers = list(set(x.job.workerid for x in paths))
                tracklet = DumpCommand.Tracklet(paths[0].label.text,
                                                paths, boxes, workers)
                response.append(tracklet)
        else:
            # one tracklet per annotated path of every useful job
            for segment in video.segments:
                for job in segment.jobs:
                    if not job.useful:
                        continue
                    worker = job.workerid
                    for path in job.paths:
                        tracklet = DumpCommand.Tracklet(path.label.text,
                                                        [path],
                                                        path.getboxes(),
                                                        [worker])
                        response.append(tracklet)
        if args.worker:
            # keep only tracklets touched by the requested workers
            workers = set(args.worker)
            response = [x for x in response if set(x.workers) & workers]
        # fill frame gaps by linear interpolation
        interpolated = []
        for track in response:
            path = vision.track.interpolation.LinearFill(track.boxes)
            tracklet = DumpCommand.Tracklet(track.label, track.paths,
                                            path, track.workers)
            interpolated.append(tracklet)
        response = interpolated
        for tracklet in response:
            tracklet.bind()
        return video, response
@handler("Highlights a video sequence")
class visualize(DumpCommand):
def setup(self):
parser = argparse.ArgumentParser(parents = [self.parent])
parser.add_argument("output")
parser.add_argument("--no-augment", action="store_true", default = False)
parser.add_argument("--labels", action="store_true", default = False)
parser.add_argument("--renumber", action="store_true", default = False)
return parser
def __call__(self, args):
video, data = self.getdata(args)
# prepend class label
for track in data:
for box in track.boxes:
box.attributes.insert(0, track.label)
paths = [x.boxes for x in data]
print "Highlighting {0} tracks...".format(len(data))
if args.labels:
font = ImageFont.truetype("arial.ttf", 14)
else:
font = None
it = vision.visualize.highlight_paths(video, paths, font = font)
if not args.no_augment:
it = self.augment(args, video, data, it)
if args.renumber:
it = self.renumber(it)
try:
os.makedirs(args.output)
except:
pass
vision.visualize.save(it,
lambda x: "{0}/{1}.jpg".format(args.output, x))
def renumber(self, it):
for count, (im, _) in enumerate(it):
yield im, count
def augment(self, args, video, data, frames):
offset = 100
for im, frame in frames:
aug = Image.new(im.mode, (im.size[0], im.size[1] + offset))
aug.paste("black")
aug.paste(im, (0, 0))
draw = ImageDraw.ImageDraw(aug)
s = im.size[1]
font = ImageFont.truetype("arial.ttf", 14)
# extract some data
workerids = set()
sum = 0
for track in data:
if frame in (x.frame for x in track.boxes):
for worker in track.workers:
if worker not in workerids and worker is not None:
workerids.add(worker)
sum += 1
ypos = s + 5
for worker in workerids:
draw.text((5, ypos), worker, fill="white", font = font)
ypos += draw.textsize(worker, font = font)[1] + 3
size = draw.textsize(video.slug, font = font)
draw.text((im.size[0] - size[0] - 5, s + 5),
video.slug, font = font)
text = "{0} annotations".format(sum)
numsize = draw.textsize(text, font = font)
draw.text((im.size[0] - numsize[0] - 5, s + 5 + size[1] + 3),
text, font = font)
yield aug, frame
@handler("Dumps the tracking data")
class dump(DumpCommand):
    def setup(self):
        """Extend the shared dump parser with output-format and scaling
        options; the format flags are mutually exclusive by convention."""
        parser = argparse.ArgumentParser(parents = [self.parent])
        parser.add_argument("--output", "-o")
        parser.add_argument("--xml", "-x",
            action="store_true", default=False)
        parser.add_argument("--json", "-j",
            action="store_true", default=False)
        parser.add_argument("--matlab", "-ml",
            action="store_true", default=False)
        parser.add_argument("--pickle", "-p",
            action="store_true", default=False)
        # note: takes a value (the LabelMe folder), unlike the other flags
        parser.add_argument("--labelme", "-vlm",
            action="store", default=False)
        parser.add_argument("--pascal", action="store_true", default=False)
        parser.add_argument("--pascal-difficult", type = int, default = 100)
        parser.add_argument("--pascal-skip", type = int, default = 15)
        parser.add_argument("--pascal-negatives")
        parser.add_argument("--scale", "-s", default = 1.0, type = float)
        parser.add_argument("--dimensions", "-d", default = None)
        parser.add_argument("--original-video", "-v", default = None)
        parser.add_argument("--lowercase", action="store_true", default=False)
        return parser
    def __call__(self, args):
        """Load the video's tracklets, rescale them, and write them in the
        requested format to a file, a directory (PASCAL), or stdout."""
        video, data = self.getdata(args)
        # pick the output sink: PASCAL wants a directory name, other
        # formats a file object; default is an in-memory buffer -> stdout
        if args.pascal:
            if not args.output:
                print "error: PASCAL output needs an output"
                return
            file = args.output
            print "Dumping video {0}".format(video.slug)
        elif args.output:
            file = open(args.output, 'w')
            print "Dumping video {0}".format(video.slug)
        else:
            file = cStringIO.StringIO()
        scale = args.scale
        if args.dimensions or args.original_video:
            # derive the scale factor that fits the annotated frame size
            # into the target dimensions while preserving aspect ratio
            if args.original_video:
                #w, h = ffmpeg.extract(args.original_video).next().size
                w, h = ffmpeg.info().get_size(args.original_video)
            else:
                w, h = args.dimensions.split("x")
            w = float(w)
            h = float(h)
            s = w / video.width
            if s * video.height > h:
                s = h / video.height
            scale = s
        for track in data:
            track.boxes = [x.transform(scale) for x in track.boxes]
            if args.lowercase:
                track.label = track.label.lower()
        # dispatch on the requested format; plain text is the fallback
        if args.xml:
            self.dumpxml(file, data)
        elif args.json:
            self.dumpjson(file, data)
        elif args.matlab:
            self.dumpmatlab(file, data, video, scale)
        elif args.pickle:
            self.dumppickle(file, data)
        elif args.labelme:
            self.dumplabelme(file, data, args.slug, args.labelme)
        elif args.pascal:
            if scale != 1:
                print "Warning: scale is not 1, yet frames are not resizing!"
                print "Warning: you should manually update the JPEGImages"
            self.dumppascal(file, video, data, args.pascal_difficult,
                            args.pascal_skip, args.pascal_negatives)
        else:
            self.dumptext(file, data)
        if args.pascal:
            return
        elif args.output:
            file.close()
        else:
            sys.stdout.write(file.getvalue())
def dumpmatlab(self, file, data, video, scale):
results = []
for id, track in enumerate(data):
for box in track.boxes:
if not box.lost:
data = {}
data['id'] = id
data['xtl'] = box.xtl
data['ytl'] = box.ytl
data['xbr'] = box.xbr
data['ybr'] = box.ybr
data['frame'] = box.frame
data['lost'] = box.lost
data['occluded'] = box.occluded
data['label'] = track.label
data['attributes'] = box.attributes
data['generated'] = box.generated
results.append(data)
from scipy.io import savemat as savematlab
savematlab(file,
{"annotations": results,
"num_frames": video.totalframes,
"slug": video.slug,
"skip": video.skip,
"width": int(video.width * scale),
"height": int(video.height * scale),
"scale": scale}, oned_as="row")
def dumpxml(self, file, data):
file.write("<annotations count=\"{0}\">\n".format(len(data)))
for id, track in enumerate(data):
file.write("\t<track id=\"{0}\" label=\"{1}\">\n"
.format(id, track.label))
for box in track.boxes:
file.write("\t\t<box frame=\"{0}\"".format(box.frame))
file.write(" xtl=\"{0}\"".format(box.xtl))
file.write(" ytl=\"{0}\"".format(box.ytl))
file.write(" xbr=\"{0}\"".format(box.xbr))
file.write(" ybr=\"{0}\"".format(box.ybr))
file.write(" outside=\"{0}\"".format(box.lost))
file.write(" occluded=\"{0}\">".format(box.occluded))
for attr in box.attributes:
file.write("<attribute id=\"{0}\">{1}</attribute>".format(
attr.id, attr.text))
file.write("</box>\n")
file.write("\t</track>\n")
file.write("</annotations>\n")
def dumpjson(self, file, data):
annotations = {}
for id, track in enumerate(data):
result = {}
result['label'] = track.label
boxes = {}
for box in track.boxes:
boxdata = {}
boxdata['xtl'] = box.xtl
boxdata['ytl'] = box.ytl
boxdata['xbr'] = box.xbr
boxdata['ybr'] = box.ybr
boxdata['outside'] = box.lost
boxdata['occluded'] = box.occluded
boxdata['attributes'] = box.attributes
boxes[int(box.frame)] = boxdata
result['boxes'] = boxes
annotations[int(id)] = result
import json
json.dump(annotations, file)
file.write("\n")
def dumppickle(self, file, data):
annotations = []
for track in data:
result = {}
result['label'] = track.label
result['boxes'] = track.boxes
annotations.append(result)
import pickle
pickle.dump(annotations, file, protocol = 2)
def dumptext(self, file, data):
for id, track in enumerate(data):
for box in track.boxes:
file.write(str(id))
file.write(" ")
file.write(str(box.xtl))
file.write(" ")
file.write(str(box.ytl))
file.write(" ")
file.write(str(box.xbr))
file.write(" ")
file.write(str(box.ybr))
file.write(" ")
file.write(str(box.frame))
file.write(" ")
file.write(str(box.lost))
file.write(" ")
file.write(str(box.occluded))
file.write(" ")
file.write(str(box.generated))
file.write(" \"")
file.write(track.label)
file.write("\"")
for attr in box.attributes:
file.write(" \"")
file.write(attr.text)
file.write("\"")
file.write("\n")
    def dumplabelme(self, file, data, slug, folder):
        """Write annotations in the LabelMe video XML format: one <object>
        per tracklet (with a 4-point <polygon> per visible frame), followed
        by one <event> per run of consecutive occluded frames."""
        file.write("<annotation>")
        file.write("<folder>{0}</folder>".format(folder))
        file.write("<filename>{0}.flv</filename>".format(slug))
        file.write("<source>")
        file.write("<type>video</type>")
        file.write("<sourceImage>vatic frames</sourceImage>")
        file.write("<sourceAnnotation>vatic</sourceAnnotation>")
        file.write("</source>")
        file.write("\n")
        # materialize the enumeration; it is iterated twice below
        data = list(enumerate(data))
        for id, track in data:
            # frames where the object is actually visible
            eligibleframes = [x.frame for x in track.boxes if not x.lost]
            if not eligibleframes:
                continue
            startframe = min(eligibleframes)
            endframe = max(eligibleframes)
            file.write("<object>")
            file.write("<name>{0}</name>".format(track.label))
            file.write("<moving>true</moving>")
            file.write("<action/>")
            file.write("<verified>0</verified>")
            file.write("<id>{0}</id>".format(id))
            file.write("<createdFrame>{0}</createdFrame>".format(startframe))
            file.write("<startFrame>{0}</startFrame>".format(startframe))
            file.write("<endFrame>{0}</endFrame>".format(endframe))
            file.write("\n")
            for box in track.boxes:
                if box.lost:
                    continue
                # four corner points, counter-clockwise from top-left;
                # <l> marks whether the point was hand-labeled (1) or
                # interpolated (0)
                file.write("<polygon>")
                file.write("<t>{0}</t>".format(box.frame))
                file.write("<pt>")
                file.write("<x>{0}</x>".format(box.xtl))
                file.write("<y>{0}</y>".format(box.ytl))
                file.write("<l>{0}</l>".format(0 if box.generated else 1))
                file.write("</pt>")
                file.write("<pt>")
                file.write("<x>{0}</x>".format(box.xtl))
                file.write("<y>{0}</y>".format(box.ybr))
                file.write("<l>{0}</l>".format(0 if box.generated else 1))
                file.write("</pt>")
                file.write("<pt>")
                file.write("<x>{0}</x>".format(box.xbr))
                file.write("<y>{0}</y>".format(box.ybr))
                file.write("<l>{0}</l>".format(0 if box.generated else 1))
                file.write("</pt>")
                file.write("<pt>")
                file.write("<x>{0}</x>".format(box.xbr))
                file.write("<y>{0}</y>".format(box.ytl))
                file.write("<l>{0}</l>".format(0 if box.generated else 1))
                file.write("</pt>")
                file.write("</polygon>")
                file.write("\n")
            file.write("</object>")
            file.write("\n")
        eventcounter = 0
        for id, track in data:
            # collapse consecutive occluded frames into single events
            occlusions = [x for x in track.boxes if x.occluded and not x.lost]
            lastframe = None
            startframe = None
            for box in occlusions:
                # always flush on the final occluded box
                output = box is occlusions[-1]
                if lastframe is None:
                    lastframe = box.frame
                    startframe = box.frame
                elif box.frame == lastframe + 1:
                    lastframe = box.frame
                else:
                    # gap in the run: emit the previous run now
                    output = True
                if output:
                    file.write("<event>");
                    file.write("<username>anonymous</username>")
                    file.write("<startFrame>{0}</startFrame>".format(startframe))
                    file.write("<endFrame>{0}</endFrame>".format(lastframe))
                    file.write("<createdFrame>{0}</createdFrame>".format(startframe))
                    file.write("<eid>{0}</eid>".format(eventcounter))
                    file.write("<x>0</x>")
                    file.write("<y>0</y>")
                    file.write("<sentence>")
                    file.write("<word><text>{0}</text><id>{1}</id></word>"
                        .format(track.label, id))
                    file.write("<word><text>is</text></word>")
                    file.write("<word><text>occluded</text></word>")
                    file.write("<word><text>by</text></word>")
                    file.write("<word><text>unknown</text></word>")
                    file.write("</sentence>")
                    file.write("</event>")
                    file.write("\n")
                    eventcounter += 1
                    lastframe = None
                    startframe = None
        file.write("</annotation>")
        file.write("\n")
def dumppascal(self, folder, video, data, difficultthresh, skip,
               negdir):
    """Export the annotations as a PASCAL VOC style dataset rooted at 'folder'.

    Creates the standard VOC layout (Annotations/, ImageSets/Main/,
    JPEGImages/) with one XML file and one hard-linked JPEG per sampled
    frame, plus per-label train/trainval image-set files.

    folder          -- output directory for the VOC layout
    video           -- video providing frames, dimensions, and slug
    data            -- iterable of tracks (each with .boxes and .label)
    difficultthresh -- boxes with area below this are flagged <difficult>
    skip            -- sample every skip-th frame of the video
    negdir          -- optional existing PASCAL dataset from which negative
                      images (lacking each label) are copied in
    """
    # Index boxes by frame number so each frame's XML is written in one pass.
    byframe = {}
    for track in data:
        for box in track.boxes:
            if box.frame not in byframe:
                byframe[box.frame] = []
            byframe[box.frame].append((box, track))
    # hasit maps label -> set of frames where that label is visible.
    hasit = {}
    allframes = range(0, video.totalframes, skip)
    # Best-effort directory creation; bare except tolerates pre-existing dirs
    # (NOTE(review): it also hides permission errors — confirm intended).
    try:
        os.makedirs("{0}/Annotations".format(folder))
    except:
        pass
    try:
        os.makedirs("{0}/ImageSets/Main/".format(folder))
    except:
        pass
    try:
        os.makedirs("{0}/JPEGImages/".format(folder))
    except:
        pass
    numdifficult = 0
    numtotal = 0
    pascalds = None
    allnegatives = set()
    if negdir:
        pascalds = vision.pascal.PascalDataset(negdir)
    print "Writing annotations..."
    for frame in allframes:
        if frame in byframe:
            boxes = byframe[frame]
        else:
            boxes = []
        # VOC image names are 1-based, zero-padded to six digits.
        strframe = str(frame+1).zfill(6)
        filename = "{0}/Annotations/{1}.xml".format(folder, strframe)
        file = open(filename, "w")
        file.write("<annotation>")
        file.write("<folder>{0}</folder>".format(folder))
        file.write("<filename>{0}.jpg</filename>".format(strframe))
        isempty = True
        for box, track in boxes:
            if box.lost:
                continue
            isempty = False
            if track.label not in hasit:
                hasit[track.label] = set()
            hasit[track.label].add(frame)
            numtotal += 1
            # Small boxes are marked "difficult" so VOC scoring can skip them.
            difficult = box.area < difficultthresh
            if difficult:
                numdifficult += 1
            difficult = int(difficult)
            file.write("<object>")
            file.write("<name>{0}</name>".format(track.label))
            file.write("<bndbox>")
            file.write("<xmax>{0}</xmax>".format(box.xbr))
            file.write("<xmin>{0}</xmin>".format(box.xtl))
            file.write("<ymax>{0}</ymax>".format(box.ybr))
            file.write("<ymin>{0}</ymin>".format(box.ytl))
            file.write("</bndbox>")
            file.write("<difficult>{0}</difficult>".format(difficult))
            file.write("<occluded>{0}</occluded>".format(box.occluded))
            file.write("<pose>Unspecified</pose>")
            file.write("<truncated>0</truncated>")
            file.write("</object>")
        if isempty:
            # since there are no objects for this frame,
            # we need to fabricate one
            file.write("<object>")
            file.write("<name>not-a-real-object</name>")
            file.write("<bndbox>")
            file.write("<xmax>10</xmax>")
            file.write("<xmin>20</xmin>")
            file.write("<ymax>30</ymax>")
            file.write("<ymin>40</ymin>")
            file.write("</bndbox>")
            file.write("<difficult>1</difficult>")
            file.write("<occluded>1</occluded>")
            file.write("<pose>Unspecified</pose>")
            file.write("<truncated>0</truncated>")
            file.write("</object>")
        file.write("<segmented>0</segmented>")
        file.write("<size>")
        file.write("<depth>3</depth>")
        # NOTE(review): video.width is written into <height> and video.height
        # into <width> — looks swapped; confirm against XML consumers.
        file.write("<height>{0}</height>".format(video.width))
        file.write("<width>{0}</width>".format(video.height))
        file.write("</size>")
        file.write("<source>")
        file.write("<annotation>{0}</annotation>".format(video.slug))
        file.write("<database>vatic</database>")
        file.write("<image>vatic</image>")
        file.write("</source>")
        file.write("<owner>")
        file.write("<flickrid>vatic</flickrid>")
        file.write("<name>vatic</name>")
        file.write("</owner>")
        file.write("</annotation>")
        file.close()
    print "{0} of {1} are difficult".format(numdifficult, numtotal)
    print "Writing image sets..."
    # One "<label>_trainval.txt" per label: "frameid 1" when the label is
    # present in that frame, "frameid -1" otherwise.
    for label, frames in hasit.items():
        filename = "{0}/ImageSets/Main/{1}_trainval.txt".format(folder,
                                                                label)
        file = open(filename, "w")
        for frame in allframes:
            file.write(str(frame+1).zfill(6))
            file.write(" ")
            if frame in frames:
                file.write("1")
            else:
                file.write("-1")
            file.write("\n")
        if pascalds:
            # Pull up to 1000 images from the negative dataset that are
            # known to lack this label, and copy them in with an "n" prefix.
            print "Sampling negative VOC for {0}".format(label)
            negs = itertools.islice(pascalds.find(missing = [label.lower()]), 1000)
            for neg in negs:
                source = "{0}/Annotations/{1}.xml".format(negdir, neg)
                tree = ElementTree.parse(source)
                tree.find("folder").text = folder
                tree.find("filename").text = "n{0}.jpg".format(neg)
                try:
                    os.makedirs(os.path.dirname("{0}/Annotations/n{1}.xml".format(folder, neg)))
                except OSError:
                    pass
                try:
                    os.makedirs(os.path.dirname("{0}/JPEGImages/n{1}.jpg".format(folder, neg)))
                except OSError:
                    pass
                tree.write("{0}/Annotations/n{1}.xml".format(folder, neg))
                shutil.copyfile("{0}/JPEGImages/{1}.jpg".format(negdir, neg),
                                "{0}/JPEGImages/n{1}.jpg".format(folder, neg))
                allnegatives.add("n{0}".format(neg))
                file.write("n{0} -1\n".format(neg))
        file.close()
        train = "{0}/ImageSets/Main/{1}_train.txt".format(folder, label)
        shutil.copyfile(filename, train)
    # Global trainval.txt: all sampled frames plus any copied negatives.
    filename = "{0}/ImageSets/Main/trainval.txt".format(folder)
    file = open(filename, "w")
    # NOTE(review): the join below has no trailing newline, so the first
    # negative id is appended to the last frame line — confirm intended.
    file.write("\n".join(str(x+1).zfill(6) for x in allframes))
    for neg in allnegatives:
        file.write("n{0}\n".format(neg))
    file.close()
    train = "{0}/ImageSets/Main/train.txt".format(folder)
    shutil.copyfile(filename, train)
    print "Writing JPEG frames..."
    # Hard-link (not copy) each sampled frame into JPEGImages/.
    for frame in allframes:
        strframe = str(frame+1).zfill(6)
        path = Video.getframepath(frame, video.location)
        dest = "{0}/JPEGImages/{1}.jpg".format(folder, strframe)
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.link(path, dest)
    print "Done."
@handler("Samples the performance by worker")
class sample(Command):
    """Render a random sample of each worker's completed HITs into mosaic
    JPEG images (one file per HIT) for manual quality review."""

    def setup(self):
        """Build the argument parser for this command."""
        parser = argparse.ArgumentParser()
        parser.add_argument("directory")
        # --number: HITs sampled per worker; --frames: frames per mosaic.
        parser.add_argument("--number", "-n", type=int, default=3)
        parser.add_argument("--frames", "-f", type=int, default=4)
        parser.add_argument("--since", "-s")
        parser.add_argument("--labels", action="store_true", default = False)
        return parser

    def __call__(self, args):
        # Best-effort creation of the output directory (may already exist).
        try:
            os.makedirs(args.directory)
        except:
            pass
        since = None
        if args.since:
            # Parse a human-friendly date string (e.g. "last week") into a
            # datetime used to restrict sampled HITs.
            since = parsedatetime.parsedatetime.Calendar().parse(args.since)
            since = time.mktime(since[0])
            since = datetime.datetime.fromtimestamp(since)
        if args.labels:
            # Font used to draw track labels onto the highlighted frames.
            font = ImageFont.truetype("arial.ttf", 14)
        else:
            font = None
        workers = session.query(turkic.models.Worker)
        for worker in workers:
            print "Sampling worker {0}".format(worker.id)
            # Randomly pick up to args.number non-training jobs by this worker.
            jobs = session.query(Job)
            jobs = jobs.filter(Job.worker == worker)
            jobs = jobs.join(Segment)
            jobs = jobs.join(Video)
            jobs = jobs.filter(Video.isfortraining == False)
            if since:
                jobs = jobs.filter(turkic.models.HIT.timeonserver >= since)
            jobs = jobs.order_by(sqlalchemy.func.rand())
            jobs = jobs.limit(args.number)
            for job in jobs:
                print "Visualizing HIT {0}".format(job.hitid)
                paths = [x.getboxes(interpolate = True,
                                    bind = True,
                                    label = True) for x in job.paths]
                # Choose which frames of the segment appear in the mosaic.
                if args.frames > job.segment.stop - job.segment.start:
                    frames = range(job.segment.start, job.segment.stop + 1)
                else:
                    frames = random.sample(xrange(job.segment.start,
                                                  job.segment.stop + 1),
                                           args.frames)
                # Lay the frames out in a roughly square grid:
                # floor(sqrt) columns by ceil(sqrt) rows.
                size = math.sqrt(len(frames))
                video = job.segment.video
                bannersize = (video.width * int(math.floor(size)),
                              video.height * int(math.ceil(size)))
                # video[0] is the first frame; reuse its image mode
                # (presumably "RGB" — TODO confirm) for the mosaic canvas.
                image = Image.new(video[0].mode, bannersize)
                size = int(math.floor(size))
                offset = (0, 0)
                horcount = 0
                paths = vision.visualize.highlight_paths(video, paths,
                                                         font = font)
                for frame, framenum in paths:
                    if framenum in frames:
                        # Paste the frame, then advance left-to-right,
                        # wrapping to the next row after 'size' columns.
                        image.paste(frame, offset)
                        horcount += 1
                        if horcount >= size:
                            offset = (0, offset[1] + video.height)
                            horcount = 0
                        else:
                            offset = (offset[0] + video.width, offset[1])
                image.save("{0}/{1}-{2}.jpg".format(args.directory,
                                                    worker.id,
                                                    job.hitid))
@handler("Provides a URL to fix annotations during vetting")
class find(Command):
    """Locate jobs matching the given filters and print either their
    identifiers (--ids) or offline fix-up URLs."""

    def setup(self):
        """Build the argument parser for this command."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--id")
        parser.add_argument("--frame", "-f", type = int,
                            nargs = '?', default = None)
        parser.add_argument("--hitid")
        parser.add_argument("--workerid")
        parser.add_argument("--ids", action="store_true", default = False)
        return parser

    def __call__(self, args):
        jobs = session.query(Job)
        jobs = jobs.join(Segment).join(Video)
        if args.id:
            # --id filters by video slug, not database primary key.
            jobs = jobs.filter(Video.slug == args.id)
        if args.frame is not None:
            # Keep only segments whose range contains the requested frame.
            jobs = jobs.filter(Segment.start <= args.frame)
            jobs = jobs.filter(Segment.stop >= args.frame)
        if args.hitid:
            jobs = jobs.filter(Job.hitid == args.hitid)
        if args.workerid:
            jobs = jobs.filter(Job.workerid == args.workerid)
        jobs = jobs.filter(turkic.models.HIT.useful == True)
        if jobs.count() > 0:
            for job in jobs:
                if args.ids:
                    if job.published:
                        # Trailing commas keep ids on one line (Python 2).
                        print job.hitid,
                        if job.completed:
                            print job.assignmentid,
                            print job.workerid,
                        print ""
                    else:
                        print "(not published)"
                else:
                    print job.offlineurl(config.localhost)
        else:
            print "No jobs matching this criteria."
@handler("List all videos loaded", "list")
class listvideos(Command):
    """List loaded videos, optionally filtered by worker, publish or
    completion state, with optional per-video statistics."""

    def setup(self):
        """Build the argument parser for this command."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--completed", action="store_true", default=False)
        parser.add_argument("--published", action="store_true", default=False)
        parser.add_argument("--training", action="store_true", default=False)
        parser.add_argument("--count", action="store_true", default=False)
        parser.add_argument("--worker")
        parser.add_argument("--stats", action="store_true", default=False)
        return parser

    def __call__(self, args):
        videos = session.query(Video)
        # --training toggles between training-only and regular videos.
        if args.training:
            videos = videos.filter(Video.isfortraining == True)
        else:
            videos = videos.filter(Video.isfortraining == False)
        # The three job-based filters are mutually exclusive, in priority
        # order: --worker, then --published, then --completed.
        if args.worker:
            videos = videos.join(Segment)
            videos = videos.join(Job)
            videos = videos.filter(Job.workerid == args.worker)
        elif args.published:
            videos = videos.join(Segment)
            videos = videos.join(Job)
            videos = videos.filter(Job.published == True)
        elif args.completed:
            videos = videos.join(Segment)
            videos = videos.join(Job)
            videos = videos.filter(Job.completed == True)
        if args.count:
            print videos.count()
        else:
            # distinct() avoids duplicate rows introduced by the joins above.
            for video in videos.distinct():
                print "{0:<25}".format(video.slug),
                if args.stats:
                    print "{0:>3}/{1:<8}".format(video.numcompleted, video.numjobs),
                    print "${0:<15.2f}".format(video.cost),
                print ""
|
vovakkk/vatic
|
cliunique.py
|
Python
|
mit
| 45,911
|
[
"VisIt"
] |
5e045eb6bc83bb24d8589e5aa96c1853164d21c592b494d1fc0392b5e62cf14f
|
# Generated by Django 2.2.20 on 2021-06-08 19:31
from django.db import migrations
def remove_templates(apps, schema_editor):
    """Delete the default (customer-less) enrollment notification email
    templates; reverse operation for create_templates."""
    email_template = apps.get_model('enterprise', 'EnrollmentNotificationEmailTemplate')
    # Remove the self-enroll default first, then the admin-enroll default.
    for template_type in ('SELF_ENROLL', 'ADMIN_ENROLL'):
        email_template.objects.filter(
            enterprise_customer=None, template_type=template_type
        ).delete()
def create_templates(apps, schema_editor):
    """Create the default (customer-less) enrollment notification templates.

    Two EnrollmentNotificationEmailTemplate rows are created if missing: one
    used when a learner enrolls themselves (SELF_ENROLL) and one used when an
    admin enrolls the learner (ADMIN_ENROLL).  Reverse of remove_templates.
    """
    EmailTemplate = apps.get_model('enterprise', 'EnrollmentNotificationEmailTemplate')
    # Default template for learner self-enrollment.
    EmailTemplate.objects.get_or_create(
        plaintext_template="""
{% load i18n %}{% if user_name %}{% blocktrans %}Dear {{ user_name }},{% endblocktrans %}{% else %}{% blocktrans %}Hi!{% endblocktrans %}{% endif %}
{% if enrolled_in.type == "program" %}
{% blocktrans with program_url=enrolled_in.url program_name=enrolled_in.name program_branding=enrolled_in.branding start_date=enrolled_in.start|date:"DATE_FORMAT" %}You have been enrolled in {{ program_name }}, a {{ program_branding }} program offered by {{ organization_name }}. This program begins {{ start_date }}. For more information, see the following link:
{{ program_url }}{% endblocktrans %}{% else %}
{% blocktrans with course_url=enrolled_in.url course_name=enrolled_in.name start_date=enrolled_in.start|date:"DATE_FORMAT" %}You have been enrolled in {{ course_name }}, a course offered by {{ organization_name }}. This course begins {{ start_date }}. For more information, see the following link:
{{ course_url }}{% endblocktrans %}{% endif %}
{% blocktrans with enrolled_in_name=enrolled_in.name %}
Thanks,
The {{enrolled_in_name}} team{% endblocktrans %}
""",
        html_template="""
{% load i18n %}<html>
<body>
<p>{% if user_name %}{% blocktrans %}Dear {{ user_name }},{% endblocktrans %}{% else %}{% blocktrans %}Hi!{% endblocktrans %}{% endif %}</p>
<p>{% if enrolled_in.type == "program" %}
{% blocktrans with program_url=enrolled_in.url program_name=enrolled_in.name program_branding=enrolled_in.branding start_date=enrolled_in.start|date:"DATE_FORMAT" %}You have been enrolled in <a href="{{ program_url }}">{{ program_name }}</a>, a {{ program_branding }} program offered by {{ organization_name }}. This program begins {{ start_date }}. For more information, see <a href="{{ program_url }}">{{ program_name }}</a>.{% endblocktrans %}{% else %}
{% blocktrans with course_url=enrolled_in.url course_name=enrolled_in.name start_date=enrolled_in.start|date:"DATE_FORMAT" %}You have been enrolled in <a href="{{ course_url }}">{{ course_name }}</a>, a course offered by {{ organization_name }}. This course begins {{ start_date }}. For more information, see <a href="{{ course_url }}">{{ course_name }}</a>.{% endblocktrans %}{% endif %}
</p>
{% blocktrans with enrolled_in_name=enrolled_in.name %}<p>
Thanks,
</p>
<p>
The {{enrolled_in_name}} team
</p>{% endblocktrans %}
</body>
</html>""",
        subject_line='',
        enterprise_customer=None,
        template_type='SELF_ENROLL',
    )
    # Default template for enrollments performed by an admin on the
    # learner's behalf.
    EmailTemplate.objects.get_or_create(
        plaintext_template="""
Great News! You've been enrolled in {{enrolled_in.name}} by {{organization_name}}
This course is a free benefit offered especially for you, and we are excited for you to meet your learning community on edX.
Visit this link to see and enroll in your course, {{enrolled_in.url}}
The {{enrolled_in.name}} team
""",
        html_template="""
<html>
<body>
<table style="max-width: 800;" align="center">
<tbody>
<tr>
<td>
<table width="100%" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="left" valign="top" style="padding:0;Margin:0;width:560px">
<img src="https://ci3.googleusercontent.com/proxy/ZAml-YgIIJEht-OBl6GqLNIUeVKLyPvs_ldFo9FShp-andj1YQxvliJXv_s_Tmh6cg1-5avJJmvXmzxbQp06sB_WUeYYkN9kzV6jtVUvYKPPjUX_8_iFJAZqsNqYakl4nQyHnl0dClFzxiaLuULSPoqGPLBfJmNOEXIBYkvKYa95640xvHwDiQ22bZ16=s0-d-e1-ft#https://appboy-images.com/appboy/communication/assets/image_assets/images/5fc568622213594dcbda2623/original.png?1606772834" width="110" height="57" border="0" style="display:block" alt="edX" id="m_-37988256656304111logo" class="CToWUd">
</td>
<td align="right" valign="top" style="padding:0;Margin:0;width:560px">
<a href="https://courses.edx.org/dashboard">My Dashboard</a>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td align="left" bgcolor="#002b2b" style="Margin:0;padding-left:20px;padding-right:20px;padding-top:40px;padding-bottom:40px;background-color:#002b2b;background-image:url(https://ci3.googleusercontent.com/proxy/2CLnc9QL2u1L0MsUErVcQVBOz6OlDew2A5O8umOI9v7PGI3ip8YnJqYPRcexkjGsbNvNa6kUFyuHAMp7LlVBKKa7bxaKUjEO566AyX4M_6PhDtwz-QpLXLg9eQZQ93LIwP-5SbUtfxd203xXFTVBatJrN9P2hsuJSWFwd9k2pooiea6Qsg=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/53481614126118338.png);background-repeat:no-repeat;background-position:left top" background="https://ci3.googleusercontent.com/proxy/2CLnc9QL2u1L0MsUErVcQVBOz6OlDew2A5O8umOI9v7PGI3ip8YnJqYPRcexkjGsbNvNa6kUFyuHAMp7LlVBKKa7bxaKUjEO566AyX4M_6PhDtwz-QpLXLg9eQZQ93LIwP-5SbUtfxd203xXFTVBatJrN9P2hsuJSWFwd9k2pooiea6Qsg=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/53481614126118338.png">
<table cellpadding="0" cellspacing="0" width="100%" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" valign="top" style="padding:0;Margin:0;width:560px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0">
<p style="Margin:0;font-size:32px;font-family:helvetica,'helvetica neue',arial,verdana,sans-serif;line-height:48px;color:#ffffff"><strong><span class="il">Congratulations</span>, Restless Learner</strong></p>
</td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0">
<p style="Margin:0;font-size:20px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:30px;color:#ffffff">Great News! You've been <span class="il">Enrolled</span> in {{enrolled_in.name}} by {{organization_name}}</p>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td align="left" style="padding:0;Margin:0;padding-top:20px;padding-left:20px;padding-right:20px">
<table cellpadding="0" cellspacing="0" width="100%" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" valign="top" style="padding:0;Margin:0;width:560px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333">
This course is a free benefit offered especially for you, and we are excited for you to meet your learning community on edX.
</p>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table width="100%" cellspacing="0" cellpadding="0" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" bgcolor="#ffffff" style="padding:0;Margin:0;padding-top:10px;padding-left:10px;padding-bottom:15px"><span class="m_-37988256656304111es-button-border" style="border-style:solid;border-color:#d03529;background:#d03529;border-width:0px 0px 2px 0px;display:inline-block;border-radius:0px;width:auto"><a href="{{enrolled_in.url}}" class="m_-37988256656304111es-button" style="text-decoration:none;font-family:helvetica,'helvetica neue',arial,verdana,sans-serif;font-size:18px;color:#ffffff;border-style:solid;border-color:#d03529;border-width:10px 15px;display:inline-block;background:#d03529;border-radius:0px;font-weight:normal;font-style:normal;line-height:22px;width:auto;text-align:center" target="_blank">Start my course</a></span></td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td align="left" bgcolor="#ffffff" style="padding:0;Margin:0;padding-top:20px;padding-left:20px;padding-right:20px;background-color:#ffffff">
<table cellpadding="0" cellspacing="0" width="100%" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" valign="top" style="padding:0;Margin:0;width:560px">
<table cellpadding="0" cellspacing="0" width="100%" bgcolor="#ffffff" style="border-collapse:collapse;border-spacing:0px;background-color:#ffffff" role="presentation">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0">
<h2 style="Margin:0;line-height:29px;font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:24px;font-style:normal;font-weight:normal;color:#002b2b"><strong>Share With A Friend</strong></h2>
</td>
</tr>
<tr>
<td align="center" style="padding:15px;Margin:0">
<h2 style="Margin:0;line-height:17px;font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;font-style:normal;font-weight:normal;color:#333333">Invite a friend to take the course with you! Click below to share.</h2>
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333;display:none"><br></p>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td align="center" bgcolor="#ffffff" style="padding:20px;Margin:0;background-color:#ffffff">
<table cellpadding="0" cellspacing="0" class="m_-37988256656304111es-left" align="left" style="border-collapse:collapse;border-spacing:0px;float:left">
<tbody>
<tr>
<td class="m_-37988256656304111es-m-p0r" align="center" style="padding:0;Margin:0;width:96px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;font-size:0px"><img src="https://ci3.googleusercontent.com/proxy/2EBI4EepyVUk0cIRxzfMGmsBqU6TKjJsw7CqB8hSM0zRlLb0-BL0Y_JXhu-E6b1DiMGQ-4NHHem0qkjDgG_0xHNTYSsmJ0Rv9qBXCj2SJyT-yFlrkA7SYr4YkGg8CdkRYlkwZsQzU0DiZIDWZopRyOkPvcHWVTLwzCdJMKZcFc57HfR-eQ=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/71001614127047298.png" alt="" style="display:block;border:0;outline:none;text-decoration:none" width="35" height="35" class="CToWUd"></td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0;padding-top:5px">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333"><a style="font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;text-decoration:underline;color:#d03529" href="https://www.facebook.com/edX">Facebook</a></p>
</td>
</tr>
</tbody>
</table>
</td>
<td class="m_-37988256656304111es-hidden" style="padding:0;Margin:0;width:20px"></td>
</tr>
</tbody>
</table>
<table cellpadding="0" cellspacing="0" class="m_-37988256656304111es-left" align="left" style="border-collapse:collapse;border-spacing:0px;float:left">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;width:96px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;font-size:0px"><img src="https://ci4.googleusercontent.com/proxy/yPnmVX7UE9sXw9LbZmrtTLvkrZVy9NJurDVqU6aByY871Dj0GMzEmJZOQm5kTImdMT_qNIIvpVpTpTpboYZB6wPyZuOUKwd49j6Q7yTB9IM0DWSMdldQ4XiOHReWms7Mw7HL9rRGk1iRLwmx0IT5frUGIcmc4ulwvQcjBFSHUBcMBDrAvQ=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/19131614127052508.png" alt="" style="display:block;border:0;outline:none;text-decoration:none" width="35" height="35" class="CToWUd"></td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0;padding-top:5px">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333"><a style="font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;text-decoration:underline;color:#d03529" href="https://twitter.com/edXOnline">Twitter</a></p>
</td>
</tr>
</tbody>
</table>
</td>
<td class="m_-37988256656304111es-hidden" style="padding:0;Margin:0;width:20px"></td>
</tr>
</tbody>
</table>
<table cellpadding="0" cellspacing="0" class="m_-37988256656304111es-left" align="left" style="border-collapse:collapse;border-spacing:0px;float:left">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;width:96px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;font-size:0px"><img src="https://ci6.googleusercontent.com/proxy/GT773Vs37lIPuUvoTpVMvtQ8sSuiC9sGLGtWGNti6nwmrZGrcewvoO_zFG4XJadUV-xPRYqf9zbLEshuVcSQMZiI1yUuM5VKxTF9lLfuzDWV1ZbMPEpk2cAWEZcDAW8mD3VFxY5l0y1Gxvcc_2bB4L-ApvPHnnIw7rXB_XDe1o37B4xPaA=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/42721614127058646.png" alt="" style="display:block;border:0;outline:none;text-decoration:none" width="35" height="35" class="CToWUd"></td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0;padding-top:5px">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333"><a style="font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;text-decoration:underline;color:#d03529" href="https://www.linkedin.com/school/edx/">LinkedIn</a></p>
</td>
</tr>
</tbody>
</table>
</td>
<td class="m_-37988256656304111es-hidden" style="padding:0;Margin:0;width:20px"></td>
</tr>
</tbody>
</table>
<table cellpadding="0" cellspacing="0" class="m_-37988256656304111es-left" align="left" style="border-collapse:collapse;border-spacing:0px;float:left">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;width:96px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;font-size:0px"><img src="https://ci6.googleusercontent.com/proxy/hVT_30jYZguhmOyUAcTTZhCNDjjlivo3GAljGnyt_nNnjp0A0zcheoKw-XgM2NSD7M-CemzJEui6RIte_cH0YyJXUwCJWFpZ6j96QCB3T2UeQddtN1_VrVyQoDnAR1G82cHdJnq6Ysc-KQPvBKngjYaaPYSvcvMuHR0QS7dcnlGK35dUlA=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/23371614127063365.png" alt="" style="display:block;border:0;outline:none;text-decoration:none" width="35" height="35" class="CToWUd"></td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0;padding-top:5px">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333"><a style="font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;text-decoration:underline;color:#d03529" href="https://www.reddit.com/r/edX/">Reddit</a></p>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
<table cellpadding="0" cellspacing="0" class="m_-37988256656304111es-left" align="left" style="border-collapse:collapse;border-spacing:0px;">
<tbody>
<tr>
<td align="left" style="padding:0;Margin:0;width:96px">
<table cellpadding="0" cellspacing="0" width="100%" role="presentation" style="border-collapse:collapse;border-spacing:0px">
<tbody>
<tr>
<td align="center" style="padding:0;Margin:0;font-size:0px"><img src="https://ci4.googleusercontent.com/proxy/9UmWL708u3GDWbf_b3dzLoavIieL1kLHnipq6vGF06ZpMlLEQe9WkHJZsNVHfBwYZhpp71tbA8EJgf2_mhGV2RwDBIiUox83T4sP6uZKF3rw6QGEtQY2Ou16eR6v39Quf2AHsnh78t6JK6PMqlxGVSVKB2WkmUX37_2jtgfIqVtOLlV1bw=s0-d-e1-ft#https://fzvpwi.stripocdn.email/content/guids/CABINET_4d3c6887b8ac137f656a3dd54bb5f6c8/images/18781614127069989.png" alt="" style="display:block;border:0;outline:none;text-decoration:none" width="35" height="35" class="CToWUd"></td>
</tr>
<tr>
<td align="center" style="padding:0;Margin:0;padding-top:5px">
<p style="Margin:0;font-size:14px;font-family:arial,'helvetica neue',helvetica,sans-serif;line-height:21px;color:#333333"><a style="font-family:arial,'helvetica neue',helvetica,sans-serif;font-size:14px;text-decoration:underline;color:#d03529" href="https://api.whatsapp.com/send?text=edX">WhatsApp</a></p>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</td>
</tr>
<tr>
<td>
<table cellpadding="0" cellspacing="0" width="100%">
<tbody>
<tr>
<td height="20" style="line-height:1px;font-size:1px"></td>
</tr>
<tr>
<td align="left" valign="top"><a href="https://business.edx.org" style="font-family:'Open Sans',Arial,sans-serif;font-size:14px;line-height:17px;text-decoration:none;color:#707070" target="_blank"><span style="color:#00262b">edX for Business</span><span style="color:#707070"> — eLearning Solutions for Your Company</span></a></td>
</tr>
<tr>
<td height="20" style="line-height:1px;font-size:1px"></td>
</tr>
<tr>
<td align="left" style="font-family:'Open Sans',Arial,sans-serif;color:#707070;font-size:14px;line-height:17px" valign="top">© 2021 edX Inc. All rights reserved.</td>
</tr>
<tr>
<td height="20" style="line-height:1px;font-size:1px"></td>
</tr>
<tr>
<td height="20" style="line-height:1px;font-size:1px"></td>
</tr>
<tr>
<td align="left" style="font-family:'Open Sans',Arial,sans-serif;color:#707070;font-size:14px;line-height:17px" valign="top">141 Portland St. 9th Floor, Cambridge, MA 02139</td>
</tr>
<tr>
<td height="30" style="line-height:1px;font-size:1px"></td>
</tr>
</tbody>
</table>
</td>
</tr>
</tbody>
</table>
</body>
</html>
""",
        subject_line='',
        enterprise_customer=None,
        template_type='ADMIN_ENROLL',
    )
class Migration(migrations.Migration):
    """Install the default enrollment notification email templates."""

    dependencies = [
        ('enterprise', '0132_auto_20210608_1921'),
    ]

    operations = [
        # Forward: create the default templates; reverse: delete them.
        migrations.RunPython(code=create_templates, reverse_code=remove_templates)
    ]
|
edx/edx-enterprise
|
enterprise/migrations/0133_auto_20210608_1931.py
|
Python
|
agpl-3.0
| 26,436
|
[
"VisIt"
] |
9ffcc2b4818e6ebbe04468d5ab9944444e764851ea36510d4a76e1f809d22290
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
def sv_chain_data(sv):
  """ This subroutine creates a buffer of information to communicate the system variables to libnao.

  The returned 1D float array is a flat message with three regions:
  indices 1..(dat[0]-1) hold simple scalar parameters, indices 99.. hold
  1-based (Fortran-convention) pointers into the payload region, and the
  payload arrays themselves start at index 200.
  """
  from numpy import zeros, concatenate as conc
  aos,sv = sv.ao_log, sv
  # Sizes of payload arrays: radial grid, species count, total number of
  # multipletts, and radial-grid points times multipletts (orbital data).
  nr,nsp,nmt,nrt = aos.nr,aos.nspecies, sum(aos.sp2nmult),aos.nr*sum(aos.sp2nmult)
  nat,na1,tna,nms = sv.natoms,sv.natoms+1,3*sv.natoms,sum(aos.sp2nmult)+aos.nspecies
  # Total buffer length = 200-slot header + all payload arrays.
  ndat = 200 + 2*nr + 4*nsp + 2*nmt + nrt + nms + 3*3 + nat + 2*na1 + tna + 4*nsp
  dat = zeros(ndat)
  # Simple parameters
  i = 0
  dat[i] = -999.0; i+=1 # pointer to the empty space in simple parameter
  dat[i] = aos.nspecies; i+=1
  dat[i] = aos.nr; i+=1
  dat[i] = aos.rmin; i+=1;
  dat[i] = aos.rmax; i+=1;
  dat[i] = aos.kmax; i+=1;
  dat[i] = aos.jmx; i+=1;
  # Checksum of all radial orbitals, presumably for a consistency check on
  # the receiving (libnao) side — TODO confirm.
  dat[i] = conc(aos.psi_log).sum(); i+=1;
  dat[i] = sv.natoms; i+=1
  dat[i] = sv.norbs; i+=1
  dat[i] = sv.norbs_sc; i+=1
  dat[i] = sv.nspin; i+=1
  # Record how many simple parameters were written into slot 0.
  dat[0] = i
  # Pointers to data
  # Pointer slots begin at index 99; payload begins right after index 199.
  # Each step stores the 1-based start of the next array, copies it in, and
  # advances the payload cursor 's' to the end of what was written.
  i = 99
  s = 199
  dat[i] = s+1; i+=1; f=s+nr; dat[s:f] = aos.rr; s=f; # pointer to rr
  dat[i] = s+1; i+=1; f=s+nr; dat[s:f] = aos.pp; s=f; # pointer to pp
  dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2nmult; s=f; # pointer to sp2nmult
  dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2rcut; s=f; # pointer to sp2rcut
  dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2norbs; s=f; # pointer to sp2norbs
  dat[i] = s+1; i+=1; f=s+nsp; dat[s:f] = aos.sp2charge; s=f; # pointer to sp2charge
  dat[i] = s+1; i+=1; f=s+nmt; dat[s:f] = conc(aos.sp_mu2j); s=f; # pointer to sp_mu2j
  dat[i] = s+1; i+=1; f=s+nmt; dat[s:f] = conc(aos.sp_mu2rcut); s=f; # pointer to sp_mu2rcut
  dat[i] = s+1; i+=1; f=s+nrt; dat[s:f] = conc(aos.psi_log).reshape(nrt); s=f; # pointer to psi_log
  dat[i] = s+1; i+=1; f=s+nms; dat[s:f] = conc(aos.sp_mu2s); s=f; # pointer to sp_mu2s
  dat[i] = s+1; i+=1; f=s+3*3; dat[s:f] = conc(sv.ucell); s=f; # pointer to ucell (123,xyz) ?
  dat[i] = s+1; i+=1; f=s+nat; dat[s:f] = sv.atom2sp; s=f; # pointer to atom2sp
  dat[i] = s+1; i+=1; f=s+na1; dat[s:f] = sv.atom2s; s=f; # pointer to atom2s
  dat[i] = s+1; i+=1; f=s+na1; dat[s:f] = sv.atom2mu_s; s=f; # pointer to atom2mu_s
  dat[i] = s+1; i+=1; f=s+tna; dat[s:f] = conc(sv.atom2coord); s=f; # pointer to atom2coord
  dat[i] = s+1; # this is a terminator to simplify operation
  return dat
|
gkc1000/pyscf
|
pyscf/nao/m_sv_chain_data.py
|
Python
|
apache-2.0
| 2,961
|
[
"PySCF"
] |
1760654a929a334eabff284761b886e8e7c032b78fa1fe02d70f2d846322bdcd
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import re
import copy
from ..common import exceptions
from .base import NodeBase
def parse_style(token):
    """Convert a node's 'style' attribute (CSS-like "key:value;" pairs)
    into a dict mapping each key to its stripped value."""
    pattern = r'(?P<key>\S+?)\s*:\s*(?P<value>.*?)(?:;|\Z)'
    return {match.group('key'): match.group('value').strip()
            for match in re.finditer(pattern, token.get('style', ''))}
def escape(text):
    """
    Escape LaTeX commands.

    Inputs:
        text: a plain text message
    """
    replacements = {
        '&': '\\&',
        '%': '\\%',
        '$': '\\$',
        '#': '\\#',
        '_': '\\_',
        '{': '\\{',
        '}': '\\}',
        '^': '{\\textasciicircum}',
        '~': '{\\textasciitilde}',
        '\\': '{\\textbackslash}',
        '<': '{\\textless}',
        '>': '{\\textgreater}',
    }
    # Longest keys first so longer sequences take precedence in the
    # alternation; a single substitution pass handles all characters.
    ordered = sorted(replacements, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(key) for key in ordered))
    return pattern.sub(lambda match: replacements[match.group()], text)
class LatexBase(NodeBase):
    """Base class for all LaTeX tree nodes."""

    def __init__(self, *args, **kwargs):
        # Optional 'string' shortcut: attach a String child with the content.
        initial = kwargs.pop('string', None)
        kwargs.setdefault('info', None)
        NodeBase.__init__(self, *args, **kwargs)
        if initial is not None:
            String(self, content=initial, escape=kwargs.get('escape', True))

    def copy(self):
        """Return a shallow copy of this node."""
        return copy.copy(self)
class EnclosureBase(LatexBase):
    """
    Class for enclosing other nodes in characters, e.g. [], {}.
    """

    def __init__(self, *args, **kwargs):
        LatexBase.__init__(self, *args, **kwargs)
        # The pair of delimiter characters is mandatory.
        if self.get('enclose', None) is None:
            raise exceptions.MooseDocsException("The 'enclose' property is required.")

    def write(self):
        """
        Write LaTeX as a string.
        """
        enclose = self.get('enclose')
        pieces = [enclose[0]]
        for child in self.children:
            pieces.append(child.write())
        pieces.append(enclose[1])
        return ''.join(pieces)
class Bracket(EnclosureBase):
    """
    Encloses its children in square brackets ([]).
    """
    def __init__(self, parent=None, **kwargs):
        super(Bracket, self).__init__('Bracket', parent, enclose=('[', ']'), **kwargs)
class Brace(EnclosureBase):
    """
    Encloses its children in curly braces ({}).
    """
    def __init__(self, parent=None, **kwargs):
        super(Brace, self).__init__('Brace', parent, enclose=('{', '}'), **kwargs)
class InlineMath(EnclosureBase):
    """
    Encloses its children in inline-math dollar signs ($$).
    """
    def __init__(self, parent=None, **kwargs):
        super(InlineMath, self).__init__('InlineMath', parent, enclose=('$', '$'), **kwargs)
class Command(LatexBase):
    """
    Typical zero or one argument command: \\foo{bar}.
    If children do not exist then the braces are not included (e.g., \\foo).
    When 'optional' is set, square brackets are used instead of braces.
    """
    def __init__(self, parent, name, **kwargs):
        kwargs.setdefault('start', '')
        kwargs.setdefault('end', '')
        kwargs.setdefault('args', [])
        kwargs.setdefault('optional', False)
        LatexBase.__init__(self, name, parent, **kwargs)
    def write(self):
        """Return the LaTeX string for this command."""
        is_optional = self.get('optional', False)
        open_char, close_char = ('[', ']') if is_optional else ('{', '}')
        pieces = [self.get('start'), '\\%s' % self.name]
        pieces.extend(arg.write() for arg in self.get('args'))
        if self.children:
            pieces.append(open_char)
            pieces.extend(child.write() for child in self.children)
            pieces.append(close_char)
        pieces.append(self.get('end'))
        return ''.join(pieces)
class Environment(LatexBase):
    """
    Class for LaTeX environment: \\begin{foo}...\\end{foo}
    """
    def __init__(self, parent, name, **kwargs):
        kwargs.setdefault('start', '\n')
        kwargs.setdefault('end', '\n')
        kwargs.setdefault('args', [])
        kwargs.setdefault('after_begin', '\n')
        kwargs.setdefault('before_end', '\n')
        LatexBase.__init__(self, name, parent, **kwargs)
    def write(self):
        """
        Return the LaTeX string for this environment.
        """
        pieces = ['%s\\begin{%s}' % (self.get('start'), self.name)]
        pieces.extend(arg.write() for arg in self.get('args'))
        pieces.append(self.get('after_begin'))
        pieces.extend(child.write() for child in self.children)
        pieces.append('%s\\end{%s}%s' % (self.get('before_end'), self.name,
                                         self.get('end')))
        return ''.join(pieces)
class String(NodeBase):
    """
    Leaf node containing raw text content; parent must always be a Tag.
    """
    def __init__(self, parent=None, **kwargs):
        kwargs.setdefault('content', '')
        kwargs.setdefault('escape', True)
        NodeBase.__init__(self, 'String', parent, **kwargs)
    def write(self):
        """
        Return the (optionally escaped) content followed by any children.
        """
        content = self.get('content')
        pieces = [escape(content) if self.get('escape') else content]
        pieces.extend(child.write() for child in self.children)
        return ''.join(pieces)
def create_settings(*args, **kwargs):
    """Create a Bracket token holding comma-joined key=value pair settings."""
    entries = list(args) + ["{}={}".format(k, v) for k, v in kwargs.items()]
    return Bracket(None, escape=False, string=",".join(entries))
|
harterj/moose
|
python/MooseDocs/tree/latex.py
|
Python
|
lgpl-2.1
| 5,570
|
[
"MOOSE"
] |
ff5113897955b718e5fe2c9e7f163a427dbb012128fc07a4506fd2282a943427
|
#! /usr/bin/env python
# Compute the equilibrium lattice constant of fcc Au with a tabulated EAM
# potential, first from an equation-of-state fit over a volume scan, then
# from a stress relaxation.  NOTE(review): Python 2 script (`print` stmt).
import numpy as np
from ase.constraints import UnitCellFilter
from ase.lattice.cubic import FaceCenteredCubic
from ase.optimize import FIRE
from ase.utils.eos import EquationOfState
from atomistica import TabulatedAlloyEAM
###
n = 2
a = FaceCenteredCubic('Au', size=[n, n, n])
x0 = a.cell[0, 0]/n  # initial lattice constant (cell edge per cubic unit)
c = TabulatedAlloyEAM(fn='Au-Grochola-JCP05.eam.alloy')
a.calc = c
# Vary volume and fit minimum
def en(a, x):
    # Energy of the supercell rescaled to a cubic cell of edge x.
    a.set_cell([x, x, x], scale_atoms=True)
    return a.get_potential_energy()
x = np.linspace(0.9*x0, 1.1*x0, 101)
e = [ en(a, n*_x)/(n**3) for _x in x ]  # energy per cubic unit cell
eos = EquationOfState(x**3, e)  # fit E(V) with V = x^3 per unit cell
v0, e0, B = eos.fit()
print 'lattice constant (from equation of state) =', v0**(1./3)
# Keep cell rectangular during optimization
FIRE(UnitCellFilter(a, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=0.0001)
print 'lattice constant (from stress equilibration) =', a.cell[0, 0]/n
|
Atomistica/atomistica
|
examples/ASE/fcc.py
|
Python
|
gpl-2.0
| 929
|
[
"ASE"
] |
231a6d9442f95322d355eaf6a9fa57dabdc25f7a5441a7ce36ab19f0bdf36e33
|
'''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>..
'''
import os
os.environ['ETS_TOOLKIT'] = 'qt4'
import random
import string
from PySide import QtCore, QtGui
from mapclient.mountpoints.workflowstep import WorkflowStepMountPoint
import numpy as np
from mapclientplugins.mayaviviewerstep.mayaviviewerdata import StepState
from mapclientplugins.mayaviviewerstep.widgets.configuredialog import ConfigureDialog
from mapclientplugins.mayaviviewerstep.widgets.mayaviviewerwidget import MayaviViewerWidget
# from mappluginutils.mayaviviewer
from mappluginutils.mayaviviewer.mayaviviewerobjects import MayaviViewerObjectsContainer
from mappluginutils.mayaviviewer.mayaviviewerfieldworkmodel import MayaviViewerFieldworkModel
from mappluginutils.mayaviviewer.mayaviviewergiasscan import MayaviViewerGiasScan
from mappluginutils.mayaviviewer.mayaviviewerdatapoints import MayaviViewerDataPoints
from mappluginutils.mayaviviewer import mayaviviewerfieldworkmeasurements as MVFM
class MayaviViewerStep(WorkflowStepMountPoint):
    '''
    Step for displaying 3D objects using mayavi.

    Accepts up to five "uses" ports (fieldwork models, fieldwork
    measurements, point clouds, GIAS scans, simple meshes); the port index
    selects the matching handler in ``self._addObjectMethods``.
    '''
    def __init__(self, location):
        super(MayaviViewerStep, self).__init__('Mayavi 3D Model Viewer', location)
        self._category = 'Visualisation'
        self._state = StepState()
        # self._icon = QtGui.QImage(':/zincmodelsource/images/zinc_model_icon.png') # change this
        # Port declarations; order here must match self._addObjectMethods.
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'ju#fieldworkmodeldict'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'ju#fieldworkmeasurementdict'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#pointcloud'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'ju#giasscandict'))
        self.addPort(('http://physiomeproject.org/workflow/1.0/rdf-schema#port',
                      'http://physiomeproject.org/workflow/1.0/rdf-schema#uses',
                      'ju#simplemeshdict'))
        self._widget = None
        self._configured = False
        # One handler per port, indexed by port number (see setPortData).
        self._addObjectMethods = [self._addFieldworkModels,
                                  self._addFieldworkMeasurements,
                                  self._addPointClouds,
                                  self._addImages,
                                  self._addSimplemeshes,
                                  ]
        self.objectContainer = MayaviViewerObjectsContainer()
    def configure(self):
        # Show the modal configuration dialog; on accept, persist the new
        # state to disk and re-validate.
        d = ConfigureDialog(self._state)
        d.setModal(True)
        if d.exec_():
            self._state = d.getState()
            self.serialize(self._location)
        self._configured = d.validate()
        # NOTE(review): assumes the framework assigned _configuredObserver
        # before configure() is called -- verify against the mount point.
        if self._configured and self._configuredObserver:
            self._configuredObserver()
    def getIdentifier(self):
        # Unique identifier of this step instance within the workflow.
        return self._state._identifier
    def setIdentifier(self, identifier):
        self._state._identifier = identifier
    def serialize(self, location):
        # Persist the step state to an INI file named after the identifier.
        configuration_file = os.path.join(location, getConfigFilename(self._state._identifier))
        s = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
        s.beginGroup('state')
        s.setValue('identifier', self._state._identifier)
        s.setValue('discretisation', self._state._discretisation)
        s.setValue('displaynodes', self._state._displayNodes)
        s.setValue('renderargs', self._state._renderArgs)
        s.endGroup()
    def deserialize(self, location):
        # Restore state from the INI file; values come back as strings,
        # hence the explicit 'True' comparison for the boolean field.
        configuration_file = os.path.join(location, getConfigFilename(self._state._identifier))
        s = QtCore.QSettings(configuration_file, QtCore.QSettings.IniFormat)
        s.beginGroup('state')
        self._state._identifier = s.value('identifier', '')
        self._state._discretisation = s.value('discretisation', '')
        if s.value('displaynodes', '')=='True':
            self._state._displayNodes = True
        else:
            self._state._displayNodes = False
        self._state._renderArgs = s.value('renderargs', '{}')
        s.endGroup()
        d = ConfigureDialog(self._state)
        self._configured = d.validate()
        pass
    def setPortData(self, index, dataIn):
        # Dispatch incoming port data to the handler matching the port index.
        if not isinstance(dataIn, dict):
            raise TypeError, 'mayaviviewerstep expects a dictionary as input'
        self._addObjectMethods[index](dataIn)
    def execute(self):
        # Build the viewer widget from the collected objects and show it
        # modally; _doneExecution is signalled when the user closes it.
        print 'launching MayaviViewerStep'
        # if not self._widget:
        self._widget = MayaviViewerWidget(self.objectContainer)
        self._widget._ui.closeButton.clicked.connect(self._doneExecution)
        self._widget.setModal(True)
        self._setCurrentWidget(self._widget)
    def _addFieldworkModels(self, D):
        # Wrap each fieldwork model in a viewer object; '#FWModel' suffix
        # namespaces the object name within the container.
        for name, model in D.items():
            name = name+'#'+'FWModel'
            renderArgs = eval(self._state._renderArgs)
            obj = MayaviViewerFieldworkModel(name, model, [8,8], evaluator=None,
                                             renderArgs=renderArgs, fields=None,
                                             fieldName=None, PC=None)
            self.objectContainer.addObject(name, obj)
    def _addFieldworkMeasurements(self, D):
        for name, M in D.items():
            name = name+'#'+'FWMeasure'
            renderArgs = eval(self._state._renderArgs)
            # a bit hacky yea
            if 'femur' in name.lower():
                print 'ADDING MEASUREMENT', name
                obj = MVFM.MayaviViewerFemurMeasurements(name, M)
                self.objectContainer.addObject(name, obj)
    def _addPointClouds(self, D):
        for name, P in D.items():
            name = name+'#'+'DC'
            renderArgs = eval(self._state._renderArgs)
            obj = MayaviViewerDataPoints(name, P, renderArgs={'mode':'point', 'color':(0,1,0)})
            self.objectContainer.addObject(name, obj)
    def _addImages(self, D):
        for name, S in D.items():
            name = name+'#'+'IM'
            renderArgs = eval(self._state._renderArgs)
            obj = MayaviViewerGiasScan(name, S, renderArgs=renderArgs)
            self.objectContainer.addObject(name, obj)
    def _addSimplemeshes(self, D):
        # Placeholder: simple-mesh rendering not implemented yet.
        pass
        # for name, S in D.items():
        # renderArgs = eval(self._state._renderArgs)
        # obj = MayaviViewerSimpleMesh(name, model, renderArgs=renderArgs)
        # self.objectContainer.addObject(name, obj)
def getConfigFilename(identifier):
    """Return the settings filename for a step identifier."""
    suffix = '.conf'
    return identifier + suffix
def generateIdentifier(char_set=string.ascii_uppercase + string.digits):
    """Return a random 6-character identifier drawn from *char_set*."""
    pool = char_set * 6
    return ''.join(random.sample(pool, 6))
|
MusculoskeletalAtlasProject/mapclient-tests
|
test_resources/updater_test/mayaviviewerstep-master/mapclientplugins/mayaviviewerstep/step.py
|
Python
|
apache-2.0
| 7,910
|
[
"Mayavi"
] |
2ad9d96043a72e74092f1176cf3c5aece2be5a29c00301e6b59e588e3bef71c2
|
import numpy as np
import unittest2 as unittest
from scipy.special import factorial
from kafe2.core.constraint import GaussianSimpleParameterConstraint, \
GaussianMatrixParameterConstraint
from kafe2.fit._base.cost import *
from kafe2.fit.histogram.cost import *
from kafe2.fit.indexed.cost import *
from kafe2.fit.xy.cost import *
class TestCostBuiltin(unittest.TestCase):
    """Checks the built-in chi2 / NLL cost functions against reference
    values computed directly in setUp.  Subclasses override the two class
    attributes to re-run the same checks for other data-type variants."""
    CHI2_COST_FUNCTION = CostFunction_Chi2
    NLL_COST_FUNCTION = CostFunction_NegLogLikelihood
    def setUp(self):
        # Fixed toy data/model pairs for the chi2 and Poisson cases.
        self._data_chi2 = np.array([-0.5, 2.1, 8.9])
        self._model_chi2 = np.array([5.7, 8.4, -2.3])
        self._data_poisson = np.array([0.0, 2.0, 9.0])
        self._model_poisson = np.array([5.7, 8.4, 2.3])
        self._res = self._data_chi2 - self._model_chi2
        self._cov_mat = np.array([
            [1.0, 0.1, 0.2],
            [0.1, 2.0, 0.3],
            [0.2, 0.3, 3.0]
        ])
        self._cov_mat_cholesky = np.linalg.cholesky(self._cov_mat)
        self._cov_mat_inv = np.linalg.inv(self._cov_mat)
        self._pointwise_errors = np.sqrt(np.diag(self._cov_mat))
        self._cov_mat_log_det = np.log(np.linalg.det(self._cov_mat))
        # Reference cost values; note the log-det term conventions differ
        # between the chi2 variants and the NLL variants below.
        self._cost_chi2_cov_mat = self._res.dot(
            self._cov_mat_inv).dot(self._res) + self._cov_mat_log_det
        self._cost_chi2_pointwise = np.sum(
            (self._res / self._pointwise_errors) ** 2) + self._cov_mat_log_det
        self._cost_chi2_no_errors = np.sum(self._res ** 2) + self._cov_mat_log_det
        self._cost_nll_gaussian = self._cost_chi2_pointwise + 2.0 * np.sum(np.log(
            np.sqrt((2.0 * np.pi)) * self._pointwise_errors)) - self._cov_mat_log_det
        self._cost_nll_poisson = -2.0 * np.sum(
            np.log(self._model_poisson ** self._data_poisson)
            - np.log(factorial(self._data_poisson)) - self._model_poisson)
        self._cost_nllr_poisson = self._cost_nll_poisson + 2.0 * np.sum(
            np.log(self._data_poisson ** self._data_poisson)
            - np.log(factorial(self._data_poisson)) - self._data_poisson)
        # Parameter constraints and the extra cost they contribute.
        self._par_vals = np.array([12.3, 0.001, -1.9])
        self._simple_constraint = GaussianSimpleParameterConstraint(
            index=0, value=10.0, uncertainty=2.5)
        self._matrix_constraint = GaussianMatrixParameterConstraint(
            indices=(0, 1, 2),
            values=(1.0, 2.0, 3.0),
            matrix=[
                [1.5, 0.1, 0.1],
                [0.1, 2.2, 0.1],
                [0.1, 0.1, 0.3]
            ]
        )
        self. _par_constraints = [self._simple_constraint, self._matrix_constraint]
        self._par_cost = self._simple_constraint.cost(self._par_vals) \
            + self._matrix_constraint.cost(self._par_vals)
    # Each test checks the bare cost first, then the cost including the
    # parameter-constraint contribution.
    def test_chi2_no_errors(self):
        self.assertAlmostEqual(
            self._cost_chi2_no_errors,
            self.CHI2_COST_FUNCTION(errors_to_use=None)
            (self._data_chi2, self._model_chi2, None, None, self._cov_mat_log_det))
        self.assertAlmostEqual(
            self._cost_chi2_no_errors + self._par_cost,
            self.CHI2_COST_FUNCTION(errors_to_use=None)
            (self._data_chi2, self._model_chi2, self._par_vals, self._par_constraints,
             self._cov_mat_log_det))
    def test_chi2_pointwise(self):
        self.assertAlmostEqual(
            self._cost_chi2_pointwise,
            self.CHI2_COST_FUNCTION(errors_to_use='pointwise')
            (self._data_chi2, self._model_chi2, self._pointwise_errors, None, None,
             self._cov_mat_log_det))
        self.assertAlmostEqual(
            self._cost_chi2_pointwise + self._par_cost,
            self.CHI2_COST_FUNCTION(errors_to_use='pointwise')
            (self._data_chi2, self._model_chi2, self._pointwise_errors,
             self._par_vals, self._par_constraints, self._cov_mat_log_det))
    def test_chi2_cov_mat(self):
        self.assertAlmostEqual(
            self._cost_chi2_cov_mat,
            self.CHI2_COST_FUNCTION(errors_to_use='covariance')
            (self._data_chi2, self._model_chi2, self._cov_mat_cholesky, None, None,
             self._cov_mat_log_det))
        self.assertAlmostEqual(
            self._cost_chi2_cov_mat + self._par_cost,
            self.CHI2_COST_FUNCTION(errors_to_use='covariance')
            (self._data_chi2, self._model_chi2, self._cov_mat_cholesky,
             self._par_vals, self._par_constraints, self._cov_mat_log_det))
    def test_nll_gaussian(self):
        self.assertAlmostEqual(
            self._cost_nll_gaussian,
            self.NLL_COST_FUNCTION(data_point_distribution='gaussian')
            (self._data_chi2, self._model_chi2, self._pointwise_errors, None, None))
        self.assertAlmostEqual(
            self._cost_nll_gaussian + self._par_cost,
            self.NLL_COST_FUNCTION(data_point_distribution='gaussian')
            (self._data_chi2, self._model_chi2, self._pointwise_errors,
             self._par_vals, self._par_constraints))
    def test_nll_poisson(self):
        self.assertAlmostEqual(
            self._cost_nll_poisson,
            self.NLL_COST_FUNCTION(data_point_distribution='poisson')
            (self._data_poisson, self._model_poisson, None, None))
        self.assertAlmostEqual(
            self._cost_nll_poisson + self._par_cost,
            self.NLL_COST_FUNCTION(data_point_distribution='poisson')
            (self._data_poisson, self._model_poisson, self._par_vals, self._par_constraints))
    def test_nllr_gaussian(self):
        # ratio=True: likelihood ratio w.r.t. the saturated model; for the
        # gaussian case this equals the pointwise chi2 without the log-det.
        self.assertAlmostEqual(
            self._cost_chi2_pointwise - self._cov_mat_log_det,
            self.NLL_COST_FUNCTION(data_point_distribution='gaussian', ratio=True)
            (self._data_chi2, self._model_chi2, self._pointwise_errors, None, None))
        self.assertAlmostEqual(
            self._cost_chi2_pointwise + self._par_cost - self._cov_mat_log_det,
            self.NLL_COST_FUNCTION(data_point_distribution='gaussian', ratio=True)
            (self._data_chi2, self._model_chi2, self._pointwise_errors,
             self._par_vals, self._par_constraints))
    def test_nllr_poisson(self):
        self.assertAlmostEqual(
            self._cost_nllr_poisson,
            self.NLL_COST_FUNCTION(data_point_distribution='poisson', ratio=True)
            (self._data_poisson, self._model_poisson, None, None))
        self.assertAlmostEqual(
            self._cost_nllr_poisson + self._par_cost,
            self.NLL_COST_FUNCTION(data_point_distribution='poisson', ratio=True)
            (self._data_poisson, self._model_poisson, self._par_vals, self._par_constraints))
    def test_chi2_raise(self):
        # Invalid configuration and shape/singularity error paths.
        with self.assertRaises(ValueError):
            self.CHI2_COST_FUNCTION(errors_to_use="XYZ")
        with self.assertRaises(ValueError):
            self.CHI2_COST_FUNCTION(errors_to_use="covariance")(
                self._data_chi2, np.ones(10), self._cov_mat_cholesky, None, None,
                self._cov_mat_log_det)
        with self.assertRaises(CostFunctionException):
            self.CHI2_COST_FUNCTION(errors_to_use="covariance", fallback_on_singular=False)(
                self._data_chi2, self._model_chi2, None, None, None, self._cov_mat_log_det)
        with self.assertRaises(CostFunctionException):
            self.CHI2_COST_FUNCTION(errors_to_use="pointwise", fallback_on_singular=False)(
                self._data_chi2, self._model_chi2, np.arange(len(self._pointwise_errors)),
                None, None, self._cov_mat_log_det)
    def test_nll_raise(self):
        with self.assertRaises(ValueError):
            self.NLL_COST_FUNCTION(data_point_distribution="yes")
    def test_inf_cost(self):
        # NaN models and invalid (negative) models/errors must yield +inf.
        self.assertEqual(
            np.inf,
            self.CHI2_COST_FUNCTION(errors_to_use="covariance")(
                self._data_chi2, np.nan * np.ones_like(self._model_chi2), self._cov_mat_cholesky,
                None, None, self._cov_mat_log_det)
        )
        self.assertEqual(
            np.inf,
            self.CHI2_COST_FUNCTION(errors_to_use="pointwise")(
                self._data_chi2, np.nan * np.ones_like(self._model_chi2), self._pointwise_errors,
                None, None, self._cov_mat_log_det)
        )
        self.assertEqual(
            np.inf,
            self.CHI2_COST_FUNCTION(errors_to_use=None)(
                self._data_chi2, np.nan * np.ones_like(self._model_chi2), None, None,
                self._cov_mat_log_det)
        )
        self.assertEqual(
            np.inf,
            self.NLL_COST_FUNCTION("poisson", ratio=False)(
                self._data_poisson, -self._model_poisson, None, None)
        )
        self.assertEqual(
            np.inf,
            self.NLL_COST_FUNCTION("poisson", ratio=True)(
                self._data_poisson, -self._model_poisson, None, None)
        )
        self.assertEqual(
            np.inf,
            self.NLL_COST_FUNCTION("gaussian", ratio=False)(
                self._data_chi2, self._model_chi2, -self._pointwise_errors, None, None)
        )
        self.assertEqual(
            np.inf,
            self.NLL_COST_FUNCTION("gaussian", ratio=True)(
                self._data_chi2, self._model_chi2, -self._pointwise_errors, None, None)
        )
class TestCostBuiltinHist(TestCostBuiltin):
    """Re-run the built-in cost checks with the histogram cost functions."""
    CHI2_COST_FUNCTION = HistCostFunction_Chi2
    NLL_COST_FUNCTION = HistCostFunction_NegLogLikelihood
class TestCostBuiltinIndexed(TestCostBuiltin):
    """Re-run the built-in cost checks with the indexed cost functions."""
    CHI2_COST_FUNCTION = IndexedCostFunction_Chi2
    NLL_COST_FUNCTION = IndexedCostFunction_NegLogLikelihood
class TestCostBuiltinXY(TestCostBuiltin):
    """Re-run the built-in cost checks with the xy cost functions."""
    CHI2_COST_FUNCTION = XYCostFunction_Chi2
    NLL_COST_FUNCTION = XYCostFunction_NegLogLikelihood
class TestCostUserDefined(unittest.TestCase):
    """Checks wrapping of user-defined cost functions (named args and
    varargs) with and without the automatic constraint-cost addition."""
    def setUp(self):
        def my_cost(a, b, c, d):
            return a ** 2 + (b + c) ** 2 + (d - 1) ** 2
        def my_cost_varargs(*args):
            return args[0] ** 2 + (args[1] + args[2]) ** 2 + (args[3] - 1) ** 2
        self._ref_par_vals = [1, 2, 5, 7]
        self._ref_cost = my_cost(*self._ref_par_vals)
        # Unit-gaussian constraints centred at 0 on parameters 0 and 1,
        # so their combined cost is simply a^2 + b^2.
        self._constraint = GaussianSimpleParameterConstraint(index=0, value=0, uncertainty=1)
        self._matrix_constraint = GaussianMatrixParameterConstraint(
            indices=[1], values=[0], matrix=[[1]])
        self._constraint_cost = self._ref_par_vals[0] ** 2 + self._ref_par_vals[1] ** 2
        # Call signature with constraints: params + [param values, constraints].
        self._ref_par_vals_constraints = self._ref_par_vals + [
            self._ref_par_vals, [self._constraint, self._matrix_constraint]]
        self._cost_func = CostFunction(
            my_cost, arg_names=None, add_constraint_cost=False, add_determinant_cost=False)
        self._cost_func_constraints = CostFunction(
            my_cost, arg_names=None, add_constraint_cost=True, add_determinant_cost=False)
        self._cost_func_varargs = CostFunction(
            my_cost_varargs, arg_names=["a", "b", "c", "d"], add_constraint_cost=False,
            add_determinant_cost=False)
        self._cost_func_varargs_constraints = CostFunction(
            my_cost_varargs, arg_names=["a", "b", "c", "d"], add_constraint_cost=True,
            add_determinant_cost=False)
    def test_properties(self):
        # Names are taken from the wrapped callables; arg names either
        # introspected or passed explicitly.
        self.assertEqual(self._cost_func.name, "my_cost")
        self.assertEqual(self._cost_func.arg_names, ["a", "b", "c", "d"])
        self.assertEqual(self._cost_func_varargs.name, "my_cost_varargs")
        self.assertEqual(self._cost_func.arg_names, ["a", "b", "c", "d"])
    def test_validate_raise(self):
        # Varargs require explicit arg_names; kwargs and the reserved
        # name 'cost' are rejected.
        def _cost_args(*args):
            return np.sum(args)
        def _cost_kwargs(**kwargs):
            return np.sum(list(kwargs.values()))
        def _cost_bad_arg_name(cost):
            return cost
        _ = CostFunction(_cost_args, arg_names=["a", "b"])
        with self.assertRaises(CostFunctionException):
            _ = CostFunction(_cost_args)
        with self.assertRaises(CostFunctionException):
            _ = CostFunction(_cost_kwargs)
        with self.assertRaises(CostFunctionException):
            _ = CostFunction(_cost_kwargs, arg_names=["a", "b"])
        with self.assertRaises(CostFunctionException):
            _ = CostFunction(_cost_bad_arg_name)
        _ = CostFunction(_cost_bad_arg_name, arg_names=["a"])
        with self.assertRaises(CostFunctionException):
            _ = CostFunction(_cost_args, arg_names=["cost"])
    def test_compare_cost(self):
        self.assertEqual(
            self._ref_cost, self._cost_func(*self._ref_par_vals)
        )
        self.assertEqual(
            self._ref_cost, self._cost_func_constraints(*(self._ref_par_vals + [None, None]))
        )
        self.assertEqual(
            self._ref_cost,
            self._cost_func_constraints(*(self._ref_par_vals + [self._ref_par_vals, None]))
        )
        self.assertEqual(
            self._ref_cost + self._constraint_cost,
            self._cost_func_constraints(*self._ref_par_vals_constraints)
        )
    def test_compare_cost_varargs(self):
        self.assertEqual(
            self._ref_cost, self._cost_func_varargs(*self._ref_par_vals)
        )
        self.assertEqual(
            self._ref_cost, self._cost_func_varargs_constraints(*(
                self._ref_par_vals + [None, None]))
        )
        self.assertEqual(
            self._ref_cost,
            self._cost_func_varargs_constraints(*(
                self._ref_par_vals + [self._ref_par_vals, None]))
        )
        self.assertEqual(
            self._ref_cost + self._constraint_cost,
            self._cost_func_varargs_constraints(*self._ref_par_vals_constraints)
        )
|
dsavoiu/kafe2
|
kafe2/test/fit/test_cost_functions.py
|
Python
|
gpl-3.0
| 13,566
|
[
"Gaussian"
] |
91aec80529021808225d7223dfa39a5559400256e8b1201c6c78924e0b00d958
|
# Copyright 2001 by Gavin E. Crooks. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
""" Handle the SCOP DEScription file.
The file format is described in the scop
"release notes.":http://scop.berkeley.edu/release-notes-1.55.html
The latest DES file can be found
"elsewhere at SCOP.":http://scop.mrc-lmb.cam.ac.uk/scop/parse/
"Release 1.55":http://scop.berkeley.edu/parse/des.cla.scop.txt_1.55 (July 2001)
"""
class Record(object):
    """Holds information for one node in the SCOP hierarchy.

    Attributes:
    - sunid - SCOP unique identifiers
    - nodetype - One of 'cl' (class), 'cf' (fold), 'sf' (superfamily),
      'fa' (family), 'dm' (protein), 'sp' (species), 'px' (domain).
      Additional node types may be added.
    - sccs - SCOP concise classification strings. e.g. b.1.2.1
    - name - The SCOP ID (sid) for domains (e.g. d1anu1), currently empty for other node types
    - description - e.g. "All beta proteins","Fibronectin type III",
    """
    def __init__(self, line=None):
        self.sunid = ''
        self.nodetype = ''
        self.sccs = ''
        self.name = ''
        self.description = ''
        if line:
            self._process(line)

    def _process(self, line):
        """Parse one DES record.

        Records consist of 5 tab deliminated fields:
        sunid, node type, sccs, node name, node description.
        For example::

            21953   px      b.1.2.1 d1dan.1 1dan T:,U:91-106
            48724   cl      b       -       All beta proteins
        """
        stripped = line.rstrip()  # no trailing whitespace
        fields = stripped.split("\t")
        if len(fields) != 5:
            raise ValueError("I don't understand the format of %s" % stripped)
        sunid, self.nodetype, self.sccs, name, self.description = fields
        # A '-' placeholder means the node has no name.
        self.name = '' if name == '-' else name
        self.sunid = int(sunid)

    def __str__(self):
        parts = [self.sunid,
                 self.nodetype,
                 self.sccs,
                 self.name if self.name else '-',
                 self.description]
        return "\t".join(map(str, parts)) + "\n"
def parse(handle):
    """Iterate over a DES file, yielding one Record per data line.

    Arguments:
    - handle - file-like object; lines starting with '#' are skipped.
    """
    for raw_line in handle:
        if not raw_line.startswith('#'):
            yield Record(raw_line)
|
Ambuj-UF/ConCat-1.0
|
src/Utils/Bio/SCOP/Des.py
|
Python
|
gpl-2.0
| 2,825
|
[
"Biopython"
] |
7a7934fd0ac37f5803c3a60e37538a4ddb441a826ebb4d356a8cfccea402b5e6
|
"""A set of classes used to configure resources into Slurm nodes."""
import os
from .ansible import InventoryFile
# from .ansible.api import AnsibleRunner
from .ansible.cmd import AnsibleRunner
import slurmscale as ss
import logging
log = logging.getLogger(__name__)
class ConfigManagerFactory(object):
    """A factory for configuration managers."""

    @staticmethod
    def get_config_manager(config_manager_name):
        """
        Get a config manager based on the supplied argument.

        :type config_manager_name: ``str``
        :param config_manager_name: Name of the configuration manager class
                                    to instantiate. One of:
                                    ``GalaxyJetstreamIUConfigManager``

        :rtype: :class:`.config_manager.ConfigManager`
        :return: A configuration manager object.

        :raises ValueError: If ``config_manager_name`` is not recognized.
        """
        if config_manager_name == 'GalaxyJetstreamIUConfigManager':
            return GalaxyJetstreamIUConfigManager()
        # Previously ``assert 0, ...``: asserts are stripped under
        # ``python -O``, which would make this silently return None.
        # Raise explicitly so misconfiguration always fails loudly.
        raise ValueError(
            "Unrecognized config manager: " + config_manager_name)
class ConfigManager(object):
    """Interface that concrete configuration managers must implement."""

    def configure(self, instances):
        """
        Configure the supplied instances.

        :type instances: list of ``CloudBridge.Instance`` objects
        :param instances: A list of objects representing the target nodes.
        """
        # Interface stub; concrete managers override this.
        return None
class GalaxyJetstreamIUConfigManager(ConfigManager):
    """Config manager for Galaxy node configuration on Jetstream at IU."""

    def __init__(self):
        """Read the Ansible paths from the slurmscale config file."""
        get_value = ss.config.get_config_value
        self._playbook_root = get_value('ansible_playbook_root', None)
        self._inventory_path = os.path.join(
            self._playbook_root, get_value('ansible_inventory', None))
        self._playbook_path = os.path.join(
            self._playbook_root, get_value('ansible_playbook', None))
        self._venv_path = get_value('config_venv_path', None)

    def configure(self, servers):
        """
        Configure the supplied servers via ansible-playbook.

        :type servers: list of objects with ``name`` and ``ip`` properties
        :param servers: Servers to configure; each element (e.g. ``Node`` or
                        ``Bunch``) must expose ``name`` and ``ip`` fields.

        :rtype: tuple of ``str``
        :return: A tuple with the process exit code and stdout.
        """
        log.debug("Configuring servers {0}".format(servers))
        # Reduce each server object to the fields the inventory needs.
        nodes = [{'name': server.name, 'ip': server.ip} for server in servers]
        # Create the inventory file
        InventoryFile.create(self._inventory_path, nodes)
        # Run ansible-playbook
        log.info("Starting to configure nodes via ansible-playbook.")
        runner = AnsibleRunner(
            playbook_root=self._playbook_root,
            inventory_filename=self._inventory_path,
            playbook_path=self._playbook_path,
            venv_path=self._venv_path)
        return runner.run()
|
afgane/slurmscale
|
slurmscale/util/config_manager.py
|
Python
|
mit
| 3,392
|
[
"Galaxy"
] |
1345b07ca9eef0f7f7d5ef309912e404de56f0a6f65f84066a58ad4e41bdd8d4
|
from ast import parse, NodeVisitor
PSEUDO_FILENAME = 'live_source.py'  # filename reported when source is given as a string
DEFAULT_MODULE_NAME = '__main__'  # presumably used by importers of this module; not referenced here
LIVE_MODULE_NAME = '__live_coding__'  # presumably used by importers of this module; not referenced here
class TracedFinder(object):
    """ Find which nodes to trace in a module. """
    def __init__(self, source_code, traced, filename=None):
        """ Initialize the finder.

        :param str source_code: the source code that will be traced, or None if
            the source code should be read from the normal path.
        :param str traced: the module, method, or function name to trace
        :param str filename: the file the source code was read from
        """
        self.source_code = source_code
        self.traced = traced
        self.traced_node = None
        display_name = filename if filename else PSEUDO_FILENAME
        self.source_tree = parse(source_code, display_name)
        TreeVisitor(self).visit(self.source_tree)
        # Tracing is active only if the requested target was found.
        self.is_tracing = self.traced_node is not None
# noinspection PyPep8Naming
class TreeVisitor(NodeVisitor):
    """Walk an AST, recording the node whose dotted path matches the target."""
    def __init__(self, finder):
        self.finder = finder
        self.target = finder.traced.split('.')
        self.context = []

    def visit_FunctionDef(self, node):
        self.visit_node(node)

    def visit_ClassDef(self, node):
        self.visit_node(node)

    def visit_node(self, node):
        # Maintain the dotted path of enclosing definitions while descending.
        self.context.append(node.name)
        self.generic_visit(node)
        if self.context == self.target:
            self.finder.traced_node = node
        self.context.pop()
|
donkirkby/live-py-plugin
|
plugin/PySrc/space_tracer/traced_finder.py
|
Python
|
mit
| 1,490
|
[
"VisIt"
] |
a7490ef9f00226e878d9b2ea175ecd90c94c4d0c5e87d2129f5be39628c604b5
|
from .PyPolyBoRi import *
from .interred import interred
def buchberger(l):
    "calculates a (non minimal) Groebner basis"
    # interred ensures every polynomial has a different leading term,
    # which add_generator requires.
    generators = interred(l)
    if not generators:
        return []
    strategy = GroebnerStrategy(generators[0].ring())
    for poly in generators:
        strategy.add_generator(poly)
    # Standard Buchberger loop: reduce S-polynomials until no pairs remain.
    while strategy.npairs() > 0:
        strategy.clean_top_by_chain_criterion()
        reduced = strategy.nf(strategy.next_spoly())
        if not reduced.is_zero():
            strategy.add_generator(reduced)
    return list(strategy)
def less_than_n_solutions(ideal, n):
    # Return True as soon as the ideal provably has fewer than n solutions,
    # by counting standard monomials (those outside the leading ideal)
    # while running a Buchberger-style completion; False otherwise.
    l = interred(ideal)
    if not l:
        return False
    g = GroebnerStrategy(l[0].ring())
    # Start from all divisors of the product of every variable, i.e. the
    # full set of square-free monomials over the ring's variables.
    all_monomials = Monomial([Variable(i) for i
                              in range(number_of_variables())]).divisors()
    monomials_not_in_leading_ideal = all_monomials
    for p in l:
        g.add_generator(p)
    while g.npairs() > 0:
        # Prune by the current minimal leading terms and bail out early
        # once fewer than n standard monomials remain.
        monomials_not_in_leading_ideal = monomials_not_in_leading_ideal \
            % g.reduction_strategy.minimal_leading_terms
        if len(monomials_not_in_leading_ideal) < n:
            return True
        g.clean_top_by_chain_criterion()
        p = g.next_spoly()
        p = g.nf(p)
        if not p.is_zero():
            g.add_generator(p)
    # Final check after the basis is complete.
    monomials_not_in_leading_ideal = monomials_not_in_leading_ideal \
        % g.reduction_strategy.minimal_leading_terms
    if len(monomials_not_in_leading_ideal) < n:
        return True
    else:
        return False
def gauss(matrix):
    """Toy Gaussian elimination over Boolean polynomials.

    Each row of *matrix* is read as a linear equation over the ring's
    variables; the reduced Groebner basis rows are converted back to 0/1
    coefficient lists, padded with zero rows to the input's row count.

    Example: gauss([[0,1],[1,1]])
    """
    from .gbcore import groebner_basis

    def get_num(idx, monomial_vars):
        # 1 if variable *idx* occurs in the monomial's variable set, else 0.
        # (Parameter renamed from `vars`, which shadowed the builtin.)
        if idx in [var.index() for var in monomial_vars.variables()]:
            return 1
        return 0

    nrows = len(matrix)
    ncols = len(matrix[0])
    eqs = [sum([matrix[row][col] * Variable(col) for col in range(ncols)])
           for row in range(nrows)]
    result = groebner_basis(eqs)
    # Pad with zero rows so the output keeps the input's row count.
    result = result + [BooleConstant(0)] * (nrows - len(result))
    # NOTE: the original had an unreachable `return result` after this
    # return; it has been removed.
    return [[get_num(idx, elt.set().vars()) for idx in range(ncols)]
            for elt in result]
|
BRiAl/BRiAl
|
sage-brial/brial/simplebb.py
|
Python
|
gpl-2.0
| 2,128
|
[
"Gaussian"
] |
c73bdb973b9e3bc1824c0a74424d73a1a0ba75f47372c97bd591a953626bfce9
|
from catkit import Gratoms
from .. import utils
import networkx as nx
import numpy as np
import ase
class Classifier():
"""Class for classification of various aspects of an an atomic
unit cell.
Currently, a tool for classification of adsorbates on surface
environments and the active sites they rest on.
"""
def __init__(self, atoms):
"""Return unique coordinate values of a given atoms object
for a specified axis.
Parameters
----------
atoms : atoms object
"""
self.atoms = atoms
self.ads_atoms = None
self.slab_atoms = None
self.surface_atoms = None
    def id_slab_atoms(
            self,
            classifier='trivial',
            tag=False,
            rtol=1e-3):
        """Return the indices of the slab atoms using select characterization
        techniques.
        Parameters
        ----------
        classifier : str
            Classification technique to identify slab atoms.
            'trivial':
                Slab atoms assumed to have atomic number == 13 or >= 21.
        tag : bool
            Return adsorbate atoms with tags of 2.
        rtol : float
            Relative cutoff distance for tagging layers.
        Returns
        -------
        slab_atoms : ndarray (n,)
            Index of slab atoms found.
        """
        atoms = self.atoms
        if classifier == 'trivial':
            # Al (13) or transition metals and beyond (>= 21) count as slab.
            slab_atoms = np.where((atoms.numbers == 13) |
                                  (atoms.numbers >= 21))[0]
        if tag:
            # Group slab atoms into layers by z coordinate; atoms whose z is
            # within rtol of an already-tagged layer share its tag.
            zpos = np.sort(atoms.positions[slab_atoms][:, -1])
            new_tags = np.zeros_like(zpos, dtype=int)
            tag = 1
            for i, z in enumerate(zpos):
                if new_tags[i] != 0:
                    continue
                layer = np.isclose(z, zpos, rtol=rtol)
                new_tags[layer] = tag
                tag += 1
            tags = self.atoms.get_tags()
            # Reverse so layer 1 is the topmost layer (highest z).
            tags[slab_atoms] = new_tags[::-1]
            self.atoms.set_tags(tags)
        self.slab_atoms = slab_atoms
        return slab_atoms
def id_adsorbate_atoms(self, classifier='trivial', tag=False):
"""Identify adsorbed atoms in a given atoms object.
Parameters
----------
classifier : str
Classification technique to identify adsorbate atoms.
'trivial':
Adsorbate atoms assumed to have atomic number != 13 or < 21.
tag : bool
Return adsorbate atoms with tags of -2.
Returns
-------
ads_atoms : ndarray (n,)
Index of adsorbate atoms found.
"""
atoms = self.atoms
if classifier == 'trivial':
ads_atoms = np.where((atoms.numbers != 13) &
(atoms.numbers < 21))[0]
if tag:
tags = self.atoms.get_tags()
tags[ads_atoms] = -2
self.atoms.set_tags(tags)
self.ads_atoms = ads_atoms
return ads_atoms
def id_surface_atoms(self, classifier='voronoi_sweep'):
"""Identify surface atoms of an atoms object. This will
require that adsorbate atoms have already been identified.
Parameters
----------
classifier : str
Classification technique to identify surface atoms.
'voronoi_sweep':
Create a sweep of proxy atoms above surface. Surface atoms
are those which are most frequent neighbors of the sweep.
Returns
-------
surface_atoms : ndarray (n,)
Index of the surface atoms in the object.
"""
atoms = self.atoms.copy()
# Remove adsorbates before analysis
ads_atoms = self.ads_atoms
if ads_atoms is None:
ads_atoms = self.id_adsorbate_atoms()
del atoms[ads_atoms]
if classifier == 'voronoi_sweep':
spos = atoms.get_scaled_positions()
zmax = np.max(spos[:, -1])
# Create a distribution of points to screen with
# 2.5 angstrom defines the absolute separation
dvec = (np.linalg.norm(atoms.cell[:-1], axis=1) / 2.5) ** -1
xy = np.mgrid[0:1:dvec[0], 0:1:dvec[1]].reshape(2, -1)
z = np.ones_like(xy[0]) * zmax
xyz = np.vstack((xy, z)).T
screen = np.dot(xyz, atoms.cell)
n = len(atoms)
m = len(screen)
ind = np.arange(n, n + m)
slab_atoms = np.arange(n)
satoms = []
# 2 - 3 Angstroms seems to work for a large range of indices.
for k in np.linspace(2, 3, 10):
wall = screen.copy() + [0, 0, k]
atm = ase.Atoms(['X'] * m, positions=wall)
test_atoms = atoms + atm
con = utils.get_voronoi_neighbors(test_atoms)
surf_atoms = np.where(con[ind].sum(axis=0)[slab_atoms])[0]
satoms += [surf_atoms]
len_surf_atoms = [len(_) for _ in satoms]
uni, ind, cnt = np.unique(
len_surf_atoms, return_counts=True, return_index=True)
max_cnt = np.argmax(cnt)
surf_atoms = satoms[ind[max_cnt]]
self.surface_atoms = surf_atoms
return surf_atoms
def id_adsorbates(self, classifier='radial', return_atoms=False):
"""Return a list of Gratoms objects for each adsorbate
classified on a surface. Requires classification of adsorbate
atoms.
Parameters
----------
classifier : str
Classification technique to identify individual adsorbates.
'radial':
Use standard cutoff distances to identify neighboring atoms.
return_atoms : bool
Return Gratoms objects instead of adsorbate indices.
Returns
-------
adsorbates : list (n,)
Adsorbate indices of adsorbates in unit cell.
"""
atoms = self.atoms.copy()
# Remove the slab atoms
ads_atoms = self.ads_atoms
if ads_atoms is None:
ads_atoms = self.id_adsorbate_atoms()
if classifier == 'radial':
con = utils.get_cutoff_neighbors(atoms)
ads_con = con[ads_atoms][:, ads_atoms]
G = nx.Graph()
G.add_nodes_from(ads_atoms)
edges = utils.connectivity_to_edges(ads_con, indices=ads_atoms)
G.add_weighted_edges_from(edges, weight='bonds')
SG = nx.connected_component_subgraphs(G)
adsorbates = []
for sg in SG:
nodes = list(sg.nodes)
if return_atoms:
edges = list(sg.edges)
ads = Gratoms(
numbers=atoms.numbers[nodes],
positions=atoms.positions[nodes],
edges=edges)
ads.center(vacuum=5)
else:
ads = nodes
adsorbates += [ads]
return adsorbates
|
jboes/CatKit
|
catkit/gen/analysis/classifier.py
|
Python
|
gpl-3.0
| 7,098
|
[
"ASE"
] |
c09ba8e919b775f7740c52325acd817bf8a5b95587f2c5902cde575d004029bd
|
"""
Common Django settings for the CarnetDuMaker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
import os # For filesystem PATH generation and traversing
import json # For secret configuration file parsing
from django.utils.translation import ugettext_lazy as _
# IMPORTANT NOTE: DO NOT USE TUPLE, USE ARRAY, otherwise dev/prod settings overload will break
#region ----- Root directory path setting
# Root directory of the project (three levels up from this settings module)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
#endregion
#region ----- Secret settings loading from JSON file
# Catch error to provide fallback in case of debug environment (no secret file)
# NOTE: only a missing/unreadable file is tolerated; a malformed JSON file
# will still raise so that a broken secrets file cannot go unnoticed.
try:
    # Open the secret settings JSON file and load its content
    with open(os.path.join(os.path.dirname(BASE_DIR), 'carnetdumaker-secrets.json')) as fi_handle:
        SECRETS = json.load(fi_handle)
except IOError:
    # Fallback for debug environment (no secret file)
    SECRETS = {}
#endregion
#region ----- Core settings
# Secret key for cryptographic signing
# SECURITY WARNING: keep the secret key used in production secret!
# See https://docs.djangoproject.com/en/1.7/ref/settings/#secret-key
SECRET_KEY = SECRETS.get('SECRET_KEY', 'the totally not secret key for debug')
# A string representing the full Python import path to your root URLconf.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#root-urlconf
ROOT_URLCONF = 'carnetdumaker.urls'
# The full Python path of the WSGI application object that Django’s built-in servers will use.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#wsgi-application
WSGI_APPLICATION = 'carnetdumaker.wsgi.dev.application'
# List of directories searched for fixture files, in addition to the fixtures directory of each application, in search order.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = [
    os.path.join(BASE_DIR, 'fixtures'),
]
# List of authentication backend
# See https://docs.djangoproject.com/fr/1.8/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
    'apps.tools.auth_backend.CaseInsensitiveUsernameAuthBackend',
    'apps.tools.auth_backend.EmailAuthBackend',
]
#endregion
#region ----- Internationalization and localization settings
# See https://docs.djangoproject.com/en/1.7/topics/i18n/
# Default language code of the project
# See https://docs.djangoproject.com/en/1.7/ref/settings/#language-code
LANGUAGE_CODE = 'fr'
# A list of all available languages.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#languages
LANGUAGES = [
    ('fr', _('French')),
    ('en', _('English')),
]
# Set to true to enable string language translation
# See https://docs.djangoproject.com/en/1.7/ref/settings/#use-i18n
USE_I18N = True
# Set to true to enable localized format (like date and numbers)
# See https://docs.djangoproject.com/en/1.7/ref/settings/#use-l10n
USE_L10N = True
# Default time zone of the server
# See https://docs.djangoproject.com/en/1.7/ref/settings/#time-zone
TIME_ZONE = 'Europe/Paris'
# Set to true to enable localized time zone (like GMT+1)
# See https://docs.djangoproject.com/en/1.7/ref/settings/#use-tz
USE_TZ = True
# Default first day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
# See https://docs.djangoproject.com/en/1.7/ref/settings/#first-day-of-week
FIRST_DAY_OF_WEEK = 1
#endregion
#region ----- Email and error notification settings
# List of admin mails, will get mailed when DEBUG=False and something goes terribly wrong
# See https://docs.djangoproject.com/en/1.7/ref/settings/#admins
ADMINS = SECRETS.get('ADMINS', [])
# List of managers mails, will get mailed when a broken link is requested
# Require BrokenLinkEmailsMiddleware to be enabled to work
# See https://docs.djangoproject.com/en/1.7/ref/settings/#managers
MANAGERS = SECRETS.get('MANAGERS', [])
# List of (compiled) regex describing URLs that should be ignored when reporting HTTP 404 errors via email
# See https://docs.djangoproject.com/en/1.7/ref/settings/#ignorable-404-urls
IGNORABLE_404_URLS = []
#endregion
#region ----- Cache settings
# Key prefix for all cache key
# Declared here once, then use in concrete settings
# See https://docs.djangoproject.com/fr/1.9/ref/settings/#std:setting-CACHES-KEY_PREFIX
CACHE_KEY_PREFIX = 'cdm1'
# The cache backend alias to use for the per-site cache
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-CACHE_MIDDLEWARE_ALIAS
CACHE_MIDDLEWARE_ALIAS = 'default'
# Key prefix for the cache middleware
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-CACHE_MIDDLEWARE_KEY_PREFIX
CACHE_MIDDLEWARE_KEY_PREFIX = 'cachemdw'
# Lifetime duration of the cache from the cache middleware in seconds
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-CACHE_MIDDLEWARE_SECONDS
CACHE_MIDDLEWARE_SECONDS = 60 * 60 * 24  # 24 hours
#endregion
#region ----- Email settings
# Subject prefix of emails sent to ADMINS and MANAGERS
# See https://docs.djangoproject.com/en/1.7/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[Carnet Du Maker] '
# The email address that error messages come from (used with ADMINS and MANAGERS)
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-SERVER_EMAIL
SERVER_EMAIL = 'notifications@carnetdumaker.net'
# 'From' address for all mail sent with send_mail()
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-DEFAULT_FROM_EMAIL
DEFAULT_FROM_EMAIL = 'notifications@carnetdumaker.net'
# SMTP settings (all pulled from the secrets file, with local-dev defaults)
EMAIL_HOST = SECRETS.get('EMAIL_HOST', 'localhost')
EMAIL_PORT = SECRETS.get('EMAIL_PORT', 25)
EMAIL_HOST_USER = SECRETS.get('EMAIL_HOST_USER', '')
EMAIL_HOST_PASSWORD = SECRETS.get('EMAIL_HOST_PASSWORD', '')
EMAIL_USE_TLS = SECRETS.get('EMAIL_USE_TLS', False)
#endregion
#region ----- User agents filtering settings
# List of (compiled) regex representing User-Agent strings that are not allowed to visit any page
# See https://docs.djangoproject.com/en/1.7/ref/settings/#disallowed-user-agents
DISALLOWED_USER_AGENTS = []
# Some good regex here:
# http://www.askapache.com/htaccess/blocking-bad-bots-and-scrapers-with-htaccess.html
# https://github.com/bluedragonz/bad-bot-blocker/blob/master/.htaccess
#endregion
#region ----- Application definition
# List of all applications that are enabled in this Django installation
# See https://docs.djangoproject.com/en/1.7/ref/settings/#installed-apps
INSTALLED_APPS = [
    # Django apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.sitemaps',
    # Vendor apps
    'mptt',
    'haystack',
    # Local apps (commented-out entries are currently disabled)
    'apps.accounts',
    'apps.announcements',
    'apps.antispam',
    # 'apps.badges',
    'apps.blog',
    'apps.bootstrapform',
    'apps.bugtracker',
    'apps.changemail',
    # 'apps.contactform',
    'apps.contentreport',
    'apps.countries',
    'apps.dbmutex',
    'apps.donottrack',
    'apps.fileattachments',
    'apps.forcelogout',
    'apps.forum',
    'apps.gender',
    'apps.home',
    'apps.imageattachments',
    'apps.licenses',
    'apps.loginwatcher',
    # 'apps.mailqueue',
    'apps.multiupload',
    'apps.notifications',
    'apps.paginator',
    'apps.privatemsg',
    'apps.redirects',
    'apps.registration',
    'apps.shop',
    'apps.snippets',
    'apps.staticpages',
    'apps.timezones',
    'apps.tools',
    'apps.twitter',
    'apps.txtrender',
    'apps.userapikey',
    'apps.usernotes',
    'apps.userstrike',
]
# List of all middleware that are enabled in this Django installation
# Order matters: middleware are applied top-down on requests, bottom-up on responses.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',  # For language activation upon login
    'apps.timezones.middleware.TimezoneMiddleware',  # For timezone activation upon login
    'apps.redirects.middleware.RedirectFallbackMiddleware',  # For handling known 404 errors
    # RedirectFallbackMiddleware MUST BE BEFORE CommonMiddleware
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'django.middleware.security.SecurityMiddleware ',
    'apps.forcelogout.middleware.ForceLogoutMiddleware',  # For forcing logout of specific users
    'apps.accounts.middleware.LastActivityDateUpdateMiddleware',  # For last login date update
    'apps.donottrack.middleware.DoNotTrackMiddleware',  # For DoNotTrack support
    'apps.userstrike.middleware.UserStrikeMiddleware',  # For warning/ban users/ip address
]
#endregion
#region ----- Database settings
# Database settings (credentials come from the secrets file; defaults target
# the local Vagrant development box)
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': SECRETS.get('DATABASE_NAME', 'vagrant'),
        'USER': SECRETS.get('DATABASE_USER', 'vagrant'),
        'PASSWORD': SECRETS.get('DATABASE_PASSWORD', 'vagrant'),
        'HOST': SECRETS.get('DATABASE_HOST', '')  # If blank: use UNIX domain socket (local lines in pg_hba.conf)
    }
}
#endregion
#region ----- Media and files upload settings
# Maximum size in bytes of a request before it will be streamed to the file system instead of memory.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#file-upload-max-memory-size
FILE_UPLOAD_MAX_MEMORY_SIZE = 1024 * 1024 * 2.5  # 2.5Mo
# The numeric mode to set newly uploaded files to.
# DO NOT FORGET THE 0o (octal) prefix!
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-FILE_UPLOAD_PERMISSIONS
FILE_UPLOAD_PERMISSIONS = 0o644
# The numeric mode to apply to directories created in the process of uploading files.
# DO NOT FORGET THE 0 (octal) prefix!
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-FILE_UPLOAD_DIRECTORY_PERMISSIONS
FILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o755
# Absolute filesystem path to the directory that will hold user-uploaded files
# See https://docs.djangoproject.com/en/1.7/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
# Debug variant for tests
DEBUG_MEDIA_ROOT = os.path.join(MEDIA_ROOT, 'tests')
# URL that handles the media served from MEDIA_ROOT.
# It must end in a slash if set to a non-empty value.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#media-url
MEDIA_URL = '/uploads/'
#endregion
#region ----- Static files upload settings
# See https://docs.djangoproject.com/en/1.7/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#static-root
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# URL to use when referring to static files located in STATIC_ROOT.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#static-url
STATIC_URL = '/static/'
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder is enabled.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#staticfiles-dirs
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
# The file storage engine to use when collecting static files with the collectstatic management command.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-STATICFILES_STORAGE
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# The list of finder backends that know how to find static files in various locations.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
#endregion
#region ----- Template settings
# Template settings
# See https://docs.djangoproject.com/en/1.8/ref/templates/upgrading/
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'debug': False,
            'context_processors': [
                # Standard context processors
                'django.contrib.auth.context_processors.auth',
                'django.core.context_processors.debug',
                'django.core.context_processors.i18n',
                'django.core.context_processors.media',
                'django.core.context_processors.static',
                'django.core.context_processors.tz',
                # 'django.template.context_processors.request',
                'django.contrib.messages.context_processors.messages',
                # Custom context processors
                'carnetdumaker.context_processors.app_constants',
                'apps.bugtracker.context_processors.bugtracker',
                'apps.gender.context_processors.gender',
                'apps.donottrack.context_processors.do_not_track',
            ],
            'string_if_invalid': '!-%s-!',
        },
    },
]
#endregion
#region ----- XFrame settings
# See https://docs.djangoproject.com/en/1.7/ref/clickjacking/
# Click jacking options
# See https://docs.djangoproject.com/en/1.7/ref/settings/#std:setting-X_FRAME_OPTIONS
X_FRAME_OPTIONS = 'SAMEORIGIN'
#endregion
#region ----- Sessions settings
# Age of cookie, in seconds
# See https://docs.djangoproject.com/en/1.7/ref/settings/#session-cookie-age
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4  # 4 weeks
# Set to true to disallow access of session cookies by javascript
# See https://docs.djangoproject.com/en/1.7/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# backend used to store session data
# See https://docs.djangoproject.com/en/1.7/ref/settings/#session-engine
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
#endregion
#region ----- Users authentication
# The URL where requests are redirected for login
# See https://docs.djangoproject.com/en/1.7/ref/settings/#login-url
LOGIN_URL = '/authentification/connexion/'
# The URL where requests are redirected for logout, LOGIN_URL counterpart.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#logout-url
LOGOUT_URL = '/authentification/deconnexion/'
# The URL where requests are redirected after login when no next parameter is provided
# See https://docs.djangoproject.com/en/1.7/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = '/mon-compte/'
# The number of days a password reset link is valid for
# See https://docs.djangoproject.com/en/1.7/ref/settings/#password-reset-timeout-days
PASSWORD_RESET_TIMEOUT_DAYS = 2
#endregion
#region ----- CSRF settings
# Set to true to disallow CSRF cookies access from javascript
# See https://docs.djangoproject.com/en/1.7/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = False
# Don't set to true, otherwise Ajax forms that read the CSRF token from the
# cookie in javascript will break.
# Dotted path to callable to be used as view when a request is rejected by the CSRF middleware.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#csrf-failure-view
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# TODO Add a custom CSRF view for trolling bad guys
#endregion
#region ----- Logging
# See https://docs.djangoproject.com/en/1.7/topics/logging/#configuring-logging
# A data structure containing configuration information.
# Errors on django.request are mailed to ADMINS (production only, see the
# require_debug_false filter) and always echoed to the console.
# See https://docs.djangoproject.com/en/1.7/ref/settings/#logging
LOGGING = {
    "version": 1,
    "disable_existing_loggers": True,
    "formatters": {
        "simple": {
            "format": "[%(name)s] %(levelname)s: %(message)s",
        },
        "full": {
            "format": "%(asctime)s [%(name)s] %(levelname)s: %(message)s",
            "datefmt": "%d-%m-%Y %H:%M:%S",
        },
    },
    "filters": {
        "require_debug_false": {
            "()": "django.utils.log.RequireDebugFalse",
        },
    },
    "handlers": {
        "mail_admins": {
            "level": "ERROR",
            "filters": ['require_debug_false'],
            "class": "django.utils.log.AdminEmailHandler",
        },
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "simple",
        },
    },
    "loggers": {
        "django.request": {
            "handlers": ["mail_admins", "console"],
            "level": "DEBUG",
            "propagate": False,
        },
    }
}
#endregion
#region ----- Django-site settings
# Current Django site identifier
# See https://docs.djangoproject.com/en/1.7/ref/settings/#site-id
SITE_ID = 1
#endregion
#region ----- Registration app settings
# Set to true to allow new user registrations
REGISTRATION_OPEN = SECRETS.get('REGISTRATION_OPEN', True)
# The number of days an activation link is valid for
ACCOUNT_ACTIVATION_TIMEOUT_DAYS = 2
#endregion
#region ----- Search engine settings
# Settings for the Haystack search API
# See http://django-haystack.readthedocs.org/en/latest/settings.html#haystack-connections
HAYSTACK_CONNECTIONS = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
        'URL': SECRETS.get('HAYSTACK_URL', 'http://127.0.0.1:9200/'),
        'INDEX_NAME': SECRETS.get('HAYSTACK_INDEX_NAME', 'haystack'),
    },
}
# This setting controls how many results are shown per page when using the included SearchView and its subclasses.
# See http://django-haystack.readthedocs.org/en/latest/settings.html#haystack-search-results-per-page
HAYSTACK_SEARCH_RESULTS_PER_PAGE = 20
#endregion
#region ----- Anti-spam app settings
# Set to True to disable completely the antispam verification
DISABLE_ANTISPAM_VERIFICATION = SECRETS.get('DISABLE_ANTISPAM_VERIFICATION', False)
#endregion
#region ----- Blog app settings
# Parent forum ID for all article related forum's thread (None = no related thread created).
PARENT_FORUM_ID_FOR_ARTICLE_THREADS = SECRETS.get('PARENT_FORUM_ID_FOR_ARTICLE_THREADS', None)
#endregion
#region ----- Change-email app settings
# Number of days a "change email" link is valid
CHANGE_EMAIL_TIMEOUT_DAYS = 2
#endregion
#region ----- Content report app settings
# List of user names for notification of new content report
USERNAME_LIST_FOR_CONTENT_REPORT_NOTIFICATION = SECRETS.get('USERNAME_LIST_FOR_CONTENT_REPORT_NOTIFICATION', [])
#endregion
#region ----- Login watcher app settings
# Number of days before an event is deleted.
LOG_EVENT_TTL_TIMEOUT_DAYS = 3 * 31
#endregion
#region ----- Twitter app settings
# Consumer key (from https://apps.twitter.com/)
TWITTER_CONSUMER_KEY = SECRETS.get('TWITTER_CONSUMER_KEY', None)
# Consumer secret (from https://apps.twitter.com/)
TWITTER_CONSUMER_SECRET = SECRETS.get('TWITTER_CONSUMER_SECRET', None)
# OAuth token (from ``get_access_token.py`` helper of ``python-twitter``)
TWITTER_OAUTH_TOKEN = SECRETS.get('TWITTER_OAUTH_TOKEN', None)
# OAuth token secret (from ``get_access_token.py`` helper of ``python-twitter``)
TWITTER_OAUTH_TOKEN_SECRET = SECRETS.get('TWITTER_OAUTH_TOKEN_SECRET', None)
#endregion
|
TamiaLab/carnetdumaker
|
carnetdumaker/settings/common.py
|
Python
|
agpl-3.0
| 19,607
|
[
"VisIt"
] |
0c656318927ab90119a52839e0c3140b7817ae48e8b3f059a2a8eaa5be533079
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
import random
def drawVertex(myscreen, p, vertexColor, rad=0.1):
    """Add a sphere actor of radius rad at point p to the screen."""
    sphere = camvtk.Sphere(center=(p.x, p.y, p.z), radius=rad, color=vertexColor)
    myscreen.addActor(sphere)
def drawEdge(myscreen, e, edgeColor=camvtk.yellow):
    """Add a line actor connecting the two endpoints of edge e."""
    start = e[0]
    end = e[1]
    line = camvtk.Line(p1=(start.x, start.y, start.z),
                       p2=(end.x, end.y, end.z),
                       color=edgeColor)
    myscreen.addActor(line)
#def drawFarCircle(myscreen, r, circleColor):
# myscreen.addActor( camvtk.Circle( center=(0,0,0), radius=r, color=circleColor ) )
def drawDiagram( myscreen, diag ):
    """Draw every vertex (green) and edge (cyan) of the diagram."""
    #drawFarCircle(myscreen, vd.getFarRadius(), camvtk.pink)
    for vert in diag.getVertices():
        drawVertex(myscreen, vert, camvtk.green)
    for edge in diag.getEdges():
        drawEdge(myscreen, edge, camvtk.cyan)
def writeFrame( w2if, lwr, n ):
    """Capture the current render window to frames/vd500_zoomoutNNNNN.png."""
    w2if.Modified()
    frame_name = "frames/vd500_zoomout" + ('%05d' % n) + ".png"
    lwr.SetFileName(frame_name)
    lwr.Write()
if __name__ == "__main__":
    # NOTE: this script is Python 2 (print statements below).
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    camvtk.drawOCLtext(myscreen)
    # Window-to-image filter + PNG writer (used by writeFrame for captures).
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    #w2if.Modified()
    #lwr.SetFileName("tux1.png")
    # SURFACE
    #stl = camvtk.STLSurf("../stl/Cylinder_1.stl")
    #stl = camvtk.STLSurf("../stl/gnu_tux_mod.stl")
    stl = camvtk.STLSurf("../stl/demo.stl")
    myscreen.addActor(stl)
    stl.SetWireframe()
    stl.SetColor((0.5,0.5,0.5))
    # Convert the VTK polydata into an OCL STLSurf for the CAM algorithms.
    polydata = stl.src.GetOutput()
    s = ocl.STLSurf()
    camvtk.vtkPolyData2OCLSTL(polydata, s)
    print "STL surface read,", s.size(), "triangles"
    far = 20
    # far = 0.000002 generator 52 face_count crash
    # far = 0.000010 crashes at n=192
    camPos = 2* far
    myscreen.camera.SetPosition(camPos/1000, camPos/1000, camPos)
    myscreen.camera.SetClippingRange(-2*camPos,2*camPos)
    myscreen.camera.SetFocalPoint(0.051, 0, 0)
    # Cutter-location surface sampled with a ball cutter.
    cls = ocl.CutterLocationSurface(10)
    cutter = ocl.BallCutter(2,10)
    cls.setCutter(cutter)
    cls.setSampling(1)
    cls.setMinSampling(0.1)
    cls.setSTL(s)
    drawDiagram(myscreen, cls)
    #vd = ocl.VoronoiDiagram(far,1200)
    #vod = VD(myscreen,vd,scale)
    #vod.setAll(vd)
    #drawFarCircle(myscreen, scale*vd.getFarRadius(), camvtk.orange)
    print "PYTHON All DONE."
    myscreen.render()
    # Hand control to the VTK interactor (blocks until the window is closed).
    myscreen.iren.Start()
|
AlanZatarain/opencamlib
|
scripts/clsurf_1.py
|
Python
|
gpl-3.0
| 2,482
|
[
"VTK"
] |
3e72d6c4f574c5786bbab7337d23306980eb950a60730445d355addb21c60a79
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import copy
import importlib
import logging
import os
import tempfile
import warnings
from contextlib import contextmanager
from packaging.version import Version
from functools import partial
import dill
import numpy as np
import scipy
import scipy.odr as odr
from IPython.display import display, display_pretty
from scipy.linalg import svd
from scipy.optimize import (
differential_evolution,
leastsq,
least_squares,
minimize,
OptimizeResult
)
from hyperspy.component import Component
from hyperspy.defaults_parser import preferences
from hyperspy.docstrings.model import FIT_PARAMETERS_ARG
from hyperspy.docstrings.signal import SHOW_PROGRESSBAR_ARG
from hyperspy.events import Event, Events, EventSuppressor
from hyperspy.exceptions import VisibleDeprecationWarning
from hyperspy.extensions import ALL_EXTENSIONS
from hyperspy.external.mpfit.mpfit import mpfit
from hyperspy.external.progressbar import progressbar
from hyperspy.misc.export_dictionary import (export_to_dictionary,
load_from_dictionary,
parse_flag_string,
reconstruct_object)
from hyperspy.misc.model_tools import current_model_values
from hyperspy.misc.slicing import copy_slice_from_whitelist
from hyperspy.misc.utils import (dummy_context_manager, shorten_name, slugify,
stash_active_state)
from hyperspy.signal import BaseSignal
from hyperspy.ui_registry import add_gui_method
# Module-level logger for this file.
_logger = logging.getLogger(__name__)
# Registry of components provided through the extension mechanism.
_COMPONENTS = ALL_EXTENSIONS["components1D"]
# NOTE(review): updating the dict with itself is a no-op — presumably this
# was meant to merge ALL_EXTENSIONS["components2D"]; confirm upstream.
_COMPONENTS.update(ALL_EXTENSIONS["components1D"])
def _check_deprecated_optimizer(optimizer):
"""Can be removed in HyperSpy 2.0"""
deprecated_optimizer_dict = {
"fmin": "Nelder-Mead",
"fmin_cg": "CG",
"fmin_ncg": "Newton-CG",
"fmin_bfgs": "BFGS",
"fmin_l_bfgs_b": "L-BFGS-B",
"fmin_tnc": "TNC",
"fmin_powell": "Powell",
"mpfit": "lm",
"leastsq": "lm",
}
check_optimizer = deprecated_optimizer_dict.get(optimizer, None)
if check_optimizer:
warnings.warn(
f"`{optimizer}` has been deprecated and will be removed "
f"in HyperSpy 2.0. Please use `{check_optimizer}` instead.",
VisibleDeprecationWarning,
)
optimizer = check_optimizer
return optimizer
def reconstruct_component(comp_dictionary, **init_args):
    """Re-create a component instance from its serialized dictionary.

    Looks the component class up in the extension registry first, falls
    back to the dill dump embedded in the dictionary, and raises
    ``ImportError`` when neither is available.
    """
    id_name = comp_dictionary['_id_name']
    if id_name in _COMPONENTS:
        entry = _COMPONENTS[id_name]
        module = importlib.import_module(entry["module"])
        _class = getattr(module, entry["class"])
    elif "_class_dump" in comp_dictionary:
        # When a component is not registered using the extension mechanism,
        # it is serialized using dill.
        _class = dill.loads(comp_dictionary['_class_dump'])
    else:
        raise ImportError(
            f'Loading the {comp_dictionary["class"]} component ' +
            'failed because the component is provided by the ' +
            f'{comp_dictionary["package"]} Python package, but ' +
            f'{comp_dictionary["package"]} is not installed.')
    return _class(**init_args)
class ModelComponents(object):
    """Container for model components.

    Useful to provide tab completion when running in IPython.
    """

    def __init__(self, model):
        self._model = model

    def __repr__(self):
        # Fixed-width table: index, attribute name, component name, type.
        fmt = "%4s | %19s | %19s | %19s"
        rows = [
            fmt % ('#', 'Attribute Name', 'Component Name', 'Component Type'),
            fmt % ('-' * 4, '-' * 19, '-' * 19, '-' * 19),
        ]
        if self._model:
            for idx, component in enumerate(self._model):
                attr_name = slugify(component.name, valid_variable_name=True)
                rows.append(fmt % (
                    idx,
                    shorten_name(attr_name, 19),
                    shorten_name(component.name, 19),
                    shorten_name(component.__class__.__name__, 19)))
        return "\n".join(rows)
@add_gui_method(toolkey="hyperspy.Model")
class BaseModel(list):
"""Model and data fitting tools applicable to signals of both one and two
dimensions.
Models of one-dimensional signals should use the
:py:class:`~hyperspy.models.model1d` and models of two-dimensional signals
should use the :class:`~hyperspy.models.model2d`.
A model is constructed as a linear combination of
:py:mod:`~hyperspy._components` that are added to the model using the
:py:meth:`~hyperspy.model.BaseModel.append` or
:py:meth:`~hyperspy.model.BaseModel.extend`. There are many predefined
components available in the in the :py:mod:`~hyperspy._components`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:py:meth:`~hyperspy.model.BaseModel.multifit`. Once the optimizer reaches
the convergence criteria or the maximum number of iterations the new value
of the component parameters are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Attributes
----------
signal : BaseSignal instance
It contains the data to fit.
chisq : :py:class:`~.signal.BaseSignal` of float
Chi-squared of the signal (or np.nan if not yet fit)
dof : :py:class:`~.signal.BaseSignal` of int
Degrees of freedom of the signal (0 if not yet fit)
components : :py:class:`~.model.ModelComponents` instance
The components of the model are attributes of this class. This provides
a convenient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
set_signal_range, remove_signal range, reset_signal_range,
add signal_range.
Customize the signal range to fit.
fit, multifit
Fit the model to the data at the current position or the
full dataset.
save_parameters2file, load_parameters_from_file
Save/load the parameter values to/from a file.
plot
Plot the model and the data.
enable_plot_components, disable_plot_components
Plot each component separately. (Use after `plot`.)
set_current_values_to
Set the current value of all the parameters of the given component as
the value for all the dataset.
enable_adjust_position, disable_adjust_position
Enable/disable interactive adjustment of the position of the components
that have a well defined position. (Use after `plot`).
fit_component
Fit just the given component in the given signal range, that can be
set interactively.
set_parameters_not_free, set_parameters_free
Fit the `free` status of several components and parameters at once.
See also
--------
:py:class:`~hyperspy.models.model1d.Model1D`
:py:class:`~hyperspy.models.model2d.Model2D`
"""
    def __init__(self):
        # Event container for the model; `fitted` is triggered by the
        # fitting machinery after a fit step finishes with at least one
        # parameter changed. The string below is the event's runtime
        # documentation, not a docstring.
        self.events = Events()
        self.events.fitted = Event("""
        Event that triggers after fitting changed at least one parameter.
        The event triggers after the fitting step was finished, and only of
        at least one of the parameters changed.
        Arguments
        ---------
        obj : Model
            The Model that the event belongs to
        """, arguments=['obj'])
def __hash__(self):
# This is needed to simulate a hashable object so that PySide does not
# raise an exception when using windows.connect
return id(self)
def store(self, name=None):
    """Store the current model in the original signal.

    Parameters
    ----------
    name : {None, str}
        Stored model name. Auto-generated if left empty
    """
    if self.signal is None:
        raise ValueError("Cannot store models with no signal")
    # Delegate persistence to the signal's model manager.
    self.signal.models.store(self, name)
def save(self, file_name, name=None, **kwargs):
    """Save the signal together with its model to a file.

    Parameters
    ----------
    file_name : str
        Name of the file
    name : {None, str}
        Stored model name. Auto-generated if left empty
    **kwargs :
        Other keyword arguments are passed onto `BaseSignal.save()`
    """
    if self.signal is None:
        raise ValueError("Currently cannot save models with no signal")
    # Persist the model into the signal first, then write the signal.
    self.store(name)
    self.signal.save(file_name, **kwargs)
def _load_dictionary(self, dic):
    """Load data from dictionary.

    Parameters
    ----------
    dic : dict
        A dictionary containing at least the following fields:

        * _whitelist: a dictionary with keys used as references of save
          attributes, for more information, see
          :py:func:`~.misc.export_dictionary.load_from_dictionary`
        * components: a dictionary, with information about components of
          the model (see
          :py:meth:`~.component.Parameter.as_dictionary`
          documentation for more details)
        * any field from _whitelist.keys()
    """
    if 'components' in dic:
        # Start from an empty model: remove every existing component.
        while len(self) != 0:
            self.remove(self[0])
        # Maps stored parameter ids to the freshly created Parameter
        # objects so twin links can be re-established afterwards.
        id_dict = {}
        for comp in dic['components']:
            init_args = {}
            for k, flags_str in comp['_whitelist'].items():
                if not len(flags_str):
                    continue
                # Only entries flagged 'init' are constructor arguments;
                # everything else is restored by _load_dictionary below.
                if 'init' in parse_flag_string(flags_str):
                    init_args[k] = reconstruct_object(flags_str, comp[k])
            self.append(reconstruct_component(comp, **init_args))
            id_dict.update(self[-1]._load_dictionary(comp))
        # deal with twins:
        for comp in dic['components']:
            for par in comp['parameters']:
                for tw in par['_twins']:
                    id_dict[tw].twin = id_dict[par['self']]
    if '_whitelist' in dic:
        load_from_dictionary(self, dic)
def __repr__(self):
title = self.signal.metadata.General.title
class_name = str(self.__class__).split("'")[1].split('.')[-1]
if len(title):
return "<%s, title: %s>" % (
class_name, self.signal.metadata.General.title)
else:
return "<%s>" % class_name
def _get_component(self, thing):
    """Resolve `thing` (index, name, Component, or an iterable of those)
    to component(s) of this model; raise ValueError otherwise."""
    if isinstance(thing, (int, str)):
        thing = self[thing]
    elif np.iterable(thing):
        # Resolve each element recursively.
        return [self._get_component(item) for item in thing]
    elif not isinstance(thing, Component):
        raise ValueError("Not a component or component id.")
    if thing in self:
        return thing
    raise ValueError("The component is not in the model.")
def insert(self, **kwargs):
    # The inherited list.insert is deliberately disabled: components must
    # be added through `append`/`extend` so that naming, array creation
    # and plot wiring stay consistent.
    raise NotImplementedError
def append(self, thing):
    """Add component to Model.

    Parameters
    ----------
    thing: `Component` instance.
    """
    if not isinstance(thing, Component):
        raise ValueError(
            "Only `Component` instances can be added to a model")
    if thing in self:
        raise ValueError("Component already in model")
    # Make the component name unique in the model by appending a
    # "_<n>" suffix when needed.
    existing = [component.name for component in self]
    candidate = thing.name if thing.name else thing.__class__.__name__
    if candidate in existing:
        base = candidate
        suffix = 0
        while candidate in existing:
            candidate = "%s_%d" % (base, suffix)
            suffix += 1
    thing.name = candidate
    thing._axes_manager = self.axes_manager
    thing._create_arrays()
    list.append(self, thing)
    thing.model = self
    # Expose the component as an attribute for tab completion.
    setattr(self.components, slugify(candidate,
                                     valid_variable_name=True), thing)
    if self._plot_active:
        self._connect_parameters2update_plot(components=[thing])
        self.signal._plot.signal_plot.update()
def extend(self, iterable):
    """Append multiple components to the model.

    Parameters
    ----------
    iterable: iterable of `Component` instances.
    """
    # Route every element through `append` so naming and bookkeeping
    # apply. (Loop variable renamed: it previously shadowed `object`.)
    for component in iterable:
        self.append(component)
def __delitem__(self, thing):
thing = self.__getitem__(thing)
self.remove(thing)
def remove(self, thing):
    """Remove component from model.

    Parameters
    ----------
    thing : Component, str, int, or iterable of those
        Component(s) to remove, given as instance, name or index.

    Examples
    --------
    >>> s = hs.signals.Signal1D(np.empty(1))
    >>> m = s.create_model()
    >>> g = hs.model.components1D.Gaussian()
    >>> m.append(g)

    You could remove `g` like this

    >>> m.remove(g)

    Like this:

    >>> m.remove("Gaussian")

    Or like this:

    >>> m.remove(0)
    """
    thing = self._get_component(thing)
    if not np.iterable(thing):
        thing = [thing, ]
    for athing in thing:
        for parameter in athing.parameters:
            # Remove the parameter from its twin _twins
            parameter.twin = None
            # Iterate over a copy: untwinning mutates _twins.
            for twin in [twin for twin in parameter._twins]:
                twin.twin = None
        list.remove(self, athing)
        athing.model = None
    if self._plot_active:
        self.signal._plot.signal_plot.update()
def as_signal(self, component_list=None, out_of_range_to_nan=True,
              show_progressbar=None, out=None, **kwargs):
    """Returns a recreation of the dataset using the model.

    By default, the signal range outside of the fitted range is filled
    with nans.

    Parameters
    ----------
    component_list : list of HyperSpy components, optional
        If a list of components is given, only the components given in the
        list is used in making the returned spectrum. The components can
        be specified by name, index or themselves.
    out_of_range_to_nan : bool
        If True the signal range outside of the fitted range is filled with
        nans. Default True.
    %s
    out : {None, BaseSignal}
        The signal where to put the result into. Convenient for parallel
        processing. If None (default), creates a new one. If passed, it is
        assumed to be of correct shape and dtype and not checked.

    Returns
    -------
    BaseSignal : An instance of the same class as `BaseSignal`.

    Examples
    --------
    >>> s = hs.signals.Signal1D(np.random.random((10,100)))
    >>> m = s.create_model()
    >>> l1 = hs.model.components1D.Lorentzian()
    >>> l2 = hs.model.components1D.Lorentzian()
    >>> m.append(l1)
    >>> m.append(l2)
    >>> s1 = m.as_signal()
    >>> s2 = m.as_signal(component_list=[l1])
    """
    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar
    # "parallel" and "max_workers" are accepted but ignored (deprecated).
    for k in [k for k in ["parallel", "max_workers"] if k in kwargs]:
        warnings.warn(
            f"`{k}` argument has been deprecated and will be removed in HyperSpy 2.0",
            VisibleDeprecationWarning,
        )
    if out is None:
        # Start from an all-NaN array so unfitted channels stay NaN.
        data = np.empty(self.signal.data.shape, dtype='float')
        data.fill(np.nan)
        signal = self.signal.__class__(
            data,
            axes=self.signal.axes_manager._get_axes_dicts())
        signal.metadata.General.title = (
            self.signal.metadata.General.title + " from fitted model")
    else:
        signal = out
        data = signal.data
    if not out_of_range_to_nan:
        # we want the full signal range, including outside the fitted
        # range, we need to set all the channel_switches to True
        channel_switches_backup = copy.copy(self.channel_switches)
        self.channel_switches[:] = True
    self._as_signal_iter(
        component_list=component_list,
        show_progressbar=show_progressbar,
        data=data
    )
    if not out_of_range_to_nan:
        # Restore the channel_switches, previously set
        self.channel_switches[:] = channel_switches_backup
    return signal

as_signal.__doc__ %= SHOW_PROGRESSBAR_ARG
def _as_signal_iter(self, data, component_list=None,
                    show_progressbar=None):
    """Evaluate the model at every navigation index and write the result
    into `data` in place (helper for `as_signal`)."""
    # Note that show_progressbar can be an int to determine the progressbar
    # position for a thread-friendly bars. Otherwise race conditions are
    # ugly...
    if show_progressbar is None:  # pragma: no cover
        show_progressbar = preferences.General.show_progressbar
    # Temporarily force component active states so that only the
    # requested components contribute; restored on exit.
    with stash_active_state(self if component_list else []):
        if component_list:
            component_list = [self._get_component(x)
                              for x in component_list]
            for component_ in self:
                active = component_ in component_list
                if component_.active_is_multidimensional:
                    if active:
                        continue  # Keep active_map
                    component_.active_is_multidimensional = False
                component_.active = active
        maxval = self.axes_manager._get_iterpath_size()
        enabled = show_progressbar and (maxval != 0)
        # NOTE(review): the progressbar is never explicitly closed here —
        # presumably relies on `leave=True`; confirm no handle leak.
        pbar = progressbar(total=maxval, disable=not enabled,
                           position=show_progressbar, leave=True)
        for index in self.axes_manager:
            self.fetch_stored_values(only_fixed=False)
            data[self.axes_manager._getitem_tuple][
                np.where(self.channel_switches)] = self.__call__(
                non_convolved=not self.convolved, onlyactive=True).ravel()
            pbar.update(1)
@property
def _plot_active(self):
if self._plot is not None and self._plot.is_active:
return True
else:
return False
def _connect_parameters2update_plot(self, components):
if self._plot_active is False:
return
for i, component in enumerate(components):
component.events.active_changed.connect(
self._model_line._auto_update_line, [])
for parameter in component.parameters:
parameter.events.value_changed.connect(
self._model_line._auto_update_line, [])
def _disconnect_parameters2update_plot(self, components):
if self._model_line is None:
return
for component in components:
component.events.active_changed.disconnect(
self._model_line._auto_update_line)
for parameter in component.parameters:
parameter.events.value_changed.disconnect(
self._model_line._auto_update_line)
def update_plot(self, render_figure=False, update_ylimits=False, **kwargs):
    """Update model plot.

    The updating can be suspended using `suspend_update`.

    Parameters
    ----------
    render_figure : bool
        If True, re-render the figure after updating the lines.
    update_ylimits : bool
        If True, also rescale the y-axis limits.

    See Also
    --------
    suspend_update
    """
    if self._plot_active is True and self._suspend_update is False:
        try:
            if self._model_line is not None:
                self._model_line.update(render_figure=render_figure,
                                        update_ylimits=update_ylimits)
            if self._plot_components:
                for component in [component for component in self if
                                  component.active is True]:
                    self._update_component_line(component)
        except BaseException:
            # NOTE(review): deliberately broad — a drawing failure must
            # not break fitting, so the plot callbacks are unhooked.
            # This also swallows KeyboardInterrupt; confirm intended.
            self._disconnect_parameters2update_plot(components=self)
@contextmanager
def suspend_update(self, update_on_resume=True):
    """Prevent the plot from updating until the 'with' clause completes.

    Parameters
    ----------
    update_on_resume : bool
        If True, refresh positions and the plot once the clause exits
        normally.

    See Also
    --------
    update_plot
    """
    es = EventSuppressor()
    es.add(self.axes_manager.events.indices_changed)
    if self._model_line:
        f = self._model_line._auto_update_line
        for c in self:
            es.add(c.events, f)
            if c._position:
                es.add(c._position.events)
            for p in c.parameters:
                es.add(p.events, f)
    for c in self:
        if hasattr(c, '_component_line'):
            f = c._component_line._auto_update_line
            es.add(c.events, f)
            for p in c.parameters:
                es.add(p.events, f)
    old = self._suspend_update
    self._suspend_update = True
    try:
        with es.suppress():
            yield
    finally:
        # Restore the flag even if the 'with' body raised; previously an
        # exception left the model suspended forever.
        self._suspend_update = old
    if update_on_resume is True:
        for c in self:
            position = c._position
            if position:
                position.events.value_changed.trigger(
                    obj=position, value=position.value)
        self.update_plot(render_figure=True, update_ylimits=False)
def _close_plot(self):
if self._plot_components is True:
self.disable_plot_components()
self._disconnect_parameters2update_plot(components=self)
self._model_line = None
def enable_plot_components(self):
    """Plot each active component separately (no-op when there is no
    plot or components are already plotted)."""
    if self._plot is None or self._plot_components:
        return
    for component in self:
        if component.active:
            self._plot_component(component)
    self._plot_components = True
def disable_plot_components(self):
    """Stop plotting components separately (no-op without a plot)."""
    if self._plot is None:
        return
    if self._plot_components:
        for component in self:
            self._disable_plot_component(component)
    self._plot_components = False
def _set_p0(self):
"(Re)sets the initial values for the parameters used in the curve fitting functions"
self.p0 = () # Stores the values and is fed as initial values to the fitter
for component in self:
if component.active:
for parameter in component.free_parameters:
self.p0 = (self.p0 + (parameter.value,)
if parameter._number_of_elements == 1
else self.p0 + parameter.value)
def set_boundaries(self, bounded=True):
    """Deprecated public wrapper around `_set_boundaries`; emits a
    VisibleDeprecationWarning and delegates unchanged."""
    warnings.warn(
        "`set_boundaries()` has been deprecated and "
        "will be made private in HyperSpy 2.0.",
        VisibleDeprecationWarning,
    )
    self._set_boundaries(bounded=bounded)
def _set_boundaries(self, bounded=True):
"""Generate the boundary list.
Necessary before fitting with a boundary aware optimizer.
Parameters
----------
bounded : bool, default True
If True, loops through the model components and
populates the free parameter boundaries.
Returns
-------
None
"""
if not bounded:
self.free_parameters_boundaries = None
else:
self.free_parameters_boundaries = []
for component in self:
if component.active:
for param in component.free_parameters:
if param._number_of_elements == 1:
self.free_parameters_boundaries.append((param._bounds))
else:
self.free_parameters_boundaries.extend((param._bounds))
def _bounds_as_tuple(self):
"""Converts parameter bounds to tuples for least_squares()"""
if self.free_parameters_boundaries is None:
return (-np.inf, np.inf)
return tuple(
(a if a is not None else -np.inf, b if b is not None else np.inf)
for a, b in self.free_parameters_boundaries
)
def set_mpfit_parameters_info(self, bounded=True):
    """Deprecated public wrapper around `_set_mpfit_parameters_info`;
    emits a VisibleDeprecationWarning and delegates unchanged."""
    warnings.warn(
        "`set_mpfit_parameters_info()` has been deprecated and "
        "will be made private in HyperSpy 2.0.",
        VisibleDeprecationWarning,
    )
    self._set_mpfit_parameters_info(bounded=bounded)
def _set_mpfit_parameters_info(self, bounded=True):
"""Generate the boundary list for mpfit.
Parameters
----------
bounded : bool, default True
If True, loops through the model components and
populates the free parameter boundaries.
Returns
-------
None
"""
if not bounded:
self.mpfit_parinfo = None
else:
self.mpfit_parinfo = []
for component in self:
if component.active:
for param in component.free_parameters:
limited = [False, False]
limits = [0, 0]
if param.bmin is not None:
limited[0] = True
limits[0] = param.bmin
if param.bmax is not None:
limited[1] = True
limits[1] = param.bmax
if param._number_of_elements == 1:
self.mpfit_parinfo.append(
{"limited": limited, "limits": limits}
)
else:
self.mpfit_parinfo.extend(
({"limited": limited, "limits": limits},)
* param._number_of_elements
)
def ensure_parameters_in_bounds(self):
    """For all active components, snap free parameter values into their
    [bmin, bmax] interval (if bounded). The stored map of values is not
    touched."""
    for component in self:
        if not component.active:
            continue
        for param in component.free_parameters:
            low = -np.inf if param.bmin is None else param.bmin
            high = np.inf if param.bmax is None else param.bmax
            if param._number_of_elements == 1:
                if not low <= param.value <= high:
                    # Snap to whichever bound is closer (ties go high).
                    if np.abs(param.value - low) < np.abs(param.value - high):
                        param.value = low
                    else:
                        param.value = high
            else:
                values = np.array(param.value)
                if param.bmin is not None:
                    values[values < low] = low
                if param.bmax is not None:
                    values[values > high] = high
                param.value = tuple(values)
def store_current_values(self):
    """Store the parameters of the current coordinates into the
    `parameter.map` array and set the `is_set` array attribute to True.

    If a parameter array has not been defined yet, it is created and
    filled with the current parameter values at the current indices.
    """
    active_components = (c for c in self if c.active)
    for component in active_components:
        component.store_current_parameters_in_map()
def fetch_stored_values(self, only_fixed=False, update_on_resume=True):
    """Fetch parameter values previously stored in
    `parameter.map['values']` wherever `parameter.map['is_set']` is True.

    Where nothing was stored, the current `parameter.value` is kept,
    which is typically the fit result from the previous pixel of a
    multidimensional signal.

    Parameters
    ----------
    only_fixed : bool, optional
        If True, only the fixed parameters are fetched.
    update_on_resume : bool, optional
        If True, update the model plot after values are updated.

    See Also
    --------
    store_current_values
    """
    # Suspend plot updates while fetching to avoid one redraw per value.
    if self._plot_active:
        cm = self.suspend_update
    else:
        cm = dummy_context_manager
    with cm(update_on_resume=update_on_resume):
        for component in self:
            component.fetch_stored_values(only_fixed=only_fixed)
def _on_navigating(self):
"""Same as fetch_stored_values but without update_on_resume since
the model plot is updated in the figure update callback.
"""
self.fetch_stored_values(only_fixed=False, update_on_resume=False)
def fetch_values_from_array(self, array, array_std=None):
    """Fetch parameter values (and optionally standard deviations) from
    `array` into both `m.p0` and the parameters of active components,
    ordered by their position in the model and component.

    Parameters
    ----------
    array : array
        array with the parameter values
    array_std : {None, array}
        array with the standard deviations of parameters
    """
    self.p0 = array
    self._fetch_values_from_p0(p_std=array_std)
def _fetch_values_from_p0(self, p_std=None):
"""Fetch the parameter values from the output of the optimizer `self.p0`,
placing them in their appropriate `component.parameter.value` and `...std`
Parameters
----------
p_std : array, optional
array containing the corresponding standard deviation.
"""
comp_p_std = None
counter = 0
for component in self: # Cut the parameters list
if component.active is True:
if p_std is not None:
comp_p_std = p_std[
counter: counter +
component._nfree_param]
component.fetch_values_from_array(
self.p0[counter: counter + component._nfree_param],
comp_p_std, onlyfree=True)
counter += component._nfree_param
def _model2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.axes_manager:
old_axes_manager = self.axes_manager
self.axes_manager = axes_manager
self.fetch_stored_values()
s = self.__call__(non_convolved=False, onlyactive=True)
if old_axes_manager is not None:
self.axes_manager = old_axes_manager
self.fetch_stored_values()
if out_of_range2nans is True:
ns = np.empty(self.axis.axis.shape)
ns.fill(np.nan)
ns[np.where(self.channel_switches)] = s
s = ns
return s
def _model_function(self, param):
self.p0 = param
self._fetch_values_from_p0()
to_return = self.__call__(non_convolved=False, onlyactive=True)
return to_return
def _errfunc_sq(self, param, y, weights=None):
if weights is None:
weights = 1.0
return ((weights * self._errfunc(param, y)) ** 2).sum()
def _errfunc4mpfit(self, p, fjac=None, x=None, y=None, weights=None):
if fjac is None:
errfunc = self._model_function(p).ravel() - y
if weights is not None:
errfunc *= weights.ravel()
status = 0
return [status, errfunc]
else:
return [0, self._jacobian(p, y).T]
def _get_variance(self, only_current=True):
"""Return the variance taking into account the `channel_switches`.
If only_current=True, the variance for the current navigation indices
is returned, otherwise the variance for all navigation indices is
returned.
"""
variance = self.signal.get_noise_variance()
if variance is not None:
if isinstance(variance, BaseSignal):
if only_current:
variance = variance.data.__getitem__(
self.axes_manager._getitem_tuple
)[np.where(self.channel_switches)]
else:
variance = variance.data[..., np.where(
self.channel_switches)[0]]
else:
variance = 1.0
return variance
def _calculate_chisq(self):
variance = self._get_variance()
d = self(onlyactive=True).ravel() - self.signal()[np.where(
self.channel_switches)]
d *= d / (1. * variance) # d = difference^2 / variance.
self.chisq.data[self.signal.axes_manager.indices[::-1]] = d.sum()
def _set_current_degrees_of_freedom(self):
self.dof.data[self.signal.axes_manager.indices[::-1]] = len(self.p0)
@property
def red_chisq(self):
    """:py:class:`~.signal.BaseSignal`: Reduced chi-squared.

    Calculated from ``self.chisq`` and ``self.dof`` as
    ``chisq / (number of fitted channels - dof - 1)``.
    """
    tmp = self.chisq / (- self.dof + self.channel_switches.sum() - 1)
    tmp.metadata.General.title = self.signal.metadata.General.title + \
        ' reduced chi-squared'
    return tmp
def _calculate_parameter_std(self, pcov, cost, ysize):
warn_cov = False
if pcov is None: # Indeterminate covariance
p_var = np.zeros(len(self.p0), dtype=float)
p_var.fill(np.nan)
warn_cov = True
elif isinstance(pcov, np.ndarray):
p_var = np.diag(pcov).astype(float) if pcov.ndim > 1 else pcov.astype(float)
if p_var.min() < 0 or np.any(np.isnan(p_var)) or np.any(np.isinf(p_var)):
# Numerical overflow on diagonal
p_var.fill(np.nan)
warn_cov = True
elif ysize > self.p0.size:
p_var *= cost / (ysize - self.p0.size)
p_var = np.sqrt(p_var)
else:
p_var.fill(np.nan)
warn_cov = True
else:
raise ValueError(f"pcov should be None or np.ndarray, got {type(pcov)}")
if warn_cov:
_logger.warning(
"Covariance of the parameters could not be estimated. "
"Estimated parameter standard deviations will be np.nan."
)
return p_var
def _convert_variance_to_weights(self):
weights = None
variance = self.signal.get_noise_variance()
if variance is not None:
if isinstance(variance, BaseSignal):
variance = variance.data.__getitem__(self.axes_manager._getitem_tuple)[
np.where(self.channel_switches)
]
_logger.info("Setting weights to 1/variance of signal noise")
# Note that we square this later in self._errfunc_sq()
weights = 1.0 / np.sqrt(variance)
return weights
def fit(
    self,
    optimizer="lm",
    loss_function="ls",
    grad="fd",
    bounded=False,
    update_plot=False,
    print_info=False,
    return_info=True,
    fd_scheme="2-point",
    **kwargs,
):
    """Fits the model to the experimental data.

    Read more in the :ref:`User Guide <model.fitting>`.

    Parameters
    ----------
    %s

    Returns
    -------
    None

    Notes
    -----
    The chi-squared and reduced chi-squared statistics, and the
    degrees of freedom, are computed automatically when fitting,
    only when `loss_function="ls"`. They are stored as signals:
    ``chisq``, ``red_chisq`` and ``dof``.

    If the attribute ``metadata.Signal.Noise_properties.variance``
    is defined as a ``Signal`` instance with the same
    ``navigation_dimension`` as the signal, and ``loss_function``
    is ``"ls"`` or ``"huber"``, then a weighted fit is performed,
    using the inverse of the noise variance as the weights.

    Note that for both homoscedastic and heteroscedastic noise, if
    ``metadata.Signal.Noise_properties.variance`` does not contain
    an accurate estimation of the variance of the data, then the
    chi-squared and reduced chi-squared statistics will not be
    computed correctly. See the :ref:`Setting the noise properties
    <signal.noise_properties>` in the User Guide for more details.

    See Also
    --------
    * :py:meth:`~hyperspy.model.BaseModel.multifit`
    * :py:meth:`~hyperspy.model.EELSModel.fit`
    """
    # Suspend plot updating during the fit unless live updates were
    # explicitly requested.
    cm = (
        self.suspend_update
        if (update_plot != self._plot_active) and not update_plot
        else dummy_context_manager
    )

    # ---------------------------------------------
    # Deprecated arguments (remove in HyperSpy 2.0)
    # ---------------------------------------------

    # Deprecate "fitter" argument
    check_fitter = kwargs.pop("fitter", None)
    if check_fitter:
        warnings.warn(
            f"`fitter='{check_fitter}'` has been deprecated and will be removed "
            f"in HyperSpy 2.0. Please use `optimizer='{check_fitter}'` instead.",
            VisibleDeprecationWarning,
        )
        optimizer = check_fitter

    # Deprecated optimization algorithms
    optimizer = _check_deprecated_optimizer(optimizer)

    # Deprecate loss_function
    if loss_function == "ml":
        warnings.warn(
            "`loss_function='ml'` has been deprecated and will be removed in "
            "HyperSpy 2.0. Please use `loss_function='ML-poisson'` instead.",
            VisibleDeprecationWarning,
        )
        loss_function = "ML-poisson"

    # Deprecate grad=True/False
    if isinstance(grad, bool):
        alt_grad = "analytical" if grad else None
        warnings.warn(
            f"`grad={grad}` has been deprecated and will be removed in "
            f"HyperSpy 2.0. Please use `grad={alt_grad}` instead.",
            VisibleDeprecationWarning,
        )
        grad = alt_grad

    # Deprecate ext_bounding
    ext_bounding = kwargs.pop("ext_bounding", False)
    if ext_bounding:
        warnings.warn(
            "`ext_bounding=True` has been deprecated and will be removed "
            "in HyperSpy 2.0. Please use `bounded=True` instead.",
            VisibleDeprecationWarning,
        )

    # Deprecate custom min_function
    min_function = kwargs.pop("min_function", None)
    if min_function:
        warnings.warn(
            "`min_function` has been deprecated and will be removed "
            "in HyperSpy 2.0. Please use `loss_function` instead.",
            VisibleDeprecationWarning,
        )
        loss_function = min_function

    # Deprecate custom min_function_grad
    min_function_grad = kwargs.pop("min_function_grad", None)
    if min_function_grad:
        warnings.warn(
            "`min_function_grad` has been deprecated and will be removed "
            "in HyperSpy 2.0. Please use `grad` instead.",
            VisibleDeprecationWarning,
        )
        grad = min_function_grad

    # ---------------------------
    # End of deprecated arguments
    # ---------------------------

    # Supported losses and optimizers
    _supported_global = {
        "Differential Evolution": differential_evolution,
    }

    if optimizer in ["Dual Annealing", "SHGO"]:
        if Version(scipy.__version__) < Version("1.2.0"):
            raise ValueError(f"`optimizer='{optimizer}'` requires scipy >= 1.2.0")
        # Imported lazily because these require a newer scipy.
        from scipy.optimize import dual_annealing, shgo

        _supported_global.update({"Dual Annealing": dual_annealing, "SHGO": shgo})

    _supported_fd_schemes = ["2-point", "3-point", "cs"]
    _supported_losses = ["ls", "ML-poisson", "huber"]
    _supported_bounds = [
        "lm",
        "trf",
        "dogbox",
        "Powell",
        "TNC",
        "L-BFGS-B",
        "SLSQP",
        "trust-constr",
        "Differential Evolution",
        "Dual Annealing",
        "SHGO",
    ]
    _supported_deriv_free = [
        "Powell",
        "COBYLA",
        "Nelder-Mead",
        "SLSQP",
        "trust-constr",
    ]

    # Validate arguments
    if bounded:
        if optimizer not in _supported_bounds:
            raise ValueError(
                f"Bounded optimization is only supported by "
                f"'{_supported_bounds}', not '{optimizer}'."
            )
        # This has to be done before setting p0
        self.ensure_parameters_in_bounds()

    # Check validity of loss_function argument
    if callable(loss_function):
        loss_function = partial(loss_function, self)
    elif loss_function not in _supported_losses:
        raise ValueError(
            f"loss_function must be one of {_supported_losses} "
            f"or callable, not '{loss_function}'"
        )
    elif loss_function != "ls" and optimizer in ["lm", "trf", "dogbox", "odr"]:
        raise NotImplementedError(
            f"`optimizer='{optimizer}'` only supports "
            "least-squares fitting (`loss_function='ls'`)"
        )

    # Initialize print_info
    if print_info:
        to_print = [
            "Fit info:",
            f"  optimizer={optimizer}",
            f"  loss_function={loss_function}",
            f"  bounded={bounded}",
            f"  grad={grad}",
        ]

    # Don't let user pass "jac" kwarg since
    # it will clash with "grad" argument
    jac = kwargs.pop("jac", None)
    if jac:
        _logger.warning(
            f"`jac={jac}` keyword argument is not supported. "
            f"Please use `grad={jac}` instead."
        )
        grad = jac

    # Check validity of grad and fd_scheme arguments
    if grad == "analytical":
        _has_gradient, _jac_err_msg = self._check_analytical_jacobian()
        if not _has_gradient:
            # Alert the user that analytical gradients
            # are not supported (and the reason why)
            raise ValueError(f"`grad='analytical' is not supported: {_jac_err_msg}")
    elif callable(grad):
        grad = partial(grad, self)
    elif grad == "fd":
        if optimizer in ["lm", "odr"]:
            grad = None
        elif optimizer in _supported_deriv_free:
            # Setting it to None here avoids unnecessary warnings
            # from `scipy.optimize.minimize`
            grad = None
        else:
            if fd_scheme not in _supported_fd_schemes:
                raise ValueError(
                    "`fd_scheme` must be one of "
                    f"{_supported_fd_schemes}, not '{fd_scheme}'"
                )
            grad = fd_scheme
    elif grad is None:
        if optimizer in ["lm", "trf", "dogbox"]:
            # `scipy.optimize.least_squares` does not accept None as
            # an argument. `scipy.optimize.leastsq` will ALWAYS estimate
            # the Jacobian even if Dfun=None. `mpfit` can support no
            # differentiation, but for consistency across all three
            # we enforce estimation below, and raise an error here.
            raise ValueError(
                f"`optimizer='{optimizer}'` does not support `grad=None`."
            )
    else:
        raise ValueError(
            "`grad` must be one of ['analytical', callable, None], not "
            f"'{grad}'."
        )

    with cm(update_on_resume=True):
        self.p_std = None
        self._set_p0()
        old_p0 = self.p0

        if ext_bounding:
            self._enable_ext_bounding()

        # Get weights if metadata.Signal.Noise_properties.variance
        # has been set, otherwise this returns None
        weights = self._convert_variance_to_weights()

        if weights is not None and loss_function == "ML-poisson":
            # The attribute ``metadata.Signal.Noise_properties.variance`` is set,
            # but weighted fitting is not supported for `loss_function='ml_poisson'`.
            # Will proceed with unweighted fitting.
            weights = None

        args = (self.signal()[np.where(self.channel_switches)], weights)

        if optimizer == "lm":
            if bounded:
                # Bounded Levenberg-Marquardt algorithm is supported
                # using the `mpfit` function (bundled with HyperSpy)
                self._set_mpfit_parameters_info(bounded=bounded)

                # We enforce estimation of the Jacobian if no
                # analytical gradients available for consistency
                # with `scipy.optimize.leastsq`
                auto_deriv = 0 if grad == "analytical" else 1

                res = mpfit(
                    self._errfunc4mpfit,
                    self.p0[:],
                    parinfo=self.mpfit_parinfo,
                    functkw={
                        "y": self.signal()[self.channel_switches],
                        "weights": weights,
                    },
                    autoderivative=auto_deriv,
                    quiet=1,
                    **kwargs,
                )

                # Return as an OptimizeResult object
                self.fit_output = res.optimize_result

                self.p0 = self.fit_output.x
                ysize = len(self.fit_output.x) + self.fit_output.dof
                cost = self.fit_output.fnorm
                pcov = self.fit_output.perror ** 2

                # Calculate estimated parameter standard deviation
                self.p_std = self._calculate_parameter_std(pcov, cost, ysize)
            else:
                # Unbounded Levenberg-Marquardt algorithm is supported
                # using the `scipy.optimize.leastsq` function. Note that
                # Dfun=None means the gradient is always estimated here.
                grad = self._jacobian if grad == "analytical" else None

                res = leastsq(
                    self._errfunc,
                    self.p0[:],
                    Dfun=grad,
                    col_deriv=1,
                    args=args,
                    full_output=True,
                    **kwargs,
                )
                self.fit_output = OptimizeResult(
                    x=res[0],
                    covar=res[1],
                    fun=res[2]["fvec"],
                    nfev=res[2]["nfev"],
                    success=res[4] in [1, 2, 3, 4],
                    status=res[4],
                    message=res[3],
                )

                self.p0 = self.fit_output.x
                ysize = len(self.fit_output.fun)
                cost = np.sum(self.fit_output.fun ** 2)
                pcov = self.fit_output.covar

                # Calculate estimated parameter standard deviation
                self.p_std = self._calculate_parameter_std(pcov, cost, ysize)

        elif optimizer in ["trf", "dogbox"]:
            self._set_boundaries(bounded=bounded)

            def _wrap_jac(*args, **kwargs):
                # Our Jacobian function computes derivatives along
                # columns, so we need the transpose instead here
                return self._jacobian(*args, **kwargs).T

            grad = _wrap_jac if grad == "analytical" else grad

            self.fit_output = least_squares(
                self._errfunc,
                self.p0[:],
                args=args,
                bounds=self._bounds_as_tuple(),
                jac=grad,
                method=optimizer,
                **kwargs,
            )

            self.p0 = self.fit_output.x
            ysize = len(self.fit_output.fun)
            jac = self.fit_output.jac
            cost = 2 * self.fit_output.cost

            # Do Moore-Penrose inverse, discarding zero singular values
            # to get pcov (as per scipy.optimize.curve_fit())
            _, s, VT = svd(jac, full_matrices=False)
            threshold = np.finfo(float).eps * max(jac.shape) * s[0]
            s = s[s > threshold]
            VT = VT[: s.size]
            pcov = np.dot(VT.T / s ** 2, VT)

            # Calculate estimated parameter standard deviation
            self.p_std = self._calculate_parameter_std(pcov, cost, ysize)

        elif optimizer == "odr":
            if not hasattr(self, "axis"):
                raise NotImplementedError(
                    "`optimizer='odr'` is not implemented for Model2D"
                )

            odr_jacobian = self._jacobian4odr if grad == "analytical" else None

            modelo = odr.Model(fcn=self._function4odr, fjacb=odr_jacobian)
            mydata = odr.RealData(
                self.axis.axis[np.where(self.channel_switches)],
                self.signal()[np.where(self.channel_switches)],
                sx=None,
                sy=(1.0 / weights if weights is not None else None),
            )
            myodr = odr.ODR(mydata, modelo, beta0=self.p0[:], **kwargs)
            res = myodr.run()

            dd = {
                "x": res.beta,
                "perror": res.sd_beta,
                "covar": res.cov_beta,
            }
            if hasattr(res, "info"):
                dd["status"] = res.info
                dd["message"] = ", ".join(res.stopreason)
                # Note that a value of 5 means maximum iterations reached
                dd["success"] = (res.info >= 0) and (res.info < 4)

            self.fit_output = OptimizeResult(**dd)

            self.p0 = self.fit_output.x
            self.p_std = self.fit_output.perror

        else:
            # scipy.optimize.* functions
            if loss_function == "ls":
                f_min = self._errfunc_sq
                f_der = self._gradient_ls if grad == "analytical" else grad
            elif loss_function == "ML-poisson":
                f_min = self._poisson_likelihood_function
                f_der = self._gradient_ml if grad == "analytical" else grad
            elif loss_function == "huber":
                f_min = self._huber_loss_function
                f_der = self._gradient_huber if grad == "analytical" else grad
                huber_delta = kwargs.pop("huber_delta", 1.0)
                args = args + (huber_delta,)
            elif callable(loss_function):
                f_min = loss_function
                f_der = grad

            self._set_boundaries(bounded=bounded)

            if optimizer in _supported_global:
                de_b = self._bounds_as_tuple()

                if np.any(~np.isfinite(de_b)):
                    raise ValueError(
                        "Finite upper and lower bounds must be specified "
                        "using `bmin/bmax` for every free parameter and "
                        "`bounded=True` needs to be set as argument of "
                        f"`m.fit()` when using `optimizer='{optimizer}'`."
                    )

                self.fit_output = _supported_global[optimizer](
                    f_min, de_b, args=args, **kwargs
                )
            else:
                self.fit_output = minimize(
                    f_min,
                    self.p0,
                    jac=f_der,
                    args=args,
                    method=optimizer,
                    bounds=self.free_parameters_boundaries,
                    **kwargs,
                )

            self.p0 = self.fit_output.x

        if np.iterable(self.p0) == 0:
            self.p0 = (self.p0,)

        self._fetch_values_from_p0(p_std=self.p_std)
        self.store_current_values()
        self._calculate_chisq()
        self._set_current_degrees_of_freedom()

        if ext_bounding:
            self._disable_ext_bounding()

    if np.any(old_p0 != self.p0):
        self.events.fitted.trigger(self)

    # Print details about the fit we just performed
    if print_info:
        output_print = copy.copy(self.fit_output)
        # Drop these as they can be large (== size of data array)
        output_print.pop("fun", None)
        output_print.pop("jac", None)
        to_print.extend(["Fit result:", output_print])
        print("\n".join([str(pr) for pr in to_print]))

    # Check if the optimization actually succeeded
    success = self.fit_output.get("success", None)
    if success is False:
        message = self.fit_output.get("message", "Unknown reason")
        _logger.warning(f"`m.fit()` did not exit successfully. Reason: {message}")

    # Return info
    if return_info:
        return self.fit_output
    else:
        return None

fit.__doc__ %= FIT_PARAMETERS_ARG
def multifit(
    self,
    mask=None,
    fetch_only_fixed=False,
    autosave=False,
    autosave_every=10,
    show_progressbar=None,
    interactive_plot=False,
    iterpath=None,
    **kwargs,
):
    """Fit the data to the model at all positions of the navigation dimensions.

    Parameters
    ----------
    mask : np.ndarray, optional
        To mask (i.e. do not fit) at certain position, pass a boolean
        numpy.array, where True indicates that the data will NOT be
        fitted at the given position.
    fetch_only_fixed : bool, default False
        If True, only the fixed parameters values will be updated
        when changing the positon.
    autosave : bool, default False
        If True, the result of the fit will be saved automatically
        with a frequency defined by autosave_every.
    autosave_every : int, default 10
        Save the result of fitting every given number of spectra.
    %s
    interactive_plot : bool, default False
        If True, update the plot for every position as they are processed.
        Note that this slows down the fitting by a lot, but it allows for
        interactive monitoring of the fitting (if in interactive mode).
    iterpath : {None, "flyback", "serpentine"}, default None
        If "flyback":
            At each new row the index begins at the first column,
            in accordance with the way :py:class:`numpy.ndindex` generates
            indices.
        If "serpentine":
            Iterate through the signal in a serpentine, "snake-game"-like
            manner instead of beginning each new row at the first index.
            Works for n-dimensional navigation space, not just 2D.
        If None:
            Currently ``None -> "flyback"``. The default argument will use
            the ``"flyback"`` iterpath, but shows a warning that this will
            change to ``"serpentine"`` in version 2.0.
    **kwargs : keyword arguments
        Any extra keyword argument will be passed to the fit method.
        See the documentation for :py:meth:`~hyperspy.model.BaseModel.fit`
        for a list of valid arguments.

    Returns
    -------
    None

    See Also
    --------
    * :py:meth:`~hyperspy.model.BaseModel.fit`
    """
    if show_progressbar is None:
        show_progressbar = preferences.General.show_progressbar
    if autosave:
        # Reserve a unique file in the working directory.  Only the stem is
        # kept because np.savez (used by save_parameters2file) appends the
        # ".npz" suffix itself.
        fd, autosave_fn = tempfile.mkstemp(
            prefix="hyperspy_autosave-", dir=".", suffix=".npz"
        )
        os.close(fd)
        autosave_fn = autosave_fn[:-4]
        _logger.info(
            f"Autosaving every {autosave_every} pixels to {autosave_fn}.npz. "
            "When multifit finishes, this file will be deleted."
        )
    if mask is not None and (
        mask.shape != tuple(self.axes_manager._navigation_shape_in_array)
    ):
        raise ValueError(
            "The mask must be a numpy array of boolean type with "
            f"shape: {self.axes_manager._navigation_shape_in_array}"
        )
    if iterpath is None:
        if self.axes_manager.iterpath == "flyback":
            # flyback is set by default in axes_manager.iterpath on signal creation
            warnings.warn(
                "The `iterpath` default will change from 'flyback' to 'serpentine' "
                "in HyperSpy version 2.0. Change the 'iterpath' argument to other than "
                "None to suppress this warning.",
                VisibleDeprecationWarning,
            )
        # otherwise use whatever is set at m.axes_manager.iterpath
    else:
        self.axes_manager.iterpath = iterpath
    masked_elements = 0 if mask is None else mask.sum()
    maxval = self.axes_manager._get_iterpath_size(masked_elements)
    # Disable the bar when there is nothing to iterate over.
    show_progressbar = show_progressbar and (maxval != 0)
    i = 0
    # Suppress the automatic fetch on index change: values are fetched
    # explicitly inside the loop (with only_fixed control), so the callback
    # would be redundant work.
    with self.axes_manager.events.indices_changed.suppress_callback(
        self.fetch_stored_values
    ):
        if interactive_plot:
            outer = dummy_context_manager
            inner = self.suspend_update
        else:
            outer = self.suspend_update
            inner = dummy_context_manager
        with outer(update_on_resume=True):
            with progressbar(
                total=maxval, disable=not show_progressbar, leave=True
            ) as pbar:
                for index in self.axes_manager:
                    with inner(update_on_resume=True):
                        # mask is indexed in array (reversed) order
                        if mask is None or not mask[index[::-1]]:
                            # first check if model has set initial values in
                            # parameters.map['values'][indices],
                            # otherwise use values from previous fit
                            self.fetch_stored_values(only_fixed=fetch_only_fixed)
                            self.fit(**kwargs)
                            i += 1
                            pbar.update(1)
                        if autosave and i % autosave_every == 0:
                            self.save_parameters2file(autosave_fn)
    # Trigger the indices_changed event to update to current indices,
    # since the callback was suppressed
    self.axes_manager.events.indices_changed.trigger(self.axes_manager)
    if autosave is True:
        _logger.info(f"Deleting temporary file: {autosave_fn}.npz")
        os.remove(autosave_fn + ".npz")

multifit.__doc__ %= (SHOW_PROGRESSBAR_ARG)
def save_parameters2file(self, filename):
    """Save the parameters array in binary format.

    The data is saved to a single file in numpy's uncompressed ``.npz``
    format.  Each array is stored under the key
    ``"<index>_<component name>.<parameter name>"`` with spaces replaced by
    underscores and everything lower-cased.

    Parameters
    ----------
    filename : str

    See Also
    --------
    load_parameters_from_file, export_results

    Notes
    -----
    This method can be used to save the current state of the model in a way
    that can be loaded back to recreate it using `load_parameters_from_file`.
    However, this is known to be brittle. For example see
    https://github.com/hyperspy/hyperspy/issues/341.
    """
    arrays = {}
    for idx, component in enumerate(self):
        comp_key = component.name.lower().replace(' ', '_')
        for parameter in component.parameters:
            par_key = parameter.name.lower().replace(' ', '_')
            arrays['%s_%s.%s' % (idx, comp_key, par_key)] = parameter.map
    np.savez(filename, **arrays)
def load_parameters_from_file(self, filename):
    """Loads the parameters array from a binary file written with the
    'save_parameters2file' function.

    Parameters
    ----------
    filename : str

    See Also
    --------
    save_parameters2file, export_results

    Notes
    -----
    In combination with `save_parameters2file`, this method can be used to
    recreate a model stored in a file.  The key layout must match the one
    produced by `save_parameters2file`; this is known to be brittle (see
    https://github.com/hyperspy/hyperspy/issues/341).
    """
    archive = np.load(filename)
    for idx, component in enumerate(self):
        comp_key = component.name.lower().replace(' ', '_')
        for parameter in component.parameters:
            par_key = parameter.name.lower().replace(' ', '_')
            parameter.map = archive['%s_%s.%s' % (idx, comp_key, par_key)]
    # Push the freshly loaded map values into the current parameter values.
    self.fetch_stored_values()
def assign_current_values_to_all(self, components_list=None, mask=None):
    """Set parameter values for all positions to the current ones.

    Parameters
    ----------
    components_list : list of components, optional
        If a list of components is given, the operation will be performed
        only in the value of the parameters of the given components.
        The components can be specified by name, index or themselves.
        If None, all active components are used.
    mask : boolean numpy array or None, optional
        The operation won't be performed where mask is True.
    """
    if components_list is None:
        targets = [comp for comp in self if comp.active]
    else:
        targets = [self._get_component(item) for item in components_list]
    for component in targets:
        for parameter in component.parameters:
            parameter.assign_current_value_to_all(mask=mask)
def _enable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = True
def _disable_ext_bounding(self, components=None):
"""
"""
if components is None:
components = self
for component in components:
for parameter in component.parameters:
parameter.ext_bounded = False
def export_results(self, folder=None, format="hspy", save_std=False,
                   only_free=True, only_active=True):
    """Export the results of the parameters of the model to the desired
    folder.

    Parameters
    ----------
    folder : str or None
        The path to the folder where the file will be saved. If `None` the
        current folder is used by default.
    format : str
        The extension of the file format. It must be one of the
        fileformats supported by HyperSpy. The default is "hspy".
    save_std : bool
        If True, also the standard deviation will be saved.
    only_free : bool
        If True, only the value of the parameters that are free will be
        exported.
    only_active : bool
        If True, only the value of the active parameters will be exported.

    Notes
    -----
    The name of the files will be determined by each the Component and
    each Parameter name attributes. Therefore, it is possible to customise
    the file names modify the name attributes.
    """
    # Keep the exact ``only_active is False`` test of the original API.
    targets = (c for c in self if only_active is False or c.active)
    for component in targets:
        component.export(folder=folder, format=format,
                         save_std=save_std, only_free=only_free)
def plot_results(self, only_free=True, only_active=True):
    """Plot the value of the parameters of the model.

    Parameters
    ----------
    only_free : bool
        If True, only the value of the parameters that are free will be
        plotted.
    only_active : bool
        If True, only the value of the active parameters will be plotted.

    Notes
    -----
    The name of the files will be determined by each the Component and
    each Parameter name attributes. Therefore, it is possible to customise
    the file names modify the name attributes.
    """
    # Keep the exact ``only_active is False`` test of the original API.
    targets = (c for c in self if only_active is False or c.active)
    for component in targets:
        component.plot(only_free=only_free)
def print_current_values(self, only_free=False, only_active=False,
                         component_list=None, fancy=True):
    """Prints the current values of the parameters of all components.

    Parameters
    ----------
    only_free : bool
        If True, only components with free parameters will be printed.
        Within these, only parameters which are free will be printed.
    only_active : bool
        If True, only values of active components will be printed.
    component_list : None or list of components.
        If None, print all components.
    fancy : bool
        If True, attempts to print using html rather than text in the
        notebook.
    """
    table = current_model_values(
        model=self, only_free=only_free, only_active=only_active,
        component_list=component_list)
    # display() renders rich (html) output, display_pretty() plain text.
    if fancy:
        display(table)
    else:
        display_pretty(table)
def set_parameters_not_free(self, component_list=None,
                            parameter_name_list=None):
    """Sets the parameters in a component in a model to not free.

    Parameters
    ----------
    component_list : None, or list of hyperspy components, optional
        If None, will apply the function to all components in the model.
        If list of components, will apply the functions to the components
        in the list. The components can be specified by name, index or
        themselves.
    parameter_name_list : None or list of strings, optional
        If None, will set all the parameters to not free.
        If list of strings, will set all the parameters with the same name
        as the strings in parameter_name_list to not free.

    Examples
    --------
    >>> v1 = hs.model.components1D.Voigt()
    >>> m.append(v1)
    >>> m.set_parameters_not_free()
    >>> m.set_parameters_not_free(component_list=[v1],
                                  parameter_name_list=['area','centre'])

    See also
    --------
    set_parameters_free
    hyperspy.component.Component.set_parameters_free
    hyperspy.component.Component.set_parameters_not_free
    """
    if component_list:
        targets = [self._get_component(item) for item in component_list]
    else:
        targets = list(self)
    for component in targets:
        component.set_parameters_not_free(parameter_name_list)
def set_parameters_free(self, component_list=None,
                        parameter_name_list=None):
    """Sets the parameters in a component in a model to free.

    Parameters
    ----------
    component_list : None, or list of hyperspy components, optional
        If None, will apply the function to all components in the model.
        If list of components, will apply the functions to the components
        in the list. The components can be specified by name, index or
        themselves.
    parameter_name_list : None or list of strings, optional
        If None, will set all the parameters to free.
        If list of strings, will set all the parameters with the same name
        as the strings in parameter_name_list to free.

    Examples
    --------
    >>> v1 = hs.model.components1D.Voigt()
    >>> m.append(v1)
    >>> m.set_parameters_free()
    >>> m.set_parameters_free(component_list=[v1],
                              parameter_name_list=['area','centre'])

    See also
    --------
    set_parameters_not_free
    hyperspy.component.Component.set_parameters_free
    hyperspy.component.Component.set_parameters_not_free
    """
    if component_list:
        targets = [self._get_component(item) for item in component_list]
    else:
        targets = list(self)
    for component in targets:
        component.set_parameters_free(parameter_name_list)
def set_parameters_value(
        self,
        parameter_name,
        value,
        component_list=None,
        only_current=False):
    """Sets the value of a parameter in components in a model to a
    specified value.

    Parameters
    ----------
    parameter_name : string
        Name of the parameter whose value will be changed
    value : number
        The new value of the parameter
    component_list : list of hyperspy components, optional
        A list of components whose parameters will changed. The components
        can be specified by name, index or themselves.
    only_current : bool, default False
        If True, will only change the parameter value at the current
        position in the model.
        If False, will change the parameter value for all the positions.

    Examples
    --------
    >>> v1 = hs.model.components1D.Voigt()
    >>> v2 = hs.model.components1D.Voigt()
    >>> m.extend([v1,v2])
    >>> m.set_parameters_value('area', 5)
    >>> m.set_parameters_value('area', 5, component_list=[v1])
    >>> m.set_parameters_value('area', 5, component_list=[v1],
                               only_current=True)
    """
    if component_list:
        targets = [self._get_component(item) for item in component_list]
    else:
        targets = list(self)
    for component in targets:
        for parameter in component.parameters:
            if parameter.name != parameter_name:
                continue
            parameter.value = value
            if only_current:
                parameter.store_current_value_in_array()
            else:
                parameter.assign_current_value_to_all()
def as_dictionary(self, fullcopy=True):
    """Returns a dictionary of the model, including all components, degrees
    of freedom (dof) and chi-squared (chisq) with values.

    Parameters
    ----------
    fullcopy : bool (optional, True)
        Copies of objects are stored, not references. If any found,
        functions will be pickled and signals converted to dictionaries

    Returns
    -------
    dictionary : dict
        A dictionary including at least the following fields:

        * components: a list of dictionaries of components, one per
          component
        * _whitelist: a dictionary with keys used as references for saved
          attributes, for more information, see
          :py:func:`~hyperspy.misc.export_dictionary.export_to_dictionary`
        * any field from _whitelist.keys()

    Examples
    --------
    >>> s = signals.Signal1D(np.random.random((10,100)))
    >>> m = s.create_model()
    >>> l1 = components1d.Lorentzian()
    >>> l2 = components1d.Lorentzian()
    >>> m.append(l1)
    >>> m.append(l2)
    >>> d = m.as_dictionary()
    >>> m2 = s.create_model(dictionary=d)
    """
    dic = {'components': [c.as_dictionary(fullcopy) for c in self]}
    # Fill in the whitelisted model attributes (chisq, dof, ...) next to
    # the component list.
    export_to_dictionary(self, self._whitelist, dic, fullcopy)

    def remove_empty_numpy_strings(dic):
        # Recursively normalise empty numpy byte-strings to plain ''.
        for k, v in dic.items():
            if isinstance(v, dict):
                remove_empty_numpy_strings(v)
            elif isinstance(v, list):
                for vv in v:
                    if isinstance(vv, dict):
                        remove_empty_numpy_strings(vv)
                    elif isinstance(vv, np.string_) and len(vv) == 0:
                        # NOTE(review): this only rebinds the loop variable;
                        # the list element itself is never replaced.  Looks
                        # like a latent bug -- kept as-is here.
                        vv = ''
            elif isinstance(v, np.string_) and len(v) == 0:
                # NOTE(review): deleting and re-inserting a key while
                # iterating ``dic.items()`` is fragile -- confirm intended.
                del dic[k]
                dic[k] = ''
    remove_empty_numpy_strings(dic)
    return dic
def set_component_active_value(
        self, value, component_list=None, only_current=False):
    """Sets the component 'active' parameter to a specified value.

    Parameters
    ----------
    value : bool
        The new value of the 'active' parameter
    component_list : list of hyperspy components, optional
        A list of components whose parameters will changed. The components
        can be specified by name, index or themselves.
    only_current : bool, default False
        If True, will only change the parameter value at the current
        position in the model.
        If False, will change the parameter value for all the positions.

    Examples
    --------
    >>> v1 = hs.model.components1D.Voigt()
    >>> v2 = hs.model.components1D.Voigt()
    >>> m.extend([v1,v2])
    >>> m.set_component_active_value(False)
    >>> m.set_component_active_value(True, component_list=[v1])
    >>> m.set_component_active_value(False, component_list=[v1],
                                     only_current=True)
    """
    if component_list:
        targets = [self._get_component(item) for item in component_list]
    else:
        targets = list(self)
    for component in targets:
        component.active = value
        if component.active_is_multidimensional:
            if only_current:
                # _active_array is indexed in array (reversed) order.
                component._active_array[
                    self.axes_manager.indices[::-1]] = value
            else:
                component._active_array.fill(value)
def __getitem__(self, value):
"""x.__getitem__(y) <==> x[y]"""
if isinstance(value, str):
component_list = []
for component in self:
if component.name:
if component.name == value:
component_list.append(component)
elif component.__class__.__name__ == value:
component_list.append(component)
if component_list:
if len(component_list) == 1:
return component_list[0]
else:
raise ValueError(
"There are several components with "
"the name \"" + str(value) + "\"")
else:
raise ValueError(
"Component name \"" + str(value) +
"\" not found in model")
else:
return list.__getitem__(self, value)
def create_samfire(self, workers=None, setup=True, **kwargs):
    """Creates a SAMFire object.

    Parameters
    ----------
    workers : {None, int}
        the number of workers to initialise.
        If zero, all computations will be done serially.
        If None (default), will attempt to use (number-of-cores - 1),
        however if just one core is available, will use one worker.
    setup : bool
        if the setup should be run upon initialization.
    **kwargs
        Any that will be passed to the _setup and in turn SamfirePool.
    """
    # Imported locally, presumably to avoid importing the samfire machinery
    # (and a possible circular import) at module load time -- TODO confirm.
    from hyperspy.samfire import Samfire
    return Samfire(self, workers=workers,
                   setup=setup, **kwargs)
class ModelSpecialSlicers(object):
    """Slicing proxy for models (``inav``/``isig``-style access).

    Indexing an instance builds a new model on the correspondingly sliced
    signal and copies components, parameter maps, channel switches and twin
    relations over from the parent model.
    """

    def __init__(self, model, isNavigation):
        # True -> the slices address navigation axes, False -> signal axes.
        self.isNavigation = isNavigation
        self.model = model

    def __getitem__(self, slices):
        # Translate the user-facing slices into raw array slices, then
        # slice the underlying signal itself.
        array_slices = self.model.signal._get_array_slices(
            slices,
            self.isNavigation)
        _signal = self.model.signal._slicer(slices, self.isNavigation)
        # TODO: for next major release, change model creation defaults to not
        # automate anything. For now we explicitly look for "auto_" kwargs and
        # disable them:
        import inspect
        pars = inspect.signature(_signal.create_model).parameters
        kwargs = {key: False for key in pars.keys() if key.startswith('auto_')}
        _model = _signal.create_model(**kwargs)

        dims = (self.model.axes_manager.navigation_dimension,
                self.model.axes_manager.signal_dimension)
        if self.isNavigation:
            # Navigation slicing keeps the full signal range.
            _model.channel_switches[:] = self.model.channel_switches
        else:
            # Signal slicing: carry over only the selected channels.
            _model.channel_switches[:] = \
                np.atleast_1d(
                    self.model.channel_switches[
                        tuple(array_slices[-dims[1]:])])
        # Recreate each component on the new model, passing through the
        # constructor arguments recorded in the component's whitelist.
        twin_dict = {}
        for comp in self.model:
            init_args = {}
            for k, v in comp._whitelist.items():
                if v is None:
                    continue
                flags_str, value = v
                if 'init' in parse_flag_string(flags_str):
                    init_args[k] = value
            _model.append(comp.__class__(**init_args))
        # Copy whitelisted, sliceable attributes: model level first, then
        # per component and per parameter.
        copy_slice_from_whitelist(self.model,
                                  _model,
                                  dims,
                                  (slices, array_slices),
                                  self.isNavigation,
                                  )
        for co, cn in zip(self.model, _model):
            copy_slice_from_whitelist(co,
                                      cn,
                                      dims,
                                      (slices, array_slices),
                                      self.isNavigation)
            if _model.axes_manager.navigation_size < 2:
                if co.active_is_multidimensional:
                    cn.active = co._active_array[array_slices[:dims[0]]]
            for po, pn in zip(co.parameters, cn.parameters):
                copy_slice_from_whitelist(po,
                                          pn,
                                          dims,
                                          (slices, array_slices),
                                          self.isNavigation)
                # Remember the old parameter's twins (by id) together with
                # the new parameter object so links can be rewired below.
                twin_dict[id(po)] = ([id(i) for i in list(po._twins)], pn)
        # Re-establish twin relations between the new parameter objects.
        for k in twin_dict.keys():
            for tw_id in twin_dict[k][0]:
                twin_dict[tw_id][1].twin = twin_dict[k][1]

        # Detach chisq/dof data from any shared buffers.
        _model.chisq.data = _model.chisq.data.copy()
        _model.dof.data = _model.dof.data.copy()
        _model.fetch_stored_values()  # to update and have correct values
        if not self.isNavigation:
            for _ in _model.axes_manager:
                _model._calculate_chisq()
        return _model
# vim: textwidth=80
|
erh3cq/hyperspy
|
hyperspy/model.py
|
Python
|
gpl-3.0
| 82,619
|
[
"Gaussian"
] |
65ce3d7b40965e169841e13d3688ce7c5598bc4f0da8cf8ed14430fa513455ac
|
# Import OVITO modules.
from ovito import *
from ovito.io import *
from ovito.vis import OpenGLRenderer, RenderSettings

# Render only when a GUI/OpenGL context is available; the OpenGL renderer
# cannot run in headless batch mode.
# NOTE(review): `ovito` and `dataset` are expected to be provided by the
# star-imports above -- confirm the package exports both names.
if not ovito.headless_mode:
    # Import a data file.
    node = import_file("../../files/CFG/shear.void.120.cfg")
    renderer = OpenGLRenderer()
    # Show the default value before overriding it.
    print("Parameter defaults:")
    print("  antialiasing_level: {}".format(renderer.antialiasing_level))
    renderer.antialiasing_level = 2
    settings = RenderSettings(size = (100,100), renderer = renderer)
    dataset.viewports.active_vp.render(settings)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/opengl_renderer.py
|
Python
|
gpl-3.0
| 533
|
[
"OVITO"
] |
a08a06804f8ddb44cefe910e1415ec490850172e49ca33ea0a78d6eb7296636b
|
########################################################################
# $HeadURL $
# File: FTSValidator.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/08 14:28:29
########################################################################
""" :mod: FTSValidator
==================
.. module: FTSValidator
:synopsis: making sure that all required bits and pieces are in place for FTSLfn, FTSJob
and FTSJobFile
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
A general and simple fts validator checking for required attributes and logic.
It checks if required attributes are set/unset but not for their values.
There is a global singleton validator for general use defined in this module: gFTSValidator.
"""
__RCSID__ = "$Id $"
# #
# @file FTSValidator.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/08 14:28:52
# @brief Definition of FTSValidator class.
# # imports
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
from DIRAC.DataManagementSystem.Client.FTSJob import FTSJob
from DIRAC.DataManagementSystem.Client.FTSFile import FTSFile
from DIRAC.DataManagementSystem.Client.FTSSite import FTSSite
########################################################################
class FTSValidator( object ):
  """
  .. class:: FTSValidator

  Simple validator for FTSJob, FTSFile and FTSSite instances.  It checks
  that the required attributes are set (truthy) -- it does not validate
  their actual values.
  """
  # NOTE(review): ``__metaclass__`` is Python 2 syntax; under Python 3 this
  # assignment has no effect and the class is not a singleton.
  __metaclass__ = DIRACSingleton
  # # required attributes in FTSLfn, FTSJob and FTSJobFile
  __reqAttrs = { FTSJob: { "attrs": [ "SourceSE", "TargetSE", "FTSServer", "Size"] },
                 FTSFile: { "attrs": [ "FileID", "OperationID", "RequestID", "LFN", "Checksum", "ChecksumType", "Size",
                                       "SourceSE", "SourceSURL", "TargetSE", "TargetSURL" ] },
                 FTSSite: { "attrs": [ "FTSServer", "Name" ] } }

  def __init__( self ):
    """ c'tor """
    # # order of validators -- each returns S_OK on success, S_ERROR on failure
    self.validators = [ self.isA, self.hasReqAttrs, self.hasFTSJobFiles ]

  def validate( self, obj ):
    """ validate

    Run every registered validator over *obj*; the first failure is
    returned immediately.

    :param mixed obj: FTSJob, FTSFile or FTSSite instance
    """
    for validator in self.validators:
      isValid = validator( obj )
      if not isValid["OK"]:
        return isValid
    # # if we're here request is more or less valid
    return S_OK()

  @classmethod
  def isA( cls, obj ):
    """ object is a proper class

    :param mixed obj: FTSJob, FTSFile or FTSSite instance
    """
    for objtype in cls.__reqAttrs:
      if isinstance( obj, objtype ):
        return S_OK()
    return S_ERROR( "Not supported object type %s" % type( obj ) )

  @classmethod
  def hasReqAttrs( cls, obj ):
    """ has required attributes set

    :param mixed obj: FTSFile, FTSJob of FTSSite instance
    """
    for objtype in cls.__reqAttrs:
      if isinstance( obj, objtype ):
        for attr in cls.__reqAttrs[objtype].get( "attrs", [] ):
          if not getattr( obj, attr ):
            return S_ERROR( "Missing property %s in %s" % ( attr, obj.__class__.__name__ ) )
    return S_OK()

  @classmethod
  def hasFTSJobFiles( cls, obj ):
    """ check if FTSJob has FTSJobFiles

    :param mixed obj: FTSJob instance
    """
    if not isinstance( obj, FTSJob ):
      return S_OK()
    if not len( obj ):
      return S_ERROR( "FTSJob is missing FTSFiles" )
    return S_OK()

# # global instance
gFTSValidator = FTSValidator()
|
Sbalbp/DIRAC
|
DataManagementSystem/private/FTSValidator.py
|
Python
|
gpl-3.0
| 3,350
|
[
"DIRAC"
] |
d9b53e7197bb7f36a724d671a7f23d1a7c92204476304e9970d8555ed12b66d2
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Timothy Berkelbach <tim.berkelbach@gmail.com>
#
'''
parse CP2K PP format, following parse_nwchem.py
'''
import sys
import numpy as np
def parse(string):
    '''Parse the pseudo text *string* which is in CP2K format, return an
    internal basis format which can be assigned to :attr:`Cell.pseudo`.

    Lines started with # are ignored, as are blank lines and lines
    containing END or #PSEUDOPOTENTIAL markers.
    '''
    filtered = []
    for raw in string.splitlines():
        stripped = raw.strip()
        if stripped and 'END' not in raw and '#PSEUDOPOTENTIAL' not in raw:
            filtered.append(stripped)
    return _parse(filtered)
def load(pseudofile, symb, suffix=None):
    '''Parse the *pseudofile's entry* for atom 'symb', return an internal
    pseudo format which can be assigned to :attr:`Cell.pseudo`.
    '''
    entry = search_seg(pseudofile, symb, suffix)
    return _parse(entry)
def _parse(plines):
header_ln = plines.pop(0)
nelecs = [ int(nelec) for nelec in plines.pop(0).split() ]
rnc_ppl = plines.pop(0).split()
rloc = float(rnc_ppl[0])
nexp = int(rnc_ppl[1])
cexp = [ float(c) for c in rnc_ppl[2:] ]
nproj_types = int(plines.pop(0))
r = []
nproj = []
hproj = []
for p in range(nproj_types):
rnh_ppnl = plines.pop(0).split()
r.append(float(rnh_ppnl[0]))
nproj.append(int(rnh_ppnl[1]))
hproj_p_ij = []
for h in rnh_ppnl[2:]:
hproj_p_ij.append(float(h))
for i in range(1,nproj[-1]):
for h in plines.pop(0).split():
hproj_p_ij.append(float(h))
hproj_p = np.zeros((nproj[-1],nproj[-1]))
hproj_p[np.triu_indices(nproj[-1])] = [ h for h in hproj_p_ij ]
hproj_p_symm = hproj_p + hproj_p.T - np.diag(hproj_p.diagonal())
hproj.append(hproj_p_symm.tolist())
pseudo_params = [nelecs,
rloc, nexp, cexp,
nproj_types]
for ri,ni,hi in zip(r,nproj,hproj):
pseudo_params.append([ri, ni, hi])
return pseudo_params
def search_seg(pseudofile, symb, suffix=None):
    '''Find the pseudopotential entry for atom 'symb' in file 'pseudofile'.

    Parameters
    ----------
    pseudofile : str
        Path to a CP2K pseudopotential file whose entries are separated by
        ``#PSEUDOPOTENTIAL`` markers.
    symb : str
        Atom symbol to look up.
    suffix : str or None
        If None, return the default (non q-suffixed) entry; otherwise
        return the entry whose name carries the given suffix (e.g. 'q4').

    Raises
    ------
    RuntimeError
        If no matching entry is found.
    '''
    # FIX: use a context manager so the file handle is released even if
    # reading raises (the original left the file open on error).
    with open(pseudofile, 'r') as fin:
        fdata = fin.read().split('#PSEUDOPOTENTIAL')
    for dat in fdata[1:]:
        dat0 = dat.split(None, 1)
        if dat0 and dat0[0] == symb:
            dat = [x.strip() for x in dat.splitlines()
                   if x.strip() and 'END' not in x]
            if suffix is None:  # use default PP
                qsuffix = dat[0].split()[-1].split('-')[-1]
                # q-suffixed names (e.g. "q4") are explicit-charge variants,
                # not the default entry.
                if not (qsuffix.startswith('q') and qsuffix[1:].isdigit()):
                    return dat
            else:
                if any(suffix == x.split('-')[-1] for x in dat[0].split()):
                    return dat
    raise RuntimeError('Pseudopotential not found for %s in %s' % (symb, pseudofile))
if __name__ == '__main__':
    # Self-test driver: ``python parse_cp2k.py <ppfile> <atomlabel>``
    # exercises search_seg(), load() and parse() on a real file plus an
    # inline sample entry.
    args = sys.argv[1:]
    if len(args) == 2:
        ppfile = args[0]
        atom = args[1]
    else:
        print('usage: ppfile atomlabel ')
        sys.exit(1)
    print("Testing search_seg():")
    print(search_seg(ppfile,atom))
    print("Testing load() [[from file]]:")
    load(ppfile,atom)
    print("Testing parse():")
    print(parse("""
#PSEUDOPOTENTIAL
C GTH-BLYP-q4
2 2
0.33806609 2 -9.13626871 1.42925956
2
0.30232223 1 9.66551228
0.28637912 0
"""
    ))
|
gkc1000/pyscf
|
pyscf/pbc/gto/pseudo/parse_cp2k.py
|
Python
|
apache-2.0
| 3,999
|
[
"CP2K",
"PySCF"
] |
8f29d352c47b636a11bdc649610f20df258503aaea5614ed23ff5dd383bcbb3a
|
# Kaggle digit-recognition with SVMs (linear / RBF / polynomial kernels).
#
# FIX: this file previously contained pasted interpreter output
# (classification reports and ``Out[..]`` transcripts) as bare text, which
# made it invalid Python.  The transcripts are preserved below as comments;
# all executable statements are unchanged.
import numpy as np
import pandas as pd
from sklearn import metrics,cross_validation
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
from sklearn import svm

# Read training data and split into train and test data
data=pd.read_csv('train.csv')
data1=data.values
X=data1[:,1:]
y=data1[:,:1]
y=np.ravel(y)
Xtrain,Xtest,ytrain,ytest=train_test_split(X,y,test_size=0.5)

# Run linear kernel first
svmL1=svm.SVC(kernel='linear',C=0.01)
svmL1.fit(Xtrain,ytrain)
predL1=svmL1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
      % (svmL1, metrics.classification_report(ytest,predL1)))
# Observed output: precision/recall/f1 around 0.91 averaged over the ten
# digit classes (21000 test samples).

# Use GridSearchCV to tune parameters. C=1e-06 gave the best result.
parameters3={'kernel':['linear'],'C': [1e-06, 0.0001, 0.01, 1, 100]}
clf3=GridSearchCV(svm.SVC(),parameters3,cv=5,scoring='precision')
clf3.fit(Xtrain,ytrain)
clf3.grid_scores_
# Observed: mean precision 0.93872 for C=1e-06; ~0.913 for C>=0.01.

# Run gaussian kernel but the result is poor and running time is long
svmR1=svm.SVC(kernel='rbf',gamma=0.001, C=10000)
svmR1.fit(Xtrain,ytrain)
predR1=svmR1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
      % (svmR1, metrics.classification_report(ytest,predR1)))
# Observed: degenerate classifier predicting a single class
# (avg precision 0.01).

# Run polynomial kernel
svmP1=svm.SVC(kernel='poly',degree=3)
svmP1.fit(Xtrain,ytrain)
predP1=svmP1.predict(Xtest)
print("Classification report for classifier %s:\n%s\n"
      % (svmP1, metrics.classification_report(ytest,predP1)))
# Observed: avg precision/recall/f1 around 0.97.

# Use GridSearchCV to tune parameters. Degree=2 gave the best result.
parameters1={'kernel':['poly'],'degree':[2,3,4]}
clf1=GridSearchCV(svm.SVC(),parameters1,cv=5,scoring='precision')
clf1.fit(Xtrain,ytrain)
clf1.grid_scores_
# Observed: mean precision 0.96890 (degree=2), 0.96567 (3), 0.95507 (4).

# Use polynomial degree=2 for final model and submission
svm1=svm.SVC(kernel='poly',degree=2)
svm1.fit(X,y)
test=pd.read_csv('test.csv')
pred=svm1.predict(test)
pred = pd.DataFrame(pred)
pred['ImageId'] = pred.index + 1
pred = pred[['ImageId', 0]]
pred.columns = ['ImageId', 'Label']
pred.to_csv('pred.csv', index=False)
|
lingcheng99/kagge-digit-recognition
|
svm.py
|
Python
|
mit
| 5,162
|
[
"Gaussian"
] |
e4294c055b42832188e934ef71044df224a6827f5e10546b1ee9ebc056e625a6
|
import numpy as np
def benitez2(p, fjac=None, x=None, y=None, err=None):
    """Benitez-profile residual function for MPFIT.

    Model: ``x**p[0] * exp(-(x / p[1])**p[2])``
    #p[0] = power-law exponent
    #p[1] = scale
    #p[2] = stretch exponent

    Returns ``[status, weighted_residuals]``; a non-negative status tells
    MPFIT to continue.
    """
    # BUG FIX: the original read ``np.exp(-(pow(x/p[1]),p[2]))`` -- a
    # one-argument pow() inside a stray tuple, which raises TypeError.
    # The intended model is x**p[0] * exp(-(x/p[1])**p[2]).
    model = pow(x, p[0]) * np.exp(-pow(x / p[1], p[2]))
    status = 0
    return([status, (y - model) / err])
def parabola(p, fjac=None, x=None, y=None, err=None):
    """Vertex-form parabola residual for MPFIT.

    p[0] = x_offset, p[1] = amplitude, p[2] = y_offset
    Returns [status, weighted residuals]; status 0 means "continue".
    """
    model = p[1] * (x - p[0]) ** 2 + p[2]
    return [0, (y - model) / err]
def parabola2(p, fjac=None, x=None, y=None, err=None):
    """Standard-form parabola residual for MPFIT.

    p[0] = a, p[1] = b, p[2] = c  (model: a*x**2 + b*x + c)
    Returns [status, weighted residuals]; status 0 means "continue".
    """
    model = p[0] * x ** 2 + p[1] * x + p[2]
    return [0, (y - model) / err]
def gaussian(p, fjac=None, x=None, y=None, err=None):
    """Single-Gaussian residual for MPFIT.

    p[0] = sigma, p[1] = x_offset, p[2] = amplitude, p[3] = y_offset
    Returns [status, weighted residuals].
    """
    model = p[3] + p[2] * np.exp(-((x - p[1]) ** 2) / (2. * p[0] ** 2))
    # Non-negative status value means MPFIT should continue, negative means
    # stop the calculation.
    return [0, (y - model) / err]
def twogaussian(p, fjac=None, x=None, y=None, err=None):
    """Sum of two Gaussians (no baseline), for MPFIT.

    Parameters: p[0:3] -- sigma, x offset, amplitude of Gaussian 1;
    p[3:6] -- sigma, x offset, amplitude of Gaussian 2.
    Returns [status, weighted residuals (y - model) / err].
    """
    def bump(sigma, mu, amp):
        # one Gaussian component evaluated on the shared x grid
        return amp * np.exp(-((x - mu) ** 2) / (2. * sigma ** 2))
    fit = bump(p[0], p[1], p[2]) + bump(p[3], p[4], p[5])
    return [0, (y - fit) / err]
def twogaussianexp(p, fjac=None, x=None, y=None, err=None):
    """Two Gaussians plus an exponential component, for MPFIT.

    Parameters: p[0:3] -- sigma, x offset, amplitude of Gaussian 1;
    p[3:6] -- sigma, x offset, amplitude of Gaussian 2;
    p[6] -- exponential scale factor, p[7] -- its x offset,
    p[8] -- its amplitude.
    Returns [status, weighted residuals (y - model) / err].
    """
    def bump(sigma, mu, amp):
        # one Gaussian component evaluated on the shared x grid
        return amp * np.exp(-((x - mu) ** 2) / (2. * sigma ** 2))
    fit = bump(p[0], p[1], p[2]) + bump(p[3], p[4], p[5])
    fit = fit + p[8] * np.exp(p[6] * (x - p[7]))
    return [0, (y - fit) / err]
def threegaussian(p, fjac=None, x=None, y=None, err=None):
    """Sum of three Gaussians (no baseline), for MPFIT.

    Parameters come in (sigma, x offset, amplitude) triples:
    p[0:3] -- Gaussian 1, p[3:6] -- Gaussian 2, p[6:9] -- Gaussian 3.
    Returns [status, weighted residuals (y - model) / err].
    """
    def bump(sigma, mu, amp):
        # one Gaussian component evaluated on the shared x grid
        return amp * np.exp(-((x - mu) ** 2) / (2. * sigma ** 2))
    fit = sum(bump(p[i], p[i + 1], p[i + 2]) for i in (0, 3, 6))
    return [0, (y - fit) / err]
|
bmazin/SDR
|
Projects/Simulator/fitFunctions.py
|
Python
|
gpl-2.0
| 2,488
|
[
"Gaussian"
] |
a4e063af943d0f614cd18620f9d8752538d89db603d1c73e69ae10031ea82084
|
#!/usr/bin/env python
import numpy as np
from numpy import random
import cv2
def make_gaussians(cluster_n, img_size):
    """Sample `cluster_n` random 2-D Gaussian point clouds on a square canvas.

    Each cluster gets a random mean inside the central 80% of the canvas and
    a random positive-definite covariance, then 100-999 points are drawn from
    it.

    :param cluster_n: number of Gaussian clusters to generate
    :param img_size: side length of the square canvas (pixels)
    :return: (points, ref_distrs) where `points` is an (N, 2) float32 array of
        all samples stacked together and `ref_distrs` is a list of
        (mean, cov) tuples, one per cluster.
    """
    points = []
    ref_distrs = []
    # FIX: `range` instead of Python-2-only `xrange`; identical behavior here
    # and keeps this helper importable from Python 3 as well.
    for i in range(cluster_n):
        mean = (0.1 + 0.8*random.rand(2)) * img_size
        # a.T @ a is positive semi-definite; the scaled identity keeps the
        # covariance strictly positive definite.
        a = (random.rand(2, 2)-0.5)*img_size*0.1
        cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
        n = 100 + random.randint(900)
        pts = random.multivariate_normal(mean, cov, n)
        points.append( pts )
        ref_distrs.append( (mean, cov) )
    points = np.float32( np.vstack(points) )
    return points, ref_distrs
def draw_gaussain(img, mean, cov, color):
    # Draw a 2-D Gaussian as its 3-sigma covariance ellipse on `img`.
    # (Keeps the original's "gaussain" spelling -- callers below use it.)
    x, y = np.int32(mean)
    # SVD of the symmetric covariance: singular values `w` are the variances
    # along the principal axes, `u` holds the axis directions.
    w, u, vt = cv2.SVDecomp(cov)
    ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
    s1, s2 = np.sqrt(w)*3.0  # 3-sigma half-axis lengths
    cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA)
if __name__ == '__main__':
    # Demo: repeatedly sample random Gaussian mixtures, fit them with
    # OpenCV's EM, and show ground truth vs. estimate until ESC is pressed.
    # NOTE: Python 2 only (print statements, legacy cv2.EM API).
    cluster_n = 5    # number of Gaussian clusters per round
    img_size = 512   # square canvas size in pixels
    print 'press any key to update distributions, ESC - exit\n'
    while True:
        print 'sampling distributions...'
        points, ref_distrs = make_gaussians(cluster_n, img_size)
        print 'EM (opencv) ...'
        em = cv2.EM(cluster_n, cv2.EM_COV_MAT_GENERIC)
        em.train(points)
        means = em.getMat('means')
        covs = em.getMatVector('covs')
        found_distrs = zip(means, covs)
        print 'ready!\n'
        img = np.zeros((img_size, img_size, 3), np.uint8)
        for x, y in np.int32(points):
            cv2.circle(img, (x, y), 1, (255, 255, 255), -1)
        # ground-truth distributions in green, EM estimates in red
        for m, cov in ref_distrs:
            draw_gaussain(img, m, cov, (0, 255, 0))
        for m, cov in found_distrs:
            draw_gaussain(img, m, cov, (0, 0, 255))
        cv2.imshow('gaussian mixture', img)
        ch = 0xFF & cv2.waitKey(0)
        if ch == 27:  # ESC quits
            break
    cv2.destroyAllWindows()
|
apavlenko/opencv
|
samples/python2/gaussian_mix.py
|
Python
|
bsd-3-clause
| 1,824
|
[
"Gaussian"
] |
eb680c313f4ed1ffe35a696edef12342ab12b8cc66a6e34ddd8dbf64f48026af
|
# Sample BLAST parameters, as parsed from BLASTN 2.2.28+ output.
# Fields with None were not present in the source report.
PARAMS = {
    'application': 'BLASTN',
    'blast_cutoff': [
        None,
        None
    ],
    'database': 'manx-shearwater',
    'database_length': 17465129,
    'database_letters': None,
    'database_name': [],
    'database_sequences': 70016,
    'date': '',
    'dropoff_1st_pass': [
        None,
        None
    ],
    'effective_database_length': None,
    'effective_hsp_length': 22,
    'effective_query_length': None,
    'effective_search_space': 382194648.0,
    'effective_search_space_used': None,
    'frameshift': [
        None,
        None
    ],
    'gap_penalties': [
        5,
        2
    ],
    'gap_trigger': [
        None,
        None
    ],
    'gap_x_dropoff': [
        None,
        None
    ],
    'gap_x_dropoff_final': [
        None,
        None
    ],
    'gapped': 0,
    'hsps_gapped': None,
    'hsps_no_gap': None,
    'hsps_prelim_gapped': None,
    'hsps_prelim_gapped_attemped': None,
    'ka_params': [
        0.625,
        0.41,
        0.78
    ],
    'ka_params_gap': [
        None,
        None,
        None
    ],
    'matrix': '',
    'num_good_extends': None,
    'num_hits': None,
    'num_letters_in_database': 17465129,
    'num_seqs_better_e': None,
    'num_sequences': None,
    'num_sequences_in_database': 70016,
    'posted_date': [],
    'query': 'GZG3DGY01ASHXW',
    'query_id': 'Query_1',
    'query_length': 46,
    'query_letters': 46,
    'reference': 'Stephen F. Altschul, Thomas L. Madden, ...',
    'sc_match': 2,
    'sc_mismatch': -3,
    'threshold': None,
    'version': '2.2.28+',
    'window_size': None
}
# RECORD0: query 'id0' with two alignments against different subjects,
# with distinct lengths, bit scores and e-values.
RECORD0 = {
    'query': 'id0',
    'alignments': [
        {
            'length': 37000,
            'hsps': [
                {
                    'bits': 20,
                    'sbjct_end': 15400,
                    'expect': 1e-11,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 15362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 1296/99',
        },
        {
            'length': 38000,
            'hsps': [
                {
                    'bits': 25,
                    'sbjct_end': 12400,
                    'expect': 1e-10,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 12362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Squirrelpox virus 55',
        }
    ]
}
# RECORD1: query 'id1' with two alignments of equal subject length.
RECORD1 = {
    'query': 'id1',
    'alignments': [
        {
            'length': 35000,
            'hsps': [
                {
                    'bits': 20,
                    'sbjct_end': 11400,
                    'expect': 1e-8,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 11362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Monkeypox virus 456',
        },
        {
            'length': 35000,
            'hsps': [
                {
                    'bits': 20,
                    'sbjct_end': 10400,
                    'expect': 1e-7,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 10362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Mummypox virus 3000 B.C.',
        }
    ]
}
# RECORD2: single-alignment record for query 'id2'.
RECORD2 = {
    'query': 'id2',
    'alignments': [
        {
            'length': 30000,
            'hsps': [
                {
                    'bits': 20,
                    'sbjct_end': 1400,
                    'expect': 1e-6,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 1362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
        }
    ]
}
# Identical to RECORD2, apart from e-value.
RECORD3 = {
    'query': 'id3',
    'alignments': [
        {
            'length': 30000,
            'hsps': [
                {
                    'bits': 20,
                    'sbjct_end': 1400,
                    'expect': 1e-5,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 1362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
        }
    ]
}
# RECORD4: one alignment carrying three HSPs with decreasing bit scores;
# the last HSP has expect == 0.0.
RECORD4 = {
    'query': 'id4',
    'alignments': [
        {
            'length': 30000,
            'hsps': [
                {
                    'bits': 10,
                    'sbjct_end': 1400,
                    'expect': 1e-3,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 1362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                },
                {
                    'bits': 5,
                    'sbjct_end': 1400,
                    'expect': 1e-2,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 1362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                },
                {
                    'bits': 3,
                    'sbjct_end': 1400,
                    'expect': 0.0,
                    'sbjct': 'TACCC--CGGCCCGCG-CGGCCGGCTCTCCA',
                    'sbjct_start': 1362,
                    'query': 'TACCCTGCGGCCCGCTACGGCTGG-TCTCCA',
                    'frame': [1, 1],
                    'query_end': 68,
                    'query_start': 28
                }
            ],
            'title': 'gi|887699|gb|DQ37780 Cowpox virus 15',
        }
    ]
}
|
bamueh/dark-matter
|
test/blast/sample_data.py
|
Python
|
mit
| 6,857
|
[
"BLAST"
] |
2ba97c395c9f52a7422caa476081a90a4e4d487595f655db4ccfd6a68a58daa5
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2018 (ita)
"""
Runner.py: Task scheduling and execution
"""
import heapq, traceback
try:
from queue import Queue, PriorityQueue
except ImportError:
from Queue import Queue
try:
from Queue import PriorityQueue
except ImportError:
class PriorityQueue(Queue):
def _init(self, maxsize):
self.maxsize = maxsize
self.queue = []
def _put(self, item):
heapq.heappush(self.queue, item)
def _get(self):
return heapq.heappop(self.queue)
from waflib import Utils, Task, Errors, Logs
GAP = 5
"""
Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run
"""
class PriorityTasks(object):
    """Min-heap of task objects, backed by :py:mod:`heapq` over ``self.lst``."""
    def __init__(self):
        self.lst = []
    def __len__(self):
        return len(self.lst)
    def __iter__(self):
        return iter(self.lst)
    def __str__(self):
        return 'PriorityTasks: [%s]' % '\n '.join(str(x) for x in self.lst)
    def clear(self):
        """Drop all queued tasks."""
        self.lst = []
    def append(self, task):
        """Push one task onto the heap."""
        heapq.heappush(self.lst, task)
    def appendleft(self, task):
        "Deprecated, do not use"
        heapq.heappush(self.lst, task)
    def pop(self):
        """Pop and return the smallest (highest-priority) task."""
        return heapq.heappop(self.lst)
    def extend(self, lst):
        """Merge tasks from a list or another PriorityTasks instance."""
        if self.lst:
            # already holding tasks: push the new ones one by one
            for item in lst:
                self.append(item)
        elif isinstance(lst, list):
            # empty: adopt the list in place and heapify it
            self.lst = lst
            heapq.heapify(lst)
        else:
            # empty: adopt another PriorityTasks' backing heap
            self.lst = lst.lst
class Consumer(Utils.threading.Thread):
    """
    Daemon thread object that executes a task. It shares a semaphore with
    the coordinator :py:class:`waflib.Runner.Spawner`. There is one
    instance per task to consume.
    """
    def __init__(self, spawner, task):
        Utils.threading.Thread.__init__(self)
        self.task = task
        """Task to execute"""
        self.spawner = spawner
        """Coordinator object"""
        self.setDaemon(1)
        self.start()
    def run(self):
        """
        Processes a single task
        """
        try:
            if not self.spawner.master.stop:
                self.spawner.master.process_task(self.task)
        finally:
            # Always release the spawner's semaphore and hand the task back,
            # even if process_task raised, so the producer cannot deadlock.
            self.spawner.sem.release()
            self.spawner.master.out.put(self.task)
            # break reference cycles so the finished thread can be collected
            self.task = None
            self.spawner = None
class Spawner(Utils.threading.Thread):
    """
    Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and
    spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each
    :py:class:`waflib.Task.Task` instance.
    """
    def __init__(self, master):
        Utils.threading.Thread.__init__(self)
        self.master = master
        """:py:class:`waflib.Runner.Parallel` producer instance"""
        self.sem = Utils.threading.Semaphore(master.numjobs)
        """Bounded semaphore that prevents spawning more than *n* concurrent consumers"""
        self.setDaemon(1)
        self.start()
    def run(self):
        """
        Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop`
        """
        try:
            self.loop()
        except Exception:
            # Python 2 prints unnecessary messages when shutting down
            # we also want to stop the thread properly
            pass
    def loop(self):
        """
        Consumes task objects from the producer; ends when the producer has no more
        task to provide.
        """
        master = self.master
        while 1:
            # blocks until the producer queues a task (None wakes us for shutdown)
            task = master.ready.get()
            # acquire before spawning so at most `numjobs` consumers run at once;
            # each Consumer releases the semaphore when it finishes
            self.sem.acquire()
            if not master.stop:
                task.log_display(task.generator.bld)
                Consumer(self, task)
class Parallel(object):
    """
    Schedule the tasks obtained from the build context for execution.
    """
    def __init__(self, bld, j=2):
        """
        The initialization requires a build context reference
        for computing the total number of jobs.
        """
        self.numjobs = j
        """
        Amount of parallel consumers to use
        """
        self.bld = bld
        """
        Instance of :py:class:`waflib.Build.BuildContext`
        """
        self.outstanding = PriorityTasks()
        """Heap of :py:class:`waflib.Task.Task` that may be ready to be executed"""
        self.postponed = PriorityTasks()
        """Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons"""
        self.incomplete = set()
        """List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)"""
        self.ready = PriorityQueue(0)
        """List of :py:class:`waflib.Task.Task` ready to be executed by consumers"""
        self.out = Queue(0)
        """List of :py:class:`waflib.Task.Task` returned by the task consumers"""
        self.count = 0
        """Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
        self.processed = 0
        """Amount of tasks processed"""
        self.stop = False
        """Error flag to stop the build"""
        self.error = []
        """Tasks that could not be executed"""
        self.biter = None
        """Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
        self.dirty = False
        """
        Flag that indicates that the build cache must be saved when a task was executed
        (calls :py:meth:`waflib.Build.BuildContext.store`)"""
        self.revdeps = Utils.defaultdict(set)
        """
        The reverse dependency graph of dependencies obtained from Task.run_after
        """
        self.spawner = None
        """
        Coordinating daemon thread that spawns thread consumers
        """
        if self.numjobs > 1:
            self.spawner = Spawner(self)
    def get_next_task(self):
        """
        Obtains the next Task instance to run

        :rtype: :py:class:`waflib.Task.Task`
        """
        if not self.outstanding:
            return None
        return self.outstanding.pop()
    def postpone(self, tsk):
        """
        Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`.
        The order is scrambled so as to consume as many tasks in parallel as possible.

        :param tsk: task instance
        :type tsk: :py:class:`waflib.Task.Task`
        """
        self.postponed.append(tsk)
    def refill_task_list(self):
        """
        Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
        Ensures that all tasks in the current build group are complete before processing the next one.
        """
        # throttle: don't queue far more tasks than the consumers can absorb
        while self.count > self.numjobs * GAP:
            self.get_out()
        while not self.outstanding:
            if self.count:
                self.get_out()
                if self.outstanding:
                    break
            elif self.postponed:
                try:
                    cond = self.deadlock == self.processed
                except AttributeError:
                    pass
                else:
                    if cond:
                        # The most common reason is conflicting build order declaration
                        # for example: "X run_after Y" and "Y run_after X"
                        # Another can be changing "run_after" dependencies while the build is running
                        # for example: updating "tsk.run_after" in the "runnable_status" method
                        lst = []
                        for tsk in self.postponed:
                            deps = [id(x) for x in tsk.run_after if not x.hasrun]
                            lst.append('%s\t-> %r' % (repr(tsk), deps))
                            if not deps:
                                lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk))
                        raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst))
                self.deadlock = self.processed
                if self.postponed:
                    self.outstanding.extend(self.postponed)
                    self.postponed.clear()
            elif not self.count:
                if self.incomplete:
                    for x in self.incomplete:
                        for k in x.run_after:
                            if not k.hasrun:
                                break
                        else:
                            # dependency added after the build started without updating revdeps
                            self.incomplete.remove(x)
                            self.outstanding.append(x)
                            break
                    else:
                        if self.stop or self.error:
                            break
                        raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete)
                else:
                    # advance to the next build group
                    tasks = next(self.biter)
                    ready, waiting = self.prio_and_split(tasks)
                    self.outstanding.extend(ready)
                    self.incomplete.update(waiting)
                    self.total = self.bld.total()
                    break
    def add_more_tasks(self, tsk):
        """
        If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained
        in that list are added to the current build and will be processed before the next build group.

        The priorities for dependent tasks are not re-calculated globally

        :param tsk: task instance
        :type tsk: :py:attr:`waflib.Task.Task`
        """
        if getattr(tsk, 'more_tasks', None):
            more = set(tsk.more_tasks)
            groups_done = set()
            def iteri(a, b):
                for x in a:
                    yield x
                for x in b:
                    yield x
            # Update the dependency tree
            # this assumes that task.run_after values were updated
            for x in iteri(self.outstanding, self.incomplete):
                for k in x.run_after:
                    if isinstance(k, Task.TaskGroup):
                        if k not in groups_done:
                            groups_done.add(k)
                            for j in k.prev & more:
                                self.revdeps[j].add(k)
                    elif k in more:
                        self.revdeps[k].add(x)
            ready, waiting = self.prio_and_split(tsk.more_tasks)
            self.outstanding.extend(ready)
            self.incomplete.update(waiting)
            self.total += len(tsk.more_tasks)
    def mark_finished(self, tsk):
        """
        Updates DAG bookkeeping after *tsk* has completed: moves reverse
        dependents whose prerequisites are all done from ``incomplete`` to
        ``outstanding``, and releases any task semaphore held by *tsk*.
        """
        def try_unfreeze(x):
            # DAG ancestors are likely to be in the incomplete set
            # This assumes that the run_after contents have not changed
            # after the build starts, else a deadlock may occur
            if x in self.incomplete:
                # TODO remove dependencies to free some memory?
                # x.run_after.remove(tsk)
                for k in x.run_after:
                    if not k.hasrun:
                        break
                else:
                    self.incomplete.remove(x)
                    self.outstanding.append(x)
        if tsk in self.revdeps:
            for x in self.revdeps[tsk]:
                if isinstance(x, Task.TaskGroup):
                    x.prev.remove(tsk)
                    if not x.prev:
                        for k in x.next:
                            # TODO necessary optimization?
                            k.run_after.remove(x)
                            try_unfreeze(k)
                        # TODO necessary optimization?
                        x.next = []
                else:
                    try_unfreeze(x)
            del self.revdeps[tsk]
        if hasattr(tsk, 'semaphore'):
            sem = tsk.semaphore
            try:
                sem.release(tsk)
            except KeyError:
                # TODO
                pass
            else:
                while sem.waiting and not sem.is_locked():
                    # take a frozen task, make it ready to run
                    x = sem.waiting.pop()
                    self._add_task(x)
    def get_out(self):
        """
        Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution.
        Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`.

        :rtype: :py:attr:`waflib.Task.Task`
        """
        tsk = self.out.get()
        if not self.stop:
            self.add_more_tasks(tsk)
        self.mark_finished(tsk)
        self.count -= 1
        self.dirty = True
        return tsk
    def add_task(self, tsk):
        """
        Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them.

        :param tsk: task instance
        :type tsk: :py:attr:`waflib.Task.Task`
        """
        # TODO change in waf 2.1
        self.ready.put(tsk)
    def _add_task(self, tsk):
        """
        Hands *tsk* over for execution, honoring its optional task semaphore;
        runs the task inline when ``numjobs == 1``, else queues it for consumers.
        """
        if hasattr(tsk, 'semaphore'):
            sem = tsk.semaphore
            try:
                sem.acquire(tsk)
            except IndexError:
                # semaphore full: freeze the task until a slot is released
                sem.waiting.add(tsk)
                return
        self.count += 1
        self.processed += 1
        if self.numjobs == 1:
            tsk.log_display(tsk.generator.bld)
            try:
                self.process_task(tsk)
            finally:
                self.out.put(tsk)
        else:
            self.add_task(tsk)
    def process_task(self, tsk):
        """
        Processes a task and attempts to stop the build in case of errors
        """
        tsk.process()
        if tsk.hasrun != Task.SUCCESS:
            self.error_handler(tsk)
    def skip(self, tsk):
        """
        Mark a task as skipped/up-to-date
        """
        tsk.hasrun = Task.SKIPPED
        self.mark_finished(tsk)
    def cancel(self, tsk):
        """
        Mark a task as failed because of unsatisfiable dependencies
        """
        tsk.hasrun = Task.CANCELED
        self.mark_finished(tsk)
    def error_handler(self, tsk):
        """
        Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set,
        unless the build is executed with::

            $ waf build -k

        :param tsk: task instance
        :type tsk: :py:attr:`waflib.Task.Task`
        """
        if not self.bld.keep:
            self.stop = True
        self.error.append(tsk)
    def task_status(self, tsk):
        """
        Obtains the task status to decide whether to run it immediately or not.

        :return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER`
        :rtype: integer
        """
        try:
            return tsk.runnable_status()
        except Exception:
            self.processed += 1
            tsk.err_msg = traceback.format_exc()
            if not self.stop and self.bld.keep:
                self.skip(tsk)
                if self.bld.keep == 1:
                    # if -k stop on the first exception, if -kk try to go as far as possible
                    if Logs.verbose > 1 or not self.error:
                        self.error.append(tsk)
                    self.stop = True
                else:
                    if Logs.verbose > 1:
                        self.error.append(tsk)
                return Task.EXCEPTION
            tsk.hasrun = Task.EXCEPTION
            self.error_handler(tsk)
            return Task.EXCEPTION
    def start(self):
        """
        Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to
        :py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread
        has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out`
        and marks the build as failed by setting the ``stop`` flag.
        If only one job is used, then executes the tasks one by one, without consumers.
        """
        self.total = self.bld.total()
        while not self.stop:
            self.refill_task_list()
            # consider the next task
            tsk = self.get_next_task()
            if not tsk:
                if self.count:
                    # tasks may add new ones after they are run
                    continue
                else:
                    # no tasks to run, no tasks running, time to exit
                    break
            if tsk.hasrun:
                # if the task is marked as "run", just skip it
                self.processed += 1
                continue
            if self.stop:  # stop immediately after a failure is detected
                break
            st = self.task_status(tsk)
            if st == Task.RUN_ME:
                self._add_task(tsk)
            elif st == Task.ASK_LATER:
                self.postpone(tsk)
            elif st == Task.SKIP_ME:
                self.processed += 1
                self.skip(tsk)
                self.add_more_tasks(tsk)
            elif st == Task.CANCEL_ME:
                # A dependency problem has occurred, and the
                # build is most likely run with `waf -k`
                if Logs.verbose > 1:
                    self.error.append(tsk)
                self.processed += 1
                self.cancel(tsk)
        # self.count represents the tasks that have been made available to the consumer threads
        # collect all the tasks after an error else the message may be incomplete
        while self.error and self.count:
            self.get_out()
        # a None sentinel wakes up the Spawner so it can terminate
        self.ready.put(None)
        if not self.stop:
            assert not self.count
            assert not self.postponed
            assert not self.incomplete
    def prio_and_split(self, tasks):
        """
        Label input tasks with priority values, and return a pair containing
        the tasks that are ready to run and the tasks that are necessarily
        waiting for other tasks to complete.

        The priority system is really meant as an optional layer for optimization:
        dependency cycles are found quickly, and builds should be more efficient.
        A high priority number means that a task is processed first.

        This method can be overridden to disable the priority system::

            def prio_and_split(self, tasks):
                return tasks, []

        :return: A pair of task lists
        :rtype: tuple
        """
        # to disable:
        #return tasks, []
        for x in tasks:
            x.visited = 0
        reverse = self.revdeps
        groups_done = set()
        for x in tasks:
            for k in x.run_after:
                if isinstance(k, Task.TaskGroup):
                    if k not in groups_done:
                        groups_done.add(k)
                        for j in k.prev:
                            reverse[j].add(k)
                else:
                    reverse[k].add(x)
        # the priority number is not the tree depth
        def visit(n):
            if isinstance(n, Task.TaskGroup):
                return sum(visit(k) for k in n.next)
            if n.visited == 0:
                n.visited = 1
                if n in reverse:
                    rev = reverse[n]
                    n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev)
                else:
                    n.prio_order = n.tree_weight
                n.visited = 2
            elif n.visited == 1:
                raise Errors.WafError('Dependency cycle found!')
            return n.prio_order
        for x in tasks:
            if x.visited != 0:
                # must visit all to detect cycles
                continue
            try:
                visit(x)
            except Errors.WafError:
                self.debug_cycles(tasks, reverse)
        ready = []
        waiting = []
        for x in tasks:
            for k in x.run_after:
                if not k.hasrun:
                    waiting.append(x)
                    break
            else:
                ready.append(x)
        return (ready, waiting)
    def debug_cycles(self, tasks, reverse):
        """
        Re-walks the reverse dependency graph to report a minimal
        ``run_after`` cycle when :py:meth:`prio_and_split` detects one.
        """
        tmp = {}
        for x in tasks:
            tmp[x] = 0
        def visit(n, acc):
            if isinstance(n, Task.TaskGroup):
                for k in n.next:
                    visit(k, acc)
                return
            if tmp[n] == 0:
                tmp[n] = 1
                for k in reverse.get(n, []):
                    visit(k, [n] + acc)
                tmp[n] = 2
            elif tmp[n] == 1:
                lst = []
                for tsk in acc:
                    lst.append(repr(tsk))
                    if tsk is n:
                        # exclude prior nodes, we want the minimum cycle
                        break
                raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst))
        for x in tasks:
            visit(x, [])
|
kushview/libjuce
|
waflib/Runner.py
|
Python
|
gpl-2.0
| 16,392
|
[
"VisIt"
] |
156ea5eaec59db093c1c27582c6423f4ef65025b073001857b24313d42d938e0
|
#
# File:
# TRANS_panel.py
#
# Synopsis:
# Illustrates how to create a panel plot
#
# Categories:
# contour plot
# panel plot
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to create a panel plot.
#
# Effects illustrated:
# o Read netCDF data
# o Drawing a contour fill plot
# o Creating a panel plot
#
# Output:
# A single visualization is produced.
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
'''
Transition Guide Python Example: TRANS_panel.py
- Drawing a contour fill plot
- Creating a panel plot
18-09-10 kmf
'''
from __future__ import print_function
import Ngl, Nio
#-- open file and read variables
f = Nio.open_file("../read_data/rectilinear_grid_2D.nc", "r")
var = f.variables["tsurf"]
lat = f.variables["lat"][:]
lon = f.variables["lon"][:]
#-- start the graphics
wks = Ngl.open_wks("png","plot_TRANS_panel_py")
#-- resource settings
res = Ngl.Resources()
res.nglDraw = False #-- don't draw plots
res.nglFrame = False #-- don't advance the frame
res.cnFillOn = True #-- contour fill
res.cnFillPalette = "cmp_b2r" #-- choose color map
res.cnLineLabelsOn = False #-- no line labels
res.lbLabelBarOn = False #-- don't draw a labelbar
res.sfXArray = lon #-- coordinates for the x-axis
res.sfYArray = lat #-- coordinates for the y-axis
#-- create the contour plots (first four time steps of tsurf)
plot = []
for i in range(0,4):
    p = Ngl.contour_map(wks,var[i,:,:],res)
    plot.append(p)
#-- panel resources
pnlres = Ngl.Resources()
#pnlres.nglDraw = False
pnlres.nglFrame = False
pnlres.nglPanelLabelBar = True #-- common labelbar
pnlres.txString = "TRANS: panel example" #-- panel title
pnlres.txFontHeightF = 0.02 #-- text font size
#-- draw the four plots as a 2x2 panel
Ngl.panel(wks,plot[0:4],[2,2],pnlres)
#-- add title string,long_name and units string to panel
txres = Ngl.Resources()
txres.txFontHeightF = 0.020
Ngl.text_ndc(wks,"TRANS: panel example",0.5,0.825,txres)
txres.txFontHeightF = 0.012
Ngl.text_ndc(wks,f.variables["tsurf"].attributes['long_name'],0.12,0.79,txres)
Ngl.text_ndc(wks,f.variables["tsurf"].attributes['units'], 0.975,0.79,txres)
#-- advance the frame
Ngl.frame(wks)
Ngl.end()
|
KMFleischer/PyEarthScience
|
Transition_examples_NCL_to_PyNGL/panel/TRANS_panel.py
|
Python
|
mit
| 2,506
|
[
"NetCDF"
] |
a9a5216764c21789d064cab017aad4c84602a9998ad0fa53d0a9055957847345
|
"""
This module contains classes for representing Member object
For further information visit http://codeforces.com/api/help/objects#Member
"""
from . import BaseJsonObject
__all__ = ['Member']
class Member(BaseJsonObject):
    """
    Represents a Codeforces Member object.

    For further information visit http://codeforces.com/api/help/objects#Member
    """

    def __init__(self, data=None):
        # initialize backing field before the base class parses `data`
        self._handle = None
        super().__init__(data)

    def __repr__(self):
        return '<Member: {}>'.format(self.handle)

    def load_required_fields_from_dict(self, values):
        """Populate required fields ('handle') from a parsed JSON dict."""
        super().load_required_fields_from_dict(values)
        self.handle = values['handle']

    @property
    def handle(self):
        """
        :return: Codeforces user handle or None if not initialized
        :rtype: str
        """
        return self._handle

    @handle.setter
    def handle(self, value):
        """
        :param value: Codeforces user handle.
        :type value: str
        """
        assert isinstance(value, str)
        self._handle = value
|
soon/CodeforcesAPI
|
codeforces/api/json_objects/member.py
|
Python
|
mit
| 1,075
|
[
"VisIt"
] |
40dca8b9c89e592711b02968cfe0c8910d85e97955c3d14e2764147797bd31ab
|
#!/usr/bin/env python
from __future__ import division
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import os
from unittest import TestCase, main
import tempfile
import h5py
import numpy as np
from future.utils.six import StringIO, BytesIO
from qiita_db.metadata_template import SampleTemplate, PrepTemplate
from qiita_ware.util import (per_sample_sequences, stats_from_df, open_file,
_is_string_or_bytes)
def mock_sequence_iter(items):
    """Yield {'SequenceID', 'Sequence'} dicts for each (id, seq) pair."""
    for seq_id, seq in items:
        yield {'SequenceID': seq_id, 'Sequence': seq}
class UtilTests(TestCase):
    """Tests for qiita_ware.util helpers (relies on module-level `sequences`)."""
    def setUp(self):
        # Fixed seed: per_sample_sequences draws a random value per sequence,
        # so the expected subsets below depend on this exact seed (123).
        np.random.seed(123)
    def test_per_sample_sequences_simple(self):
        max_seqs = 10
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated to each sequence
        exp = sorted([('b_2', 'AATTGGCC-b2'),
                      ('a_5', 'AATTGGCC-a5'),
                      ('a_1', 'AATTGGCC-a1'),
                      ('a_4', 'AATTGGCC-a4'),
                      ('b_1', 'AATTGGCC-b1'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_3', 'AATTGGCC-c3'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('c_2', 'AATTGGCC-c2'),
                      ('c_1', 'AATTGGCC-c1')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
        self.assertEqual(sorted(obs), exp)
    def test_per_sample_sequences_min_seqs(self):
        max_seqs = 10
        min_seqs = 3
        # note, the result here is sorted by sequence_id but is in heap order
        # by the random values associated to each sequence
        exp = sorted([('a_5', 'AATTGGCC-a5'),
                      ('a_1', 'AATTGGCC-a1'),
                      ('a_4', 'AATTGGCC-a4'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_3', 'AATTGGCC-c3'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('c_2', 'AATTGGCC-c2'),
                      ('c_1', 'AATTGGCC-c1')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs,
                                   min_seqs)
        self.assertEqual(sorted(obs), exp)
    def test_per_sample_sequences_complex(self):
        # max_seqs=2 keeps at most two sequences per sample prefix
        max_seqs = 2
        exp = sorted([('b_2', 'AATTGGCC-b2'),
                      ('b_1', 'AATTGGCC-b1'),
                      ('a_2', 'AATTGGCC-a2'),
                      ('a_3', 'AATTGGCC-a3'),
                      ('c_1', 'AATTGGCC-c1'),
                      ('c_2', 'AATTGGCC-c2')])
        obs = per_sample_sequences(mock_sequence_iter(sequences), max_seqs)
        self.assertEqual(sorted(obs), exp)
    def test_stats_from_df(self):
        obs = stats_from_df(SampleTemplate(1).to_dataframe())
        for k in obs:
            self.assertEqual(obs[k], SUMMARY_STATS[k])
    def test_dataframe_from_template(self):
        template = PrepTemplate(1)
        obs = template.to_dataframe()
        # 27 samples
        self.assertEqual(len(obs), 27)
        # NOTE(review): assertTrue(a, b) treats `b` as the failure *message*,
        # so the two checks below always pass when the set is non-empty; the
        # intent looks like assertEqual -- confirm before changing.
        self.assertTrue(set(obs.index), {
            u'SKB1.640202', u'SKB2.640194', u'SKB3.640195', u'SKB4.640189',
            u'SKB5.640181', u'SKB6.640176', u'SKB7.640196', u'SKB8.640193',
            u'SKB9.640200', u'SKD1.640179', u'SKD2.640178', u'SKD3.640198',
            u'SKD4.640185', u'SKD5.640186', u'SKD6.640190', u'SKD7.640191',
            u'SKD8.640184', u'SKD9.640182', u'SKM1.640183', u'SKM2.640199',
            u'SKM3.640197', u'SKM4.640180', u'SKM5.640177', u'SKM6.640187',
            u'SKM7.640188', u'SKM8.640201', u'SKM9.640192'})
        # NOTE(review): same assertTrue(a, b) issue as above.
        self.assertTrue(set(obs.columns), {
            u'tot_org_carb', u'common_name', u'has_extracted_data',
            u'required_sample_info_status', u'water_content_soil',
            u'env_feature', u'assigned_from_geo', u'altitude', u'env_biome',
            u'texture', u'has_physical_specimen', u'description_duplicate',
            u'physical_location', u'latitude', u'ph', u'host_taxid',
            u'elevation', u'description', u'collection_timestamp',
            u'taxon_id', u'samp_salinity', u'host_subject_id', u'sample_type',
            u'season_environment', u'temp', u'country', u'longitude',
            u'tot_nitro', u'depth', u'anonymized_name', u'target_subfragment',
            u'sample_center', u'samp_size', u'run_date', u'experiment_center',
            u'pcr_primers', u'center_name', u'barcodesequence', u'run_center',
            u'run_prefix', u'library_construction_protocol', u'emp_status',
            u'linkerprimersequence', u'experiment_design_description',
            u'target_gene', u'center_project_name', u'illumina_technology',
            u'sequencing_meth', u'platform', u'experiment_title',
            u'study_center'})
class TestFilePathOpening(TestCase):
    """Tests adapted from scikit-bio's skbio.io.util tests"""
    def test_is_string_or_bytes(self):
        self.assertTrue(_is_string_or_bytes('foo'))
        self.assertTrue(_is_string_or_bytes(u'foo'))
        self.assertTrue(_is_string_or_bytes(b'foo'))
        self.assertFalse(_is_string_or_bytes(StringIO('bar')))
        self.assertFalse(_is_string_or_bytes([1]))
    def test_file_closed(self):
        """File gets closed in decorator"""
        f = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        with open_file(filepath) as fh:
            pass
        self.assertTrue(fh.closed)
    def test_file_closed_harder(self):
        """File gets closed in decorator, even if exceptions happen."""
        f = tempfile.NamedTemporaryFile('r')
        filepath = f.name
        try:
            with open_file(filepath) as fh:
                raise TypeError
        except TypeError:
            self.assertTrue(fh.closed)
        else:
            # If we're here, no exceptions have been raised inside the
            # try clause, so the context manager swallowed them. No
            # good.
            raise Exception("`open_file` didn't propagate exceptions")
    def test_filehandle(self):
        """Filehandles slip through untouched"""
        with tempfile.TemporaryFile('r') as fh:
            with open_file(fh) as ffh:
                self.assertTrue(fh is ffh)
            # And it doesn't close the file-handle
            self.assertFalse(fh.closed)
    def test_StringIO(self):
        """StringIO (useful e.g. for testing) slips through."""
        f = StringIO("File contents")
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_BytesIO(self):
        """BytesIO (useful e.g. for testing) slips through."""
        f = BytesIO(b"File contents")
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO(self):
        # in-memory HDF5 file (no backing store) also passes through untouched
        f = h5py.File('test', driver='core', backing_store=False)
        with open_file(f) as fh:
            self.assertTrue(fh is f)
    def test_hdf5IO_open(self):
        # a path to an existing HDF5 file is opened as an h5py.File
        name = None
        with tempfile.NamedTemporaryFile(delete=False) as fh:
            name = fh.name
            fh.close()
            h5file = h5py.File(name, 'w')
            h5file.close()
            with open_file(name) as fh_inner:
                self.assertTrue(isinstance(fh_inner, h5py.File))
        os.remove(name)
# comment indicates the expected random value
# NOTE(review): test fixture of (sequence id, sequence) records; the trailing
# comment on each entry appears to record the random draw used to order the
# records in the test that consumes `sequences` -- confirm against that test.
sequences = [
    ('a_1', 'AATTGGCC-a1'), # 2, 3624216819017203053
    ('a_2', 'AATTGGCC-a2'), # 5, 5278339153051796802
    ('b_1', 'AATTGGCC-b1'), # 4, 4184670734919783522
    ('b_2', 'AATTGGCC-b2'), # 0, 946590342492863505
    ('a_4', 'AATTGGCC-a4'), # 3, 4048487933969823850
    ('a_3', 'AATTGGCC-a3'), # 7, 7804936597957240377
    ('c_1', 'AATTGGCC-c1'), # 8, 8868534167180302049
    ('a_5', 'AATTGGCC-a5'), # 1, 3409506807702804593
    ('c_2', 'AATTGGCC-c2'), # 9, 8871627813779918895
    ('c_3', 'AATTGGCC-c3') # 6, 7233291490207274528
]
# Expected summary statistics for the test study's sample metadata: each
# metadata category maps to a sorted list of (value, occurrence count) pairs
# over the 27 samples.  Used as the reference value in assertions.
SUMMARY_STATS = {
    'altitude': [('0.0', 27)],
    'anonymized_name': [('SKB1', 1),
                        ('SKB2', 1),
                        ('SKB3', 1),
                        ('SKB4', 1),
                        ('SKB5', 1),
                        ('SKB6', 1),
                        ('SKB7', 1),
                        ('SKB8', 1),
                        ('SKB9', 1),
                        ('SKD1', 1),
                        ('SKD2', 1),
                        ('SKD3', 1),
                        ('SKD4', 1),
                        ('SKD5', 1),
                        ('SKD6', 1),
                        ('SKD7', 1),
                        ('SKD8', 1),
                        ('SKD9', 1),
                        ('SKM1', 1),
                        ('SKM2', 1),
                        ('SKM3', 1),
                        ('SKM4', 1),
                        ('SKM5', 1),
                        ('SKM6', 1),
                        ('SKM7', 1),
                        ('SKM8', 1),
                        ('SKM9', 1)],
    'assigned_from_geo': [('n', 27)],
    'barcodesequence': [('AACTCCTGTGGA', 1),
                        ('ACCTCAGTCAAG', 1),
                        ('ACGCACATACAA', 1),
                        ('AGCAGGCACGAA', 1),
                        ('AGCGCTCACATC', 1),
                        ('ATATCGCGATGA', 1),
                        ('ATGGCCTGACTA', 1),
                        ('CATACACGCACC', 1),
                        ('CCACCCAGTAAC', 1),
                        ('CCGATGCCTTGA', 1),
                        ('CCTCGATGCAGT', 1),
                        ('CCTCTGAGAGCT', 1),
                        ('CGAGGTTCTGAT', 1),
                        ('CGCCGGTAATCT', 1),
                        ('CGGCCTAAGTTC', 1),
                        ('CGTAGAGCTCTC', 1),
                        ('CGTGCACAATTG', 1),
                        ('GATAGCACTCGT', 1),
                        ('GCGGACTATTCA', 1),
                        ('GTCCGCAAGTTA', 1),
                        ('TAATGGTCGTAG', 1),
                        ('TAGCGCGAACTT', 1),
                        ('TCGACCAAACAC', 1),
                        ('TGAGTGGTCTGT', 1),
                        ('TGCTACAGACGT', 1),
                        ('TGGTTATGGCAC', 1),
                        ('TTGCACCGTCGA', 1)],
    'center_name': [('ANL', 27)],
    'center_project_name': [('None', 27)],
    'collection_timestamp': [('2011-11-11 13:00:00', 27)],
    'common_name': [('rhizosphere metagenome', 9),
                    ('root metagenome', 9),
                    ('soil metagenome', 9)],
    'country': [('GAZ:United States of America', 27)],
    'data_type_id': [('2', 27)],
    'depth': [('0.15', 27)],
    'description': [('Cannabis Soil Microbiome', 27)],
    'description_duplicate': [('Bucu Rhizo', 3),
                              ('Bucu Roots', 3),
                              ('Bucu bulk', 3),
                              ('Burmese Rhizo', 3),
                              ('Burmese bulk', 3),
                              ('Burmese root', 3),
                              ('Diesel Rhizo', 3),
                              ('Diesel Root', 3),
                              ('Diesel bulk', 3)],
    'ebi_study_accession': [('None', 27)],
    'ebi_submission_accession': [('None', 27)],
    'elevation': [('114.0', 27)],
    'emp_status': [('EMP', 27)],
    'env_biome': [('ENVO:Temperate grasslands, savannas, and shrubland biome',
                   27)],
    'env_feature': [('ENVO:plant-associated habitat', 27)],
    'experiment_center': [('ANL', 27)],
    'experiment_design_description': [('micro biome of soil and rhizosphere '
                                       'of cannabis plants from CA', 27)],
    'experiment_title': [('Cannabis Soil Microbiome', 27)],
    'has_extracted_data': [('True', 27)],
    'has_physical_specimen': [('True', 27)],
    'host_subject_id': [('1001:B1', 1),
                        ('1001:B2', 1),
                        ('1001:B3', 1),
                        ('1001:B4', 1),
                        ('1001:B5', 1),
                        ('1001:B6', 1),
                        ('1001:B7', 1),
                        ('1001:B8', 1),
                        ('1001:B9', 1),
                        ('1001:D1', 1),
                        ('1001:D2', 1),
                        ('1001:D3', 1),
                        ('1001:D4', 1),
                        ('1001:D5', 1),
                        ('1001:D6', 1),
                        ('1001:D7', 1),
                        ('1001:D8', 1),
                        ('1001:D9', 1),
                        ('1001:M1', 1),
                        ('1001:M2', 1),
                        ('1001:M3', 1),
                        ('1001:M4', 1),
                        ('1001:M5', 1),
                        ('1001:M6', 1),
                        ('1001:M7', 1),
                        ('1001:M8', 1),
                        ('1001:M9', 1)],
    'host_taxid': [('3483', 27)],
    'illumina_technology': [('MiSeq', 27)],
    'latitude': [('0.291867635913', 1),
                 ('3.21190859967', 1),
                 ('4.59216095574', 1),
                 ('10.6655599093', 1),
                 ('12.6245524972', 1),
                 ('12.7065957714', 1),
                 ('13.089194595', 1),
                 ('23.1218032799', 1),
                 ('29.1499460692', 1),
                 ('31.7167821863', 1),
                 ('35.2374368957', 1),
                 ('38.2627021402', 1),
                 ('40.8623799474', 1),
                 ('43.9614715197', 1),
                 ('44.9725384282', 1),
                 ('53.5050692395', 1),
                 ('57.571893782', 1),
                 ('60.1102854322', 1),
                 ('63.6505562766', 1),
                 ('68.0991287718', 1),
                 ('68.51099627', 1),
                 ('74.0894932572', 1),
                 ('78.3634273709', 1),
                 ('82.8302905615', 1),
                 ('84.0030227585', 1),
                 ('85.4121476399', 1),
                 ('95.2060749748', 1)],
    'library_construction_protocol': [('This analysis was done as in Caporaso '
                                       'et al 2011 Genome research. The PCR '
                                       'primers (F515/R806) were developed '
                                       'against the V4 region of the 16S rRNA '
                                       '(both bacteria and archaea), which we '
                                       'determined would yield optimal '
                                       'community clustering with reads of '
                                       'this length using a procedure '
                                       'similar to that of ref. 15. [For '
                                       'reference, this primer pair amplifies '
                                       'the region 533_786 in the Escherichia '
                                       'coli strain 83972 sequence '
                                       '(greengenes accession no. '
                                       'prokMSA_id:470367).] The reverse PCR '
                                       'primer is barcoded with a 12-base '
                                       'error-correcting Golay code to '
                                       'facilitate multiplexing of up '
                                       'to 1,500 samples per lane, and both '
                                       'PCR primers contain sequencer adapter '
                                       'regions.', 27)],
    'linkerprimersequence': [('GTGCCAGCMGCCGCGGTAA', 27)],
    'longitude': [
        ('2.35063674718', 1),
        ('3.48274264219', 1),
        ('6.66444220187', 1),
        ('15.6526750776', 1),
        ('26.8138925876', 1),
        ('27.3592668624', 1),
        ('31.2003474585', 1),
        ('31.6056761814', 1),
        ('32.5563076447', 1),
        ('34.8360987059', 1),
        ('42.838497795', 1),
        ('63.5115213108', 1),
        ('65.3283470202', 1),
        ('66.1920014699', 1),
        ('66.8954849864', 1),
        ('68.5041623253', 1),
        ('68.5945325743', 1),
        ('70.784770579', 1),
        ('74.423907894', 1),
        ('74.7123248382', 1),
        ('82.1270418227', 1),
        ('82.8516734159', 1),
        ('84.9722975792', 1),
        ('86.3615778099', 1),
        ('92.5274472082', 1),
        ('95.5088566087', 1),
        ('96.0693176066', 1)],
    'pcr_primers': [('FWD:GTGCCAGCMGCCGCGGTAA; REV:GGACTACHVGGGTWTCTAAT', 27)],
    'ph': [('6.8', 9), ('6.82', 10), ('6.94', 8)],
    'physical_location': [('ANL', 27)],
    'platform': [('Illumina', 27)],
    'required_sample_info_status': [('completed', 27)],
    'run_center': [('ANL', 27)],
    'run_date': [('8/1/12', 27)],
    'run_prefix': [('s_G1_L001_sequences', 27)],
    'samp_salinity': [('7.1', 9), ('7.15', 9), ('7.44', 9)],
    'samp_size': [('.25,g', 27)],
    'sample_center': [('ANL', 27)],
    'sample_type': [('ENVO:soil', 27)],
    'season_environment': [('winter', 27)],
    'sequencing_meth': [('Sequencing by synthesis', 27)],
    'study_center': [('CCME', 27)],
    'target_gene': [('16S rRNA', 27)],
    'target_subfragment': [('V4', 27)],
    'taxon_id': [('410658', 9), ('939928', 9), ('1118232', 9)],
    'temp': [('15.0', 27)],
    'texture': [('63.1 sand, 17.7 silt, 19.2 clay', 9),
                ('64.6 sand, 17.6 silt, 17.8 clay', 9),
                ('66 sand, 16.3 silt, 17.7 clay', 9)],
    'tot_nitro': [('1.3', 9), ('1.41', 9), ('1.51', 9)],
    'tot_org_carb': [('3.31', 9), ('4.32', 9), ('5.0', 9)],
    'water_content_soil': [('0.101', 9), ('0.164', 9), ('0.178', 9)]}
# Run this module's test suite when executed directly.
if __name__ == '__main__':
    main()
|
RNAer/qiita
|
qiita_ware/test/test_util.py
|
Python
|
bsd-3-clause
| 17,827
|
[
"scikit-bio"
] |
079b2410c7750fad96f22d4eb83c9875b2d38e9436d4d6c4b2431258379923bd
|
################################################################################
# Copyright Adam J. Jackson (2015) #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import numpy as np
from scipy.interpolate import interp1d, interp2d
from numpy import genfromtxt
import re
def get_potential_aims(file, property):
    """Thermodynamic property interpolation function. Requires phonopy-FHI-aims output file.
    Cv in kB/cell. All other properties in eV/cell

    :param file: path to a phonopy-FHI-aims thermodynamics output file.
        Columns are read as T, F, U, Cv and a fourth property column that is
        negated to obtain TS (presumably the file stores -TS -- TODO confirm
        against the phonopy-FHI-aims output format).
    :param property: alias selecting the property (see branches below).
        (Parameter names shadow builtins, but are kept for keyword-callers.)
    :returns: callable interpolating the property linearly in temperature.
    :raises RuntimeError: if `property` is not a recognised alias.
    """
    data = genfromtxt(file)
    T = data[:, 0]
    if property in ('Cv', 'Cp', 'heat_capacity', 'C'):
        potential = data[:, 3]
    elif property in ('U', 'internal_energy'):
        potential = data[:, 2]
    elif property in ('F', 'A', 'Helmholtz', 'free_energy'):
        potential = data[:, 1]
    elif property in ('TS',):
        # BUGFIX: was `property in ('TS')` -- parentheses around a bare
        # string are not a tuple, so this tested substring membership and
        # 'S' (and 'T') matched here, shadowing the entropy branch below.
        potential = -data[:, 4]
    elif property in ('S', 'Entropy', 'entropy'):
        potential = -data[:, 4] / T
    else:
        raise RuntimeError('Property not found')
    thefunction = interp1d(T, potential, kind='linear')
    return thefunction
def get_potential_nist_table(file, property):
    """Build a cubic interpolating function for a NIST-JANAF table property.

    The table is whitespace-delimited with two header rows; the code reads
    column 0 as T, column 1 as Cp, column 2 as S and column 4 as H
    (presumably tabulated in kJ, hence the 1E3 factors -- verify against the
    table source).  All properties are in J, mol and K.

    :param file: path to the NIST-JANAF data file
    :param property: alias naming the property (see branches below)
    :returns: callable mapping temperature -> property value (cubic spline)
    :raises RuntimeError: for an unrecognised property alias
    """
    table = genfromtxt(file, skip_header=2)
    temperatures = table[:, 0]
    if property in ('Cp', 'C', 'heat_capacity'):
        values = table[:, 1]
    elif property in ('S', 'entropy'):
        values = table[:, 2]
    elif property in ('H', 'enthalpy'):
        # Enthalpy relative to the first tabulated temperature.
        values = (table[:, 4] - table[0, 4]) * 1E3
    elif property in ('U', 'internal_energy'):
        # U = H - PV; for ideal gas molar PV = RT so U = H - RT
        from scipy.constants import R
        values = (table[:, 4] - table[0, 4]) * 1E3 - R * temperatures
    elif property in ('DH', 'Delta_H', 'standard_enthalpy_change'):
        values = table[:, 4] * 1E3
    else:
        raise RuntimeError('Property not found')
    return interp1d(temperatures, values, kind='cubic')
def get_potential_sulfur_table(filename):
    """
    Read thermodynamic property as function of T, P from datafile.
    Datafile should be generated by the code at http://github.com/WMD-bath/sulfur-model
    or follow the same format

    Format assumed here: comma-delimited rows of T followed by one property
    column per pressure; the pressures themselves appear as floats in the
    '#'-prefixed header line.
    """
    # Import chemical potential in J mol-1 vs T, P from file
    data = genfromtxt(filename, comments='#',delimiter=',')
    T = data[:,0].flatten()
    with open(filename,'r') as f:
        header=f.readline()
    # NOTE(review): the '.' in this regex is unescaped, so it matches ANY
    # character between two digit runs (e.g. "1E5" as well as "1.5").
    # Escaping it would reject scientific-notation pressures -- confirm the
    # intended header format before "fixing" this.
    P = [float(p) for p in re.findall(r'\d+.\d+',header)]
    # 2-D cubic interpolation over (T, ln P).
    # NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    # recent SciPy releases -- migration (e.g. RectBivariateSpline) needed.
    thefunction = interp2d(T,np.log(P),data[:,1:].transpose(), kind='cubic')
    def lin_P_function(T,P):
        # Wrapper so callers supply P on a linear scale.
        return thefunction(T,np.log(P))
    return lin_P_function
|
WMD-Bath/CZTS-model
|
interpolate_thermal_property.py
|
Python
|
gpl-3.0
| 3,788
|
[
"FHI-aims",
"phonopy"
] |
6cf693b0c42dfb19fcd96013938f2a843c2f50f440c682452dc9fe295f6c46d9
|
from __future__ import print_function
from time import time
from os import listdir
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
import numpy as np
import glob
import scipy.io
"""
Custom TFIDF NMF featurizer for the landmark classification dataset.
Built during HackMIT 2015.
Author: Can Koc <cankoc@berkeley.edu>, Brian Su <bsu@berkeley.edu>, Cem Koc <cemkoc@berkeley.edu>
Connects to Clarifai API to extract textual corpus from landmark images using Deep Learning Image Classification methods.
License: BSD
"""
# Dataset sizes (counts of text documents).
NUM_TRAINING_EXAMPLES = 2620
NUM_TEST_EXAMPLES = 1
# Per-landmark corpora of Clarifai-extracted text, one .txt file per image.
GOLDEN_GATE_DIR = "client/ggb_text/"
STONEHENGE_DIR = "client/stonehenge_text/"
EIFFEL_DIR = "client/eiffel_text/"
ROME_DIR = "client/rome_text/"
TEST_DIR = "client/test/"
# TF-IDF / NMF hyper-parameters.
n_samples = NUM_TRAINING_EXAMPLES + NUM_TEST_EXAMPLES
n_features = 1000
n_topics = 20
n_top_words = 1000
def vectorize_and_featurize(all_paths):
    """Load up to ``n_samples`` text documents, TF-IDF vectorize them and
    factorize the result with NMF.

    :param all_paths: iterable of paths to text documents
    :returns: the (n_documents x n_topics) NMF data matrix
    """
    t0 = time()
    print("Loading dataset and extracting TF-IDF features...")
    vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
                                 stop_words='english')
    allowed_chars = '+abcdefghijklmnopqrstuvwxyz '
    documents = []
    # Only the first n_samples paths are used (same effect as the original
    # per-iteration counter/break).
    for path in all_paths[:n_samples]:
        with open(path, 'r') as f:
            try:
                content = f.read().lower()
                # Keep only lowercase letters, '+' and spaces.  A single
                # join replaces the original quadratic `text_string += c`
                # loop (which also had a no-op `else: text_string += ''`).
                content = ''.join(c for c in content if c in allowed_chars)
                content = content.encode(errors='ignore').strip()
                documents.append(content)
            except Exception as e:
                # Best effort: report and skip unreadable documents.
                print(e)
                continue
    # Pre-processing is done
    tfidf = vectorizer.fit_transform(documents)
    print("done in %0.3fs." % (time() - t0))
    # Fit NMF model
    print("Fitting the NMF model with n_samples=%d and n_features=%d..." % (n_samples, n_features))
    # Using fit_transform method because in the end I want to get the data matrix
    nmf = NMF(n_components=n_topics, random_state=1).fit_transform(tfidf)
    print("done in %0.3fs." % (time() - t0))
    # NOTE: the original also called vectorizer.get_feature_names() into an
    # unused local; dropped (that accessor is removed in scikit-learn >= 1.2).
    print()
    print("NMF Data Matrix")
    print(nmf.shape)
    return nmf
def run():
    """Gather the per-landmark documents, featurize them and save the
    training/test matrices plus labels to client/landmark_data.mat."""
    ggb_filenames = glob.glob(GOLDEN_GATE_DIR + '*.txt')
    stonehenge_filenames = glob.glob(STONEHENGE_DIR + '*.txt')
    eiffel_filenames = glob.glob(EIFFEL_DIR + '*.txt')
    rome_filenames = glob.glob(ROME_DIR + '*.txt')
    test_filenames = [TEST_DIR + str(x) + '.txt' for x in range(NUM_TEST_EXAMPLES)]
    giant_document_paths = ggb_filenames + stonehenge_filenames + eiffel_filenames + rome_filenames
    giant_document_paths += test_filenames
    all_features = vectorize_and_featurize(giant_document_paths)
    # BUGFIX: `xrange` is Python-2-only and raises NameError on Python 3;
    # list(range(...)) is identical on both interpreters.
    indices = list(range(NUM_TRAINING_EXAMPLES + NUM_TEST_EXAMPLES))
    training_indices = indices[0:NUM_TRAINING_EXAMPLES]
    test_indices = indices[NUM_TRAINING_EXAMPLES:]
    X = all_features[np.array(training_indices), :]
    test_features = all_features[np.array(test_indices), :]
    # Class labels: 0=Golden Gate, 1=Stonehenge, 2=Eiffel, 3=Rome.
    Y = [0]*len(ggb_filenames) + [1]*len(stonehenge_filenames) + [2]*len(eiffel_filenames) + [3]*len(rome_filenames)
    file_dict = {}
    file_dict['training_data'] = X
    file_dict['training_labels'] = Y
    file_dict['test_data'] = test_features
    print("Printing Final File Sizes")
    print("X is: {0} | Y is: {1} | Test Size is: {2}\n".format(len(X), len(Y), len(test_features)))
    scipy.io.savemat('client/landmark_data.mat', file_dict)
if __name__ == '__main__':
    run()
|
briansudo/Atlas
|
client/nmf_featurize.py
|
Python
|
apache-2.0
| 3,729
|
[
"Brian"
] |
04d4fe0fe546c40dbd832bdb246785d0ac2c4c231059b8644abbf42424a91c4f
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import re
from ast import BinOp, Mod, parse
from six import text_type
import click
from sanity_utils import find_files, XNodeVisitor
# Matches a PEP 263 style source-encoding declaration comment such as
# "# -*- coding: utf-8 -*-" (case-insensitive; applied per-line via MULTILINE).
# NOTE(review): the trailing `.+$` requires at least one character after the
# encoding name, so a bare "# coding: utf-8" line would NOT match -- confirm
# this is intended.
encoding_comment_regexp = re.compile(r'^#.+coding[=:]\s*([-\w.]+).+$', re.MULTILINE | re.I)
class StringVisitor(XNodeVisitor):
    """AST visitor that collects string literals which look like
    human-readable text (and thus ought to be unicode), plus strings used
    as the template of a `%` formatting expression."""
    def __init__(self):
        self.texts = set()
        self.formattees = set()
    def visit_Str(self, node, parents): # noqa (N802)
        literal = text_type(node.s)
        # A string that is an operand of a binary `%` is a format template.
        if parents and isinstance(parents[-1], BinOp) and isinstance(parents[-1].op, Mod):
            self.formattees.add(literal)
            return
        # Multiline, all-lowercase or all-uppercase strings look like
        # docstrings or constants -- not user-facing text.
        if "\n" in literal or literal.islower() or literal.isupper():
            return
        # Text has interior spaces...
        if " " not in literal.strip():
            return
        # ...and either a formatting character or non-ASCII content.
        if "%" in literal or not all(32 <= ord(ch) < 127 for ch in literal):
            self.texts.add(literal)
    def get_stats(self):
        parts = []
        if self.texts:
            parts.append("%d text-like strings" % len(self.texts))
        if self.formattees:
            parts.append("%d formattee strings" % len(self.formattees))
        return ", ".join(parts)
    def needs_fix(self):
        # True when anything at all was collected.
        return bool(self.texts or self.formattees)
def process_file(path):
    """Parse the Python file at *path* and return the StringVisitor that
    walked it.  Files already mentioning ``unicode_literals`` anywhere in
    their bytes are not visited at all (their visitor stays empty)."""
    visitor = StringVisitor()
    with open(path, "rb") as handle:
        source = handle.read()
    already_converted = b"unicode_literals" in source
    if not already_converted:
        visitor.visit(parse(source, path))
    return visitor
def fix_file(path):
    """Rewrite *path* in place: insert ``from __future__ import
    unicode_literals`` after any leading comment block, and prepend a UTF-8
    encoding comment when the file contains non-ASCII characters but no
    encoding declaration.  A trailing newline is always appended."""
    with open(path, "rb") as handle:
        text = handle.read().decode("utf-8")
    lines = text.splitlines()
    # An encoding comment is only required when non-ASCII characters occur.
    needs_coding_line = any(ord(ch) > 127 for ch in text)
    insert_at = 0
    for index, line in enumerate(lines):
        if not line.strip():
            continue  # blank lines are neither comments nor code
        if encoding_comment_regexp.match(line):
            needs_coding_line = False  # declaration already present
        if not line.startswith("#"):
            insert_at = index  # first real (non-comment) line
            break
    if "from __future__ import unicode_literals" not in text:
        lines.insert(insert_at, "from __future__ import unicode_literals")
    text = "\n".join(lines)
    if needs_coding_line:
        text = "# -*- coding: utf-8 -*-\n" + text
    with open(path, "wb") as handle:
        handle.write(text.encode("utf-8"))
        handle.write(b"\n")
def gather_files(dirnames, filenames):
    """Return the explicitly listed ``.py`` files followed by every ``.py``
    file discovered under *dirnames*."""
    explicit = [name for name in filenames if name.endswith(".py")]
    discovered = list(find_files(dirnames, allowed_extensions=(".py",)))
    return explicit + discovered
@click.command()
@click.option("-f", "--file", "filenames", type=click.Path(exists=True, dir_okay=False), multiple=True)
@click.option("-d", "--dir", "dirnames", type=click.Path(exists=True, file_okay=False), multiple=True)
@click.option('--fix/--no-fix', default=False)
def command(filenames, dirnames, fix):
    """Report files still containing text-like byte strings; with --fix,
    rewrite them in place."""
    for target in gather_files(dirnames, filenames):
        report = process_file(target)
        if not report.needs_fix():
            continue
        print("%s: %s" % (target, report.get_stats()))
        if fix:
            print("Fixing: %s" % target)
            fix_file(target)
if __name__ == "__main__":
    command()
|
suutari/shoop
|
_misc/ensure_unicode_literals.py
|
Python
|
agpl-3.0
| 3,609
|
[
"VisIt"
] |
5b6d3f80ded4d67e7897169d8cdb3aa696d895e4d37f6bc9a5309018287c02df
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Analysis (aka Latent Semantic Indexing) in Python.
Implements scalable truncated Singular Value Decomposition in Python. The SVD
decomposition can be updated with new observations at any time (online, incremental,
memory-efficient training).
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size (though still dependent on the feature set size)
* corpora that are streamed: documents are only accessed sequentially, no
random-access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock `performance on the English Wikipedia <http://radimrehurek.com/gensim/wiki.html>`_
(2G corpus positions, 3.2M documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
*serial* = Core 2 Duo MacBook Pro 2.53Ghz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import sys
import numpy
import scipy.linalg
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
from six import iterkeys
from six.moves import xrange
# Module-level logger for this LSI implementation.
logger = logging.getLogger('gensim.models.lsimodel')
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
# number of power iterations for the stochastic algorithm
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
    """
    Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically instable) values.
    This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).
    The returned value is clipped against `k` (= never return more than `k`).

    :param s: 1-d array of eigenvalues (assumed sorted descending -- TODO confirm)
    :param k: requested number of factors (upper bound on the result)
    :param discard: maximum relative spectral mass to throw away
    :returns: int, number of factors to keep (1 <= result <= k)
    """
    # compute relative contribution of eigenvalues towards the energy spectrum
    rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
    # ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
    small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
    k = min(k, small)  # clip against k
    # lazy %-args: the message is only formatted when INFO logging is enabled
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)",
                k, 100 * rel_spectrum[k - 1])
    return k
def asfarray(a, name=''):
    """Return `a` in FORTRAN (column-major) order, converting only when
    necessary; `name` is used purely for the debug log message."""
    if a.flags.f_contiguous:
        return a
    logger.debug("converting %s array %s to FORTRAN order" % (a.shape, name))
    return numpy.asfortranarray(a)
def ascarray(a, name=''):
    """Return `a` in C (row-major) contiguous order, converting only when
    necessary; `name` is used purely for the debug log message."""
    if a.flags.contiguous:
        return a
    logger.debug("converting %s array %s to C order" % (a.shape, name))
    return numpy.ascontiguousarray(a)
class Projection(utils.SaveLoad):
    """Container for a truncated SVD projection (left singular vectors `u`
    and singular values `s`), with in-core construction and a merge
    operation for combining two projections.

    NOTE(review): `merge()` relies on `numpy.bmat`, which is deprecated in
    modern NumPy -- confirm the pinned NumPy version still provides it.
    """
    def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
        """
        Construct the (U, S) projection from a corpus `docs`. The projection can
        be later updated by merging it with another Projection via `self.merge()`.
        This is the class taking care of the 'core math'; interfacing with corpora,
        splitting large corpora into chunks and merging them etc. is done through
        the higher-level `LsiModel` class.
        """
        # m = number of features (terms); k = requested number of factors.
        self.m, self.k = m, k
        self.power_iters = power_iters
        self.extra_dims = extra_dims
        if docs is not None:
            # base case decomposition: given a job `docs`, compute its decomposition,
            # *in-core*.
            if not use_svdlibc:
                u, s = stochastic_svd(docs, k, chunksize=sys.maxsize,
                                      num_terms=m, power_iters=self.power_iters,
                                      extra_dims=self.extra_dims)
            else:
                try:
                    import sparsesvd
                except ImportError:
                    raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
                logger.info("computing sparse SVD of %s matrix" % str(docs.shape))
                if not scipy.sparse.issparse(docs):
                    docs = matutils.corpus2csc(docs)
                ut, s, vt = sparsesvd.sparsesvd(docs, k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
                u = ut.T
                del ut, vt
            # discard numerically-insignificant trailing factors
            k = clip_spectrum(s**2, self.k)
            self.u = u[:, :k].copy()
            self.s = s[:k].copy()
        else:
            # empty projection: filled in later via merge()
            self.u, self.s = None, None
    def empty_like(self):
        # Return a fresh, empty projection with the same dimensions/settings.
        return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
    def merge(self, other, decay=1.0):
        """
        Merge this Projection with another.
        The content of `other` is destroyed in the process, so pass this function a
        copy of `other` if you need it further.
        """
        if other.u is None:
            # the other projection is empty => do nothing
            return
        if self.u is None:
            # we are empty => result of merge is the other projection, whatever it is
            self.u = other.u.copy()
            self.s = other.s.copy()
            return
        if self.m != other.m:
            raise ValueError("vector space mismatch: update is using %s features, expected %s" %
                             (other.m, self.m))
        logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
        # m = features; n1, n2 = factor counts of the two projections
        m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
        # TODO Maybe keep the bases as elementary reflectors, without
        # forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis ond basis*component.
        # But how to do that in scipy? And is it fast(er)?
        # find component of u2 orthogonal to u1
        logger.debug("constructing orthogonal component")
        self.u = asfarray(self.u, 'self.u')
        c = numpy.dot(self.u.T, other.u)
        self.u = ascarray(self.u, 'self.u')
        other.u -= numpy.dot(self.u, c)
        other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
        q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
        assert not other.u
        # find the rotation that diagonalizes r
        # `decay` < 1.0 down-weights the old singular values before merging.
        k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
                        [matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
        logger.debug("computing SVD of %s dense matrix" % str(k.shape))
        try:
            # in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
            # for these early versions of numpy, catch the error and try to compute
            # SVD again, but over k*k^T.
            # see http://www.mail-archive.com/numpy-discussion@scipy.org/msg07224.html and
            # bug ticket http://projects.scipy.org/numpy/ticket/706
            # sdoering: replaced numpy's linalg.svd with scipy's linalg.svd:
            u_k, s_k, _ = scipy.linalg.svd(k, full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :( //sdoering: maybe there is one in scipy?
        except scipy.linalg.LinAlgError:
            logger.error("SVD(A) failed; trying SVD(A * A^T)")
            u_k, s_k, _ = scipy.linalg.svd(numpy.dot(k, k.T), full_matrices=False) # if this fails too, give up with an exception
            s_k = numpy.sqrt(s_k) # go back from eigen values to singular values
        k = clip_spectrum(s_k**2, self.k)
        u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
        # update & rotate current basis U = [U, U']*[U1_k, U2_k]
        logger.debug("updating orthonormal basis U")
        self.s = s_k
        self.u = ascarray(self.u, 'self.u')
        self.u = numpy.dot(self.u, u1_k)
        q = ascarray(q, 'q')
        q = numpy.dot(q, u2_k)
        self.u += q
        # make each column of U start with a non-negative number (to force canonical decomposition)
        if self.u.shape[0] > 0:
            for i in xrange(self.u.shape[1]):
                if self.u[0, i] < 0.0:
                    self.u[:, i] *= -1.0
        # diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
        # logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Latent
Semantic Indexing (also known as Latent Semantic Analysis).
The main methods are:
1. constructor, which initializes the projection into latent topics space,
2. the ``[]`` method, which returns representation of any input document in the
latent space,
3. `add_documents()` for incrementally updating the model with new documents.
The left singular vectors are stored in `lsi.projection.u`, singular values
in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
of `lsi[training_corpus]`, if needed. See also FAQ [2]_.
Model persistency is achieved via its load/save methods.
.. [2] https://github.com/piskvorky/gensim/wiki/Recipes-&-FAQ#q4-how-do-you-output-the-u-s-vt-matrices-of-lsi
"""
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True,
power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
"""
`num_topics` is the number of requested factors (latent dimensions).
After the model has been trained, you can estimate topics for an
arbitrary, unseen document, using the ``topics = self[document]`` dictionary
notation. You can also add new training documents, with ``self.add_documents``,
so that training can be stopped and resumed at any time, and the
LSI transformation is available at any point.
If you specify a `corpus`, it will be used to train the model. See the
method `add_documents` for a description of the `chunksize` and `decay` parameters.
Turn `onepass` off to force a multi-pass stochastic algorithm.
`power_iters` and `extra_samples` affect the accuracy of the stochastic
multi-pass algorithm, which is used either internally (`onepass=True`) or
as the front-end algorithm (`onepass=False`). Increasing the number of
power iterations improves accuracy, but lowers performance. See [3]_ for
some hard numbers.
Turn on `distributed` to enable distributed computing.
Example:
>>> lsi = LsiModel(corpus, num_topics=10)
>>> print(lsi[doc_tfidf]) # project some document into LSI space
>>> lsi.add_documents(corpus2) # update LSI on additional documents
>>> print(lsi[doc_tfidf])
.. [3] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + self.id2word.keys())
self.docs_processed = 0
self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError("distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized.")
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples,
distributed=False, onepass=onepass)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)" % err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
self.add_documents(corpus)
def add_documents(self, corpus, chunksize=None, decay=None):
"""
Update singular value decomposition to take into account a new
corpus of documents.
Training proceeds in chunks of `chunksize` documents at a time. The size of
`chunksize` is a tradeoff between increased speed (bigger `chunksize`)
vs. lower memory footprint (smaller `chunksize`). If the distributed mode
is on, each chunk is sent to a different worker/computer.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the
input document stream, by giving less emphasis to old observations. This allows
LSA to gradually "forget" old observations (documents) and give more
preference to new ones.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(self.num_terms, self.num_topics, None)
update.u, update.s = stochastic_svd(corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
else:
# the one-pass algo
doc_no = 0
if self.dispatcher:
logger.info('initializing %s workers' % self.numworkers)
self.dispatcher.reset()
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i" % chunk_no)
self.dispatcher.putjob(job) # put job into queue; this will eventually block, because the queue has a small finite size
del job
logger.info("dispatched documents up to #%s" % doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s" % doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
# logger.info("top topics after adding %i documents" % doc_no)
# self.print_debug(10)
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
assert self.onepass, "distributed two-pass algo not supported yet"
update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents" % (corpus.shape[1]))
def __str__(self):
return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
    def __getitem__(self, bow, scaled=False, chunksize=512):
        """
        Return latent representation, as a list of (topic_id, topic_value) 2-tuples.

        This is done by folding input document into the latent topic space.

        `bow` may be a single document (list of (word_id, weight) 2-tuples) or a
        whole corpus; a corpus is transformed lazily, `chunksize` documents at a
        time.  If `scaled` is set, the result is additionally divided by the
        singular values (s^-1 * u^-1 * x instead of u^-1 * x).
        """
        assert self.projection.u is not None, "decomposition not initialized yet"

        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bow = utils.is_corpus(bow)
        if is_corpus and chunksize:
            # by default, transform `chunksize` documents at once, when called as `lsi[corpus]`.
            # this chunking is completely transparent to the user, but it speeds
            # up internal computations (one mat * mat multiplication, instead of
            # `chunksize` smaller mat * vec multiplications).
            return self._apply(bow, chunksize=chunksize)

        if not is_corpus:
            # wrap a single document so the code below can treat it as a 1-document corpus
            bow = [bow]

        # convert input to scipy.sparse CSC, then do "sparse * dense = dense" multiplication
        vec = matutils.corpus2csc(bow, num_terms=self.num_terms, dtype=self.projection.u.dtype)
        topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x

        # # convert input to dense, then do dense * dense multiplication
        # # ± same performance as above (BLAS dense * dense is better optimized than scipy.sparse), but consumes more memory
        # vec = matutils.corpus2dense(bow, num_terms=self.num_terms, num_docs=len(bow))
        # topic_dist = numpy.dot(self.projection.u[:, :self.num_topics].T, vec)

        # # use numpy's advanced indexing to simulate sparse * dense
        # # ± same speed again
        # u = self.projection.u[:, :self.num_topics]
        # topic_dist = numpy.empty((u.shape[1], len(bow)), dtype=u.dtype)
        # for vecno, vec in enumerate(bow):
        #     indices, data = zip(*vec) if vec else ([], [])
        #     topic_dist[:, vecno] = numpy.dot(u.take(indices, axis=0).T, numpy.array(data, dtype=u.dtype))

        if scaled:
            topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x

        # convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
        # with no zero weights.
        if not is_corpus:
            # lsi[single_document]
            result = matutils.full2sparse(topic_dist.flat)
        else:
            # lsi[chunk of documents]
            result = matutils.Dense2Corpus(topic_dist)
        return result
def show_topic(self, topicno, topn=10):
"""
Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
as string.
Return only the `topn` words which contribute the most to the direction
of the topic (both negative and positive).
>>> lsimodel.print_topic(10, topn=5)
'-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + -0.174 * "functor" + -0.168 * "operator"'
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
most = numpy.abs(c).argsort()[::-1][:topn]
return [(1.0 * c[val] / norm, self.id2word[val]) for val in most]
def print_topic(self, topicno, topn=10):
return ' + '.join(['%.3f*"%s"' % v for v in self.show_topic(topicno, topn)])
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""
Return `num_topics` most significant topics (return all by default).
For each topic, show `num_words` most significant words (10 words by default).
The topics are returned as a list -- a list of strings if `formatted` is
True, or a list of (weight, word) 2-tuples if False.
If `log` is True, also output this result to log.
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in xrange(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i(%.3f): %s" %
(i, self.projection.s[i],
topic))
return shown
def print_topics(self, num_topics=5, num_words=10):
"""Alias for `show_topics()` which prints the top 5 topics to log."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
    def print_debug(self, num_topics=5, num_words=10):
        """
        Print (to log) the most salient words of the first `num_topics` topics.

        Unlike `print_topics()`, this looks for words that are significant for a
        particular topic *and* not for others. This *should* result in a more
        human-interpretable description of topics.
        """
        # only wrap the module-level fnc: inside a method body, the bare name
        # `print_debug` resolves to the module-level function, not to this method
        print_debug(self.id2word, self.projection.u, self.projection.s,
                    range(min(num_topics, len(self.projection.u.T))),
                    num_words=num_words)
    def save(self, fname, *args, **kwargs):
        """
        Save the model to file.

        Large internal arrays may be stored into separate files, with `fname` as prefix.
        The projection is saved separately to `fname` + '.projection' and excluded
        from the main pickle via `ignore` (together with `dispatcher`, which is
        only meaningful in distributed mode -- presumably not picklable; see `load`).
        """
        if self.projection is not None:
            self.projection.save(fname + '.projection', *args, **kwargs)
        super(LsiModel, self).save(fname, *args, ignore=['projection', 'dispatcher'], **kwargs)
@classmethod
def load(cls, fname, *args, **kwargs):
"""
Load a previously saved object from file (also see `save`).
Large arrays are mmap'ed back as read-only (shared memory).
"""
kwargs['mmap'] = kwargs.get('mmap', 'r')
result = super(LsiModel, cls).load(fname, *args, **kwargs)
try:
result.projection = super(LsiModel, cls).load(fname + '.projection', *args, **kwargs)
except Exception as e:
logging.warning("failed to load projection from %s: %s" % (fname + '.state', e))
return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
    """
    Log the most salient words of the requested topics.

    Salience of a word within a topic is its normalized weight in that topic's
    left singular vector, so this highlights words that dominate the topic's
    direction.

    :param id2token: mapping from feature id to word.
    :param u: left singular vectors, indexed as u[feature, topic].
    :param s: singular values (one per topic).
    :param topics: iterable of topic indices to print.
    :param num_words: how many same-signed (positive after sign-flip) words to show.
    :param num_neg: how many opposite-signed words to show; defaults to half
        of `num_words`.
    """
    if num_neg is None:
        # by default, print half as many salient negative words as positive.
        # fixed: use floor division so the count stays an integer under both
        # Python 2 and Python 3 semantics (plain `/` yields a float on py3)
        num_neg = num_words // 2

    logger.info('computing word-topic salience for %i topics' % len(topics))
    topics, result = set(topics), {}
    # TODO speed up by block computation
    for uvecno, uvec in enumerate(u):
        uvec = numpy.abs(numpy.asarray(uvec).flatten())
        udiff = uvec / numpy.sqrt(numpy.sum(numpy.dot(uvec, uvec)))
        for topic in topics:
            result.setdefault(topic, []).append((udiff[topic], uvecno))

    logger.debug("printing %i+%i salient words" % (num_words, num_neg))
    for topic in sorted(iterkeys(result)):
        weights = sorted(result[topic], key=lambda x: -abs(x[0]))
        _, most = weights[0]
        if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
            normalize = -1.0
        else:
            normalize = 1.0

        # order features according to salience; ignore near-zero entries in u
        pos, neg = [], []
        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] > 0.0001:
                pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(pos) >= num_words:
                    break

        for weight, uvecno in weights:
            if normalize * u[uvecno, topic] < -0.0001:
                neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
                if len(neg) >= num_neg:
                    break

        logger.info('topic #%s(%.3f): %s, ..., %s' % (topic, s[topic], ', '.join(pos), ', '.join(neg)))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
                   power_iters=0, dtype=numpy.float64, eps=1e-6):
    """
    Run truncated Singular Value Decomposition (SVD) on a sparse input.

    Return (U, S): the left singular vectors and the singular values of the input
    data stream `corpus` [4]_. The corpus may be larger than RAM (iterator of vectors).

    This may return less than the requested number of top `rank` factors, in case
    the input itself is of lower rank. The `extra_dims` (oversampling) and especially
    `power_iters` (power iterations) parameters affect accuracy of the decomposition.

    This algorithm uses `2+power_iters` passes over the input data. In case you can only
    afford a single pass, set `onepass=True` in :class:`LsiModel` and avoid using
    this function directly.

    The decomposition algorithm is based on
    **Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**

    :param corpus: stream of gensim-style sparse documents, or a scipy.sparse matrix.
    :param rank: number of requested factors.
    :param num_terms: vocabulary size (number of rows of the implicit matrix A).
    :param chunksize: number of documents processed per pass iteration.
    :param extra_dims: oversampling count; defaults to max(10, 2*rank) - rank.
    :param power_iters: number of power iterations (improves accuracy, adds passes).
    :param dtype: dtype of the returned factors.
    :param eps: relative spectrum-energy threshold passed to `clip_spectrum`.

    .. [4] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
       corpus fits into core memory and a different (more efficient) code path is chosen.
    """
    rank = int(rank)
    if extra_dims is None:
        samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
    else:
        samples = rank + int(extra_dims)
    logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))

    num_terms = int(num_terms)

    # first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
    # build Y in blocks of `chunksize` documents (much faster than going one-by-one
    # and more memory friendly than processing all documents at once)
    y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
    logger.info("1st phase: constructing %s action matrix" % str(y.shape))

    if scipy.sparse.issparse(corpus):
        # in-core code path: one sparse matrix product per step
        m, n = corpus.shape
        assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
        o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
        sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
                                corpus.data, o.ravel(), y.ravel()) # y = corpus * o
        del o

        # unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
        # so check for equal dtype explicitly, to avoid the extra memory footprint if possible
        if y.dtype != dtype:
            y = y.astype(dtype)

        logger.info("orthonormalizing %s action matrix" % str(y.shape))
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range

        logger.debug("running %i power iterations" % power_iters)
        for power_iter in xrange(power_iters):
            # q <- orth((A * A.T) * q), one full multiply per iteration
            q = corpus.T * q
            q = [corpus * q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
    else:
        # streamed code path: accumulate Y = A * O chunk by chunk
        num_docs = 0
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
            # construct the chunk as a sparse matrix, to minimize memory overhead
            # definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
            s = sum(len(doc) for doc in chunk)
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
            m, n = chunk.shape
            assert m == num_terms
            assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
            num_docs += n
            logger.debug("multiplying chunk * gauss")
            o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
            sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
                                    chunk.data, o.ravel(), y.ravel())
            del chunk, o
        y = [y]
        q, _ = matutils.qr_destroy(y) # orthonormalize the range

        for power_iter in xrange(power_iters):
            logger.info("running power iteration #%i" % (power_iter + 1))
            yold = q.copy()
            q[:] = 0.0
            # recompute q = A * (A.T * q_old), streaming over the corpus again
            for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
                logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
                chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
                tmp = chunk.T * yold
                tmp = chunk * tmp
                del chunk
                q += tmp
            del yold
            q = [q]
            q, _ = matutils.qr_destroy(q) # orthonormalize the range

    qt = q[:, :samples].T.copy()
    del q

    if scipy.sparse.issparse(corpus):
        # in-core: project the corpus into the range of Q and SVD the result
        b = qt * corpus
        logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
        u, s, vt = scipy.linalg.svd(b, full_matrices=False)
        del b, vt
    else:
        # second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
        # again, construct X incrementally, in chunks of `chunksize` documents from the streaming
        # input corpus A, to avoid using O(number of documents) memory
        x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
        logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
        for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
            logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
            chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
            b = qt * chunk # dense * sparse matrix multiply
            del chunk
            x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
            del b

        # now we're ready to compute decomposition of the small matrix X
        logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
        u, s, vt = scipy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
        s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus

    q = qt.T.copy()
    del qt

    logger.info("computing the final decomposition")
    # discard the near-zero tail of the spectrum (real rank may be < requested rank)
    keep = clip_spectrum(s**2, rank, discard=eps)
    u = u[:, :keep].copy()
    s = s[:keep]
    # map the small-space factors back into the original term space
    u = numpy.dot(q, u)
    return u.astype(dtype), s.astype(dtype)
|
ashhher3/gensim
|
gensim/models/lsimodel.py
|
Python
|
gpl-3.0
| 34,370
|
[
"Gaussian"
] |
f6a2a187edb1095224ed9835f1dbd7e7b0244cdec27eb1d12e60440fcd51de68
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
********************************************************
**espressopp.interaction.LennardJonesGeneric**
********************************************************
This class provides methods to compute forces and energies of
a generic Lennard Jones potential with arbitrary integers a and b.
.. math::
V(r) = 4 \varepsilon \left[ \left( \frac{\sigma}{r} \right)^{a} -
\left( \frac{\sigma}{r} \right)^{b} \right]
.. function:: espressopp.interaction.LennardJonesGeneric(epsilon, sigma, a, b, cutoff, shift)
:param epsilon: (default: 1.0)
:param sigma: (default: 1.0)
:param a: (default: 12)
:param b: (default: 6)
:param cutoff: (default: infinity)
:param shift: (default: "auto")
:type epsilon: real
:type sigma: real
:type a: int
:type b: int
:type cutoff:
:type shift:
.. function:: espressopp.interaction.VerletListLennardJonesGeneric(vl)
:param vl:
:type vl:
.. function:: espressopp.interaction.VerletListLennardJonesGeneric.getPotential(type1, type2)
:param type1:
:param type2:
:type type1:
:type type2:
:rtype:
.. function:: espressopp.interaction.VerletListLennardJonesGeneric.getVerletList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.VerletListLennardJonesGeneric.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric2(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric2.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListAdressLennardJonesGeneric2.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric(vl, fixedtupleList)
:param vl:
:param fixedtupleList:
:type vl:
:type fixedtupleList:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric2(vl, fixedtupleList, KTI)
:param vl:
:param fixedtupleList:
:param KTI: (default: False)
:type vl:
:type fixedtupleList:
:type KTI:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric2.setPotentialAT(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.VerletListHadressLennardJonesGeneric2.setPotentialCG(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.CellListLennardJonesGeneric(stor)
:param stor:
:type stor:
.. function:: espressopp.interaction.CellListLennardJonesGeneric.setPotential(type1, type2, potential)
:param type1:
:param type2:
:param potential:
:type type1:
:type type2:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGeneric(system, vl, potential)
:param system:
:param vl:
:param potential:
:type system:
:type vl:
:type potential:
.. function:: espressopp.interaction.FixedPairListLennardJonesGeneric.getFixedPairList()
:rtype: A Python list of lists.
.. function:: espressopp.interaction.FixedPairListLennardJonesGeneric.getPotential()
:rtype:
.. function:: espressopp.interaction.FixedPairListLennardJonesGeneric.setFixedPairList(fixedpairlist)
:param fixedpairlist:
:type fixedpairlist:
.. function:: espressopp.interaction.FixedPairListLennardJonesGeneric.setPotential(potential)
:param potential:
:type potential:
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_LennardJonesGeneric, \
interaction_VerletListLennardJonesGeneric, \
interaction_VerletListAdressLennardJonesGeneric, \
interaction_VerletListAdressLennardJonesGeneric2, \
interaction_VerletListHadressLennardJonesGeneric, \
interaction_VerletListHadressLennardJonesGeneric2, \
interaction_CellListLennardJonesGeneric, \
interaction_FixedPairListLennardJonesGeneric
class LennardJonesGenericLocal(PotentialLocal, interaction_LennardJonesGeneric):
    def __init__(self, epsilon=1.0, sigma=1.0, a=12, b=6,
                 cutoff=infinity, shift="auto"):
        """Initialize the local generic Lennard Jones object.

        :param epsilon: well depth
        :param sigma: zero-crossing distance
        :param a: repulsive exponent
        :param b: attractive exponent
        :param cutoff: interaction cutoff radius
        :param shift: potential shift; "auto" lets the C++ side compute it
        """
        # construct the C++ object only when PMI is inactive or on worker CPUs
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # fixed: removed leftover debug `print "here ..."` statements that
            # wrote to stdout on every construction
            if shift == "auto":
                # omit `shift` so the C++ constructor computes it at the cutoff
                cxxinit(self, interaction_LennardJonesGeneric,
                        epsilon, sigma, a, b, cutoff)
            else:
                cxxinit(self, interaction_LennardJonesGeneric,
                        epsilon, sigma, a, b, cutoff, shift)
class VerletListLennardJonesGenericLocal(InteractionLocal, interaction_VerletListLennardJonesGeneric):
    """PMI-local part of the Verlet-list based generic Lennard-Jones interaction."""

    def __init__(self, vl):
        # construct the C++ object only when PMI is inactive or on worker CPUs
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListLennardJonesGeneric, vl)

    def setPotential(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)

    def getPotential(self, type1, type2):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self, type1, type2)

    def getVerletListLocal(self):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getVerletList(self)
class VerletListAdressLennardJonesGenericLocal(InteractionLocal, interaction_VerletListAdressLennardJonesGeneric):
    """PMI-local part of the AdResS Verlet-list generic Lennard-Jones interaction."""

    def __init__(self, vl, fixedtupleList):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListAdressLennardJonesGeneric, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        # atomistic-region potential
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        # coarse-grained-region potential
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListAdressLennardJonesGeneric2Local(InteractionLocal, interaction_VerletListAdressLennardJonesGeneric2):
    """PMI-local part of the second AdResS Verlet-list generic Lennard-Jones variant."""

    def __init__(self, vl, fixedtupleList):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListAdressLennardJonesGeneric2, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressLennardJonesGenericLocal(InteractionLocal, interaction_VerletListHadressLennardJonesGeneric):
    """PMI-local part of the H-AdResS Verlet-list generic Lennard-Jones interaction."""

    def __init__(self, vl, fixedtupleList):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListHadressLennardJonesGeneric, vl, fixedtupleList)

    def setPotentialAT(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class VerletListHadressLennardJonesGeneric2Local(InteractionLocal, interaction_VerletListHadressLennardJonesGeneric2):
    """PMI-local part of the second H-AdResS Verlet-list generic Lennard-Jones variant."""

    def __init__(self, vl, fixedtupleList, KTI = False):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_VerletListHadressLennardJonesGeneric2, vl, fixedtupleList, KTI)

    def setPotentialAT(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialAT(self, type1, type2, potential)

    def setPotentialCG(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotentialCG(self, type1, type2, potential)
class CellListLennardJonesGenericLocal(InteractionLocal, interaction_CellListLennardJonesGeneric):
    """PMI-local part of the cell-list based generic Lennard-Jones interaction."""

    def __init__(self, stor):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_CellListLennardJonesGeneric, stor)

    def setPotential(self, type1, type2, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, type1, type2, potential)
class FixedPairListLennardJonesGenericLocal(InteractionLocal, interaction_FixedPairListLennardJonesGeneric):
    """PMI-local part of the fixed-pair-list generic Lennard-Jones interaction."""

    def __init__(self, system, vl, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, interaction_FixedPairListLennardJonesGeneric, system, vl, potential)

    def setPotential(self, potential):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setPotential(self, potential)

    def getPotential(self):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getPotential(self)

    def setFixedPairList(self, fixedpairlist):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            self.cxxclass.setFixedPairList(self, fixedpairlist)

    def getFixedPairList(self):
        pmiActive = pmi._PMIComm and pmi._PMIComm.isActive()
        if not pmiActive or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            return self.cxxclass.getFixedPairList(self)
if pmi.isController:
    # controller-side proxy classes: method calls are forwarded via PMI to the
    # *Local counterparts on the worker nodes (referenced by dotted class name)
    class LennardJonesGeneric(Potential):
        'The generic Lennard-Jones potential.'
        # NOTE(review): unlike the Interaction proxies below, no explicit
        # __metaclass__ is set here -- presumably supplied via Potential; verify
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.LennardJonesGenericLocal',
            pmiproperty = ['epsilon', 'sigma', 'a', 'b']
            )

    class VerletListLennardJonesGeneric(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListLennardJonesGenericLocal',
            pmicall = ['setPotential', 'getPotential', 'getVerletList']
            )

    class VerletListAdressLennardJonesGeneric(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListAdressLennardJonesGenericLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListAdressLennardJonesGeneric2(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListAdressLennardJonesGeneric2Local',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListHadressLennardJonesGeneric(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListHadressLennardJonesGenericLocal',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class VerletListHadressLennardJonesGeneric2(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.VerletListHadressLennardJonesGeneric2Local',
            pmicall = ['setPotentialAT', 'setPotentialCG']
            )

    class CellListLennardJonesGeneric(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.CellListLennardJonesGenericLocal',
            pmicall = ['setPotential']
            )

    class FixedPairListLennardJonesGeneric(Interaction):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.interaction.FixedPairListLennardJonesGenericLocal',
            pmicall = ['getPotential', 'setPotential', 'setFixedPairList','getFixedPairList' ]
            )
|
capoe/espressopp.soap
|
src/interaction/LennardJonesGeneric.py
|
Python
|
gpl-3.0
| 15,841
|
[
"ESPResSo"
] |
87b6e50cc367b54dfcfead09b6c557d576a8703721255ba73dcd8f09baf25515
|
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python
# ----------------------------------------
# USAGE:
# ./hbond.analysis.py pdb_file trajectory_location start_traj end_traj system_descriptor
# ----------------------------------------
# PREAMBLE:
import numpy as np
import sys
import os
import MDAnalysis
import MDAnalysis.core.AtomGroup
import MDAnalysis.analysis.hbonds
import MDAnalysis.coordinates.base
from sel_list import *
# VARIABLE DECLARATION:
pdb = sys.argv[1] # point to a pdb or prmtop or psf file (untested for both prmtop and psf files)
traj_loc = sys.argv[2] # point to the location of the trajectory files
start_traj = int(sys.argv[3]) # index of the first trajectory file to analyze
end_traj = int(sys.argv[4]) # index of the last trajectory file (inclusive)
system = sys.argv[5] # descriptor used as prefix for all output file names
nSel = len(sel) # number of atom-selection tuples imported from sel_list
flush = sys.stdout.flush # bound method, so ffprint() can flush without re-lookup
# ----------------------------------------
# SUBROUTINES:
def ffprint(string):
    # emit the message plus a newline and force the buffer out, so progress
    # is visible immediately even when stdout is redirected to a log file
    sys.stdout.write('%s\n' % (string))
    flush()
def summary(nSteps):
    """Write <system>.hbond.summary recording provenance of this analysis run.

    Records the MDAnalysis version, the command line needed to reproduce the
    run, the total number of frames analyzed, and all atom selections.

    :param nSteps: total number of trajectory frames analyzed
    """
    # `with` guarantees the file is closed even if a write fails
    with open('%s.hbond.summary' %(system),'w') as sum_file:
        sum_file.write('Using MDAnalysis version: %s\n' %(MDAnalysis.version.__version__))
        sum_file.write('To recreate this analysis, run this line in terminal:\n .hbond.analysis.py pdb_file trajectory_location start_traj end_traj system_descriptor')
        sum_file.write('\n\n')
        sum_file.write('Progress output is written to:\n ')
        sum_file.write('\nTotal number of steps analyzed: %d\n' %(nSteps))
        # fixed: header used ':n' instead of a newline escape
        sum_file.write('\nAtom Selections analyzed:\n')
        for i in range(nSel):
            # fixed: format string had 3 placeholders for 4 arguments, which
            # raised TypeError as soon as summary() was called
            sum_file.write('%02d %s %s %s\n' %(i,sel[i][1],sel[i][2],sel[i][3]))
# ----------------------------------------
# MAIN:
#
# INITIALIZING UNIVERSE
u = MDAnalysis.Universe(pdb)
# one HydrogenBondAnalysis object per selection pair, reused for every trajectory
h_list = []
for i in range(nSel):
    # create an output directory per distinct selection group; sel entries are
    # assumed ordered so equal sel[i][0] values are adjacent -- TODO confirm
    if sel[i][0] != sel[i-1][0] or i == 0:
        os.mkdir('%s' %(sel[i][0]))
    h = MDAnalysis.analysis.hbonds.HydrogenBondAnalysis(u, selection1=sel[i][2], selection2=sel[i][3], selection1_type='donor', update_selection1=False, update_selection2=False, detect_hydrogens='distance', start=None, stop=None, step=None, distance=3.0, angle=120.0, donors=None, acceptors=['O1P','O2P','O3P'])
    h_list.append(h)

# BEGINNING TO ANALYZE TRAJECTORIES
nSteps = 0
count = 0   # NOTE(review): `count` is never used below -- candidate for removal
while start_traj <= end_traj:
    ffprint('Loading trajectory %s' %(start_traj))
    u.load_new('%sproduction.%s/production.%s.dcd' %(traj_loc,start_traj,start_traj))
    # Loop through all the trajectories to calculate 'Time (ns)' from each frame continuously
    # (0.002 ns per frame is hard-coded here)
    t0 = nSteps * 0.002
    t = [t0 + (ts.frame-1) * 0.002 for ts in u.trajectory]
    nSteps += len(u.trajectory)
    # Loop through all residue pair selections, calculate the Hydrogen Bond distances and angles for all pairs
    # Write time (ns), distance, and angle into an output file named: system.sel[i][1].results.dat
    for i in range(nSel):
        os.chdir('%s' %(sel[i][0]))
        out1 = open('%s.%s.results.dat' %(system,sel[i][1]), 'a')
        h_list[i].run()
        htimeseries = h_list[i].timeseries
        # Generate table needed to write out all information about hbond analysis
        # h_list[i].generate_table()
        # htable = h_list[i].table
        for j in range(len(u.trajectory)):
            if len(htimeseries[j]) != 0:
                for k in range(len(htimeseries[j])):
                    # the last two fields of each hbond record are distance and angle
                    out1.write('%.6f %.9f %.9f\n' %(t[j],htimeseries[j][k][-2],htimeseries[j][k][-1]))
        out1.close()
        # To write out all the information that is generated from MDAnalysis: time, donor_idx, acceptor_idx, donor_index, acceptor_index,
        # donor_resnm, donor_resid, donor_atom, acceptor_resnm, acceptor_resid, acceptor_atom, distance, angle.
        # out2 = open('%s.%s.table.dat' %(system,sel[i][1]), 'a')
        # out2.write('%s' %(htable))
        # out2.close()
    # Change directories back to parent directory, iterate to the next trajectory....REPEAT
    ffprint('Finished analyzing trajectory %02d\n' %(start_traj))
    # NOTE(review): os.chdir into a selection directory runs once per selection
    # above, but this single chdir('..') runs once per trajectory -- this only
    # balances if every sel[i][0] names the same directory; verify sel_list
    os.chdir('..')
    start_traj += 1
ffprint('Analyzed %d steps.' %(nSteps))
|
dupontke/hbond_analysis
|
hbond.analysis.py
|
Python
|
gpl-3.0
| 3,966
|
[
"MDAnalysis"
] |
971188e49615ac8a1cfde872927da3bc2122a82b0ee4959a409647280ccfd0db
|
#! /usr/bin/env python2
############################################################################
# Copyright (C) 2011 by Hans Robeers #
# #
# This program is free software; you can redistribute it and#or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the #
# Free Software Foundation, Inc., #
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #
############################################################################
#from numpy import *
#from numpy import array, linspace, sqrt, arange, zeros, ones, nan, transpose, isnan, where, size
from numpy import array, linspace, arange, zeros, transpose, isnan, where, size
from Fspline import spline_val, bezier_curve_val #, spline_overhauser_val
from Ffinlib import make_surface, interpolate#, naca_val
import matplotlib.pyplot as plt
from svgParser import SvgParser, SvgTools
#import guiqwt.pyplot as plt
class Fin:
    """
    finFoil class to create Fin objects
    * written by hrobeers
    * license: GPL

    Builds a fin outline (contour) from control points or an SVG file and
    generates a 3D thickness surface on a rectangular grid.
    NOTE(review): Python 2 code (raw_input, integer division semantics).
    """
    def __init__(self):
        """ initialise some values """
        # self.fortran = True
        # base thickness of the profile (overwritten by set_basethickness)
        self.thick = 1
        # surface grid resolution [nx, ny]
        self.resolution = array([1000, 1000])
        # default outline control points (x, y pairs)
        pointdata = array([[0,0],[5,6],[9.5,10],[13,12],
            [16.25,11.35],[12.9,5],[11.2,0]])
        self.set_pointdata(pointdata)
        self.contour = FinContour(self.resolution)
    """
    %%%%%%%%%%%%%%%%%%%%%%
    % auxilliary methods %
    %%%%%%%%%%%%%%%%%%%%%%
    """
    def thickness(self, height):
        """ generate the thickness profile: parabolic falloff from the
        base thickness at y=0 to zero at y=height """
        basethickness = self.thick
        y = linspace(0,height,self.resolution[1])
        # 0 = a1 * h**2 + b
        a1 = -basethickness/(height**2)
        thick = a1 * y**2 + basethickness
        return thick
    def exponentialThickness(self, baseNumber):
        """ generate an exponential thickness profile; larger baseNumber
        gives a faster decay towards the tip """
        basethickness = self.thick
        y = linspace(0,1,self.resolution[1])
        a = 1/(2**float(baseNumber))
        thick = (a**(1-y)-1)/(a-1) * basethickness
        return thick
    def percentThickness(self, percent):
        """ thickness proportional to the local chord length
        (percent = thickness/chord ratio); also updates self.thick """
        leadingedge = self.contour.getLeadingEdge()
        trailingedge = self.contour.getTrailingEdge()
        # initialise the surface matrices
        #surf = ones(self.resolution)*nan #*self.thick
        width = self.contour.getWidth()
        height = self.contour.getHeight()
        x_axis = linspace(0,width,self.resolution[0])
        y_axis = linspace(0,height,self.resolution[1])
        # interpolate the edges to fit the grid
        leading_edge = interpolate(leadingedge[:,1],leadingedge[:,0],y_axis,len(leadingedge[:,1]),self.resolution[1])
        trailing_edge = interpolate(trailingedge[:,1],trailingedge[:,0],y_axis,len(trailingedge[:,1]),self.resolution[1])
        chordlength = trailing_edge - leading_edge
        thick = chordlength * float(percent)
        self.set_basethickness(thick[0])
        return thick
    """
    %%%%%%%%%%%%%%%%%%%%%%
    % generation methods %
    %%%%%%%%%%%%%%%%%%%%%%
    """
    def gen_surface(self, exponConstant):
        """ generate the 3D surface of the fin

        exponConstant == 0: parabolic thickness profile
        exponConstant <  0: percent-of-chord thickness (-exponConstant)
        exponConstant >  0: exponential thickness profile
        """
        leadingedge = self.contour.getLeadingEdge()
        trailingedge = self.contour.getTrailingEdge()
        # initialise the surface matrices
        #surf = ones(self.resolution)*nan #*self.thick
        width = self.contour.getWidth()
        height = self.contour.getHeight()
        x_axis = linspace(0,width,self.resolution[0])
        y_axis = linspace(0,height,self.resolution[1])
        # interpolate the edges to fit the grid
        leading_edge = interpolate(leadingedge[:,1],leadingedge[:,0],y_axis,len(leadingedge[:,1]),self.resolution[1])
        trailing_edge = interpolate(trailingedge[:,1],trailingedge[:,0],y_axis,len(trailingedge[:,1]),self.resolution[1])
        # rescale to grid
        dx = width / self.resolution[0]
        leading_edge_grid = (leading_edge / dx).round()
        trailing_edge_grid = (trailing_edge / dx).round()
        # generate the profiles and surface
        if exponConstant == 0:
            thick = self.thickness(height)
        elif exponConstant < 0:
            thick = self.percentThickness(-exponConstant)
        else:
            thick = self.exponentialThickness(exponConstant)
        surf = make_surface(self.resolution[0],leading_edge_grid,trailing_edge_grid,thick,self.resolution[1])
        #print surf
        self.surf = surf
        self.x_axis = x_axis
        self.y_axis = y_axis
    def gen_contour_fspline(self):
        """ Use Fspline to interpolate the input points """
        n_dim = 2
        n_val = self.con_resolution
        # NOTE(review): integer division (Python 2); pointdata is an Nx2 array
        n_inputpoints = self.pointdata.size/2
        t = arange(0,n_inputpoints)
        t_int = linspace(0,n_inputpoints-1,n_val)
        ydata = transpose(self.pointdata)
        yval = spline_val(t,ydata,t_int,n_dim,len(t),len(t_int))
        self.contour.setContour(yval.transpose())
    def genContourSVG(self, svgFilePath, pathName=""):
        """ Generate the contour from an SVG file

        pathName is currently unused; the SVG's path is normalised to
        absolute, full cubic bezier segments before evaluation. """
        pointsPerBezier = self.con_resolution
        svg = SvgParser()
        svg.parseSvgFile(svgFilePath)
        path = svg.getPath()
        absPath = SvgTools.toAbsolutePath(path)
        normPath = SvgTools.toNormalisedPath(absPath)
        fullPath = SvgTools.toFullBezierPath(normPath)
        contour = zeros((2,len(fullPath)*pointsPerBezier))
        controlPoints = zeros((2,len(fullPath)*4))
        con = zeros((4,2))
        tval = linspace(0,1,pointsPerBezier+1)
        bcval = zeros((2,pointsPerBezier))
        j=0
        for conPartPoints in fullPath:
            # evaluate each cubic bezier segment on the parameter grid
            for i in arange(0,4):
                con[i,:] = array(conPartPoints[1][i])
            bcval = bezier_curve_val(tval,transpose(con),3,pointsPerBezier)
            contour[:,j*pointsPerBezier:(j+1)*pointsPerBezier] = bcval
            controlPoints[:,j*4:(j+1)*4] = transpose(con)
            j = j+1
        self.set_pointdata(transpose(controlPoints))
        self.contour.setContour(transpose(contour))
    """
    %%%%%%%%%%%%%%%%
    % plot methods %
    %%%%%%%%%%%%%%%%
    """
    def plot_contour(self):
        """ plot the fin contour and its control points """
        plt.close(1)
        plt.figure(1)
        plt.plot(self.contour.getX(),self.contour.getY(),'-',self.pointdata[:,0],self.pointdata[:,1],'o')
        plt.axis('equal')
        plt.show()
    def plot_surface(self, layer_thick):
        """ plot the fin surface as filled contours with the given
        layer thickness """
        n_layers = (self.thick/2)//layer_thick + 2
        layers = linspace(0,n_layers*layer_thick,n_layers+1)
        plt.close(2)
        plt.figure(2)
        plt.contourf(self.x_axis, self.y_axis, self.surf.transpose(), layers)
        plt.axis('equal')
        plt.show()
    def plot_wireframe(self, lib):
        """ plot the fin wireframe; lib selects the plotting backend:
        'gnuplot', 'matplotlib' or 'mayavi' """
        #lib = 'mayavi'
        #Filter to lower resolution
        xfilter = arange(0,len(self.x_axis),len(self.x_axis)/1)
        yfilter = arange(0,len(self.y_axis),len(self.y_axis)/1)
        x = self.x_axis[xfilter]
        y = self.y_axis[yfilter]
        z = self.surf[xfilter,:]
        z = z[:,yfilter]
        if lib == 'gnuplot':
            import Gnuplot
            # replace NaN (outside the fin) with a visible plateau
            z[isnan(z)] = 5
            g = Gnuplot.Gnuplot()
            g.splot(Gnuplot.GridData(z, x, y, binary=0))
            raw_input('Please press return to continue...\n')
        elif lib == 'matplotlib':
            import mpl_toolkits.mplot3d.axes3d as p3
            fig=plt.figure(3)
            ax = p3.Axes3D(fig)
            x2 = zeros((100,100))
            x2[:,:]=x[:]
            y2 = zeros((100,100))
            y2[:,:]=y[:]
            ax.plot_wireframe(x2, y2.transpose(), z.transpose())
        elif lib == 'mayavi':
            from enthought.mayavi.mlab import surf
            z[isnan(z)] = 0
            surf(x, y, z)
    def show_plot(self):
        """ show the active matplotlib figure """
        plt.show()
    """
    %%%%%%%%%%%%%%%%%%%%%
    % getters & setters %
    %%%%%%%%%%%%%%%%%%%%%
    """
    def get_pointdata(self,pointdata):
        """ return the input point data
        NOTE(review): the pointdata argument is unused """
        return self.pointdata
    def set_pointdata(self,pointdata):
        """ set the input point data """
        self.pointdata = pointdata
    def set_resolution(self,resolution):
        """ set resolution """
        self.resolution = resolution
    def set_con_resolution(self,con_resolution):
        """ set the contour resolution (points per bezier segment) """
        self.con_resolution = con_resolution
    def set_basethickness(self,thick):
        """ set the basethickness """
        self.thick = thick
    #def test_get(self):
    #""" test function """
    def set_dataPoint(self,row,col,value):
        """ overwrite a single entry of the pointdata matrix """
        self.pointdata[row][col] = value
class FinContour:
    """
    finFoil class to create FinContour objects used in the Fin class
    * written by hrobeers
    * license: GPL v3

    Holds the fin outline as an Nx2 point array and splits it into a
    leading and trailing edge at the highest point.
    """
    def __init__(self, resolution):
        """ initialise the contour """
        # Fix: honour the resolution argument instead of ignoring it, so
        # getResolution() works before setContour() has been called.
        self.__setResolution__(resolution)
    def __setResolution__(self,resolution):
        """ set the resolution """
        self.resolution = resolution
    def __splitContour__(self):
        """ split the contour in leading and trailing edge """
        # local variables
        extrema = self.contour.max(axis=0)
        self.width = extrema[0]
        self.height = extrema[1]
        # split the outline at the highest point (assumed unique)
        top = int(max(where(self.contour[:,1] == self.height)))
        self.leadingedge = self.contour[:top+1,:]
        self.trailingedge = self.contour[top:,:]
        self.leadingedge[0,1] = 0 # first element of y should be zero for interpolation
        self.trailingedge = self.trailingedge[::-1,:] # reverse the array for interpolation
        self.trailingedge[0,1] = 0 # first element of y should be zero for interpolation
    """
    %%%%%%%%%%%%%%%%%%%%%
    % getters & setters %
    %%%%%%%%%%%%%%%%%%%%%
    """
    def getResolution(self):
        """ return resolution """
        return self.resolution
    def getContour(self):
        """ return contour """
        return self.contour
    def setContour(self, contour):
        """ set contour (Nx2 array); updates resolution and edges """
        resolution = size(contour,0)
        self.__setResolution__(resolution)
        self.contour = contour
        self.__splitContour__()
    def getLeadingEdge(self):
        """ return leading edge """
        return self.leadingedge
    def getTrailingEdge(self):
        """ return trailing edge """
        return self.trailingedge
    def getX(self):
        """ return X """
        return self.contour[:,0]
    def setX(self, X):
        """ set X """
        self.contour[:,0] = X
    def getY(self):
        """ return Y """
        return self.contour[:,1]
    def setY(self, Y):
        """ set Y """
        self.contour[:,1] = Y
    def getWidth(self):
        """ return width """
        return self.width
    def getHeight(self):
        """ return height """
        return self.height
if __name__ == "__main__":
    # demo/driver code: build a fin from an SVG outline and plot it
    fortran = True
    x = Fin()
    basethickness = 1
    resolution = array([1000, 1000])
    # pointdata is prepared but not used (set_pointdata call is commented out)
    pointdata = array([[0,0],[5,6],[9.5,10],[13,12],
        [16.25,11.35],[12.9,5],[11.2,0]])
    x.set_basethickness(basethickness)
    x.set_resolution(resolution)
    # x.set_pointdata(pointdata)
    x.set_con_resolution(2000)
    #tic = time.time()
    #x.gen_contour_cubic()
    # x.gen_contour_fspline()
    # NOTE(review): hard-coded local path; adjust for your machine
    x.genContourSVG("/home/frieda/tmp/test.svg")
    # negative constant => thickness as 15% of local chord length
    x.gen_surface(-0.15)
    #toc = time.time()
    x.plot_contour()
    x.plot_surface(0.8)
    # x.plot_wireframe('mayavi')
    raw_input('Please press return to continue...\n')
    # x.show_plot()
    #x.test_get()
    #print 'time: ', toc-tic
|
hrobeers/finFoil_legacy
|
fin.py
|
Python
|
gpl-3.0
| 13,286
|
[
"Mayavi"
] |
abfd45afd9675caa61f97906e0face4bd39592a56266ce90eadca989359e39d7
|
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2018 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
# Blender add-on metadata; read by Blender's add-on manager.
bl_info = {
    "name": "appleseed",
    "author": "The appleseedhq Organization",
    "version": (2, 0, 0),
    "blender": (2, 80, 0),
    "location": "Info Header (Render Engine Menu)",
    "description": "appleseed Render Engine",
    "warning": "",
    "wiki_url": "https://appleseed.readthedocs.io/projects/appleseed-blenderseed/en/latest",
    "tracker_url": "https://github.com/appleseedhq/blenderseed/issues",
    "category": "Render"}
# Support Blender's "Reload Scripts": if bpy is already bound, this module
# was imported before and the submodules must be re-imported in place.
if "bpy" in locals():
    import importlib
    importlib.reload(properties)
    importlib.reload(operators)
    importlib.reload(export)
    importlib.reload(ui)
else:
    import bpy
def register():
    """Register the add-on with Blender.

    Sets up appleseed's bundled Python paths first, then registers all
    submodules and installs the load-post handler. Imports are deferred
    into the function body so they run only at registration time.
    """
    from .utils import path_util
    path_util.load_appleseed_python_paths()
    from . import preferences
    preferences.register()
    from . import properties
    from . import operators
    from . import ui
    from . import render # This is needed
    from .utils import util
    properties.register()
    operators.register()
    ui.register()
    render.register()
    # refresh project state whenever a .blend file is loaded
    bpy.app.handlers.load_post.append(util.update_project)
def unregister():
    """Unregister the add-on, tearing submodules down in reverse
    registration order and removing the load-post handler."""
    from . import preferences
    from . import properties
    from . import operators
    from . import ui
    from . import render
    from .utils import util
    render.unregister()
    ui.unregister()
    operators.unregister()
    properties.unregister()
    preferences.unregister()
    bpy.app.handlers.load_post.remove(util.update_project)
|
dictoon/blenderseed
|
__init__.py
|
Python
|
mit
| 2,755
|
[
"VisIt"
] |
6c3d90ad0c443d5e8d4db2d40ce628cd6c1979ad26e50ccf972f134f03761e68
|
#!/usr/bin/python
########################################################################
# 2 September 2014
# Patrick Lombard, Centre for Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################
import os, re, sys
import argparse
import numpy as np
import HTSeq
import subprocess
#This just works with peaks
def peak_homer_analysis(peaks, bam_file=None, tag=None):
    """Run HOMER annotatePeaks.pl around the given peaks (mm10, +/-2kb,
    10bp bins, per-peak histogram matrix) and return the output file name.

    Either bam_file (a HOMER tag directory is created from it first) or
    tag (an existing tag directory name) must be supplied; otherwise the
    final `return out` raises NameError. Python 2 only (print statement).
    """
    #Bam file has to be sorted and indexed!
    if bam_file:
        # derive the sample name; NOTE(review): the unescaped "." in the
        # pattern matches any character, not only a literal dot
        name = re.sub(".bam", "", bam_file)
        name = re.sub("_sort", "", name)
        command = ["makeTagDirectory", "{}_homer".format(name), bam_file]
        subprocess.call(command)
        command2 = "annotatePeaks.pl {} mm10 -size 4000 -hist 10 -ghist -d {}_homer".format(peaks, name)
        out = name + "_homer_output.txt"
        output = open(out, "w")
        subprocess.call(command2.split(), stdout=output)
        output.close()
    elif tag:
        name = re.sub("_homer", "", tag)
        command2 = "annotatePeaks.pl {} mm10 -size 4000 -hist 10 -ghist -d {}_homer".format(peaks, name)
        out = name + "_homer_output.txt"
        print command2
        output = open(out, "w")
        subprocess.call(command2.split(), stdout=output)
        output.close()
    return out
#TSS heatmap, create a list of lists and then convert it to numpy array, problem is you need values for individual positions which are tricky!
#For now just use current approach
def tss_homer_analysis(gtf, gene_filter, ucsc, bam_file=None, tag=None):
    """Write a BED of TSS windows (+/-200bp) for filtered genes, then run
    HOMER annotatePeaks.pl (mm10, +/-5kb, 10bp bins) around them.

    gtf: GTF annotation read with HTSeq; the strand-aware start of each
    gene's first exon is taken as the TSS.
    gene_filter: file whose first tab-separated column lists gene names
    to keep. NOTE(review): g_filter is only defined when gene_filter is
    truthy; the feature loop references it unconditionally.
    ucsc: if True, convert Ensembl chromosome names to UCSC style.
    Returns the HOMER output file name.
    """
    #Bam file has to be sorted and indexed!
    peaks = "tmp.bed"
    output = open("tmp.bed", "w")
    tsspos = set()
    gtffile = HTSeq.GFF_Reader( gtf )
    if gene_filter:
        # read the whitelist of gene names (first tab-separated column)
        g_filter = {}
        with open(gene_filter) as f:
            for line in f:
                line = line.rstrip()
                word = line.split("\t")
                g_filter[word[0]] = 1
    for feature in gtffile:
        # feature.name is gene name, useful for delving into GFF files again!
        if feature.name in g_filter:
            if feature.type == "exon" and feature.attr["exon_number"] == "1":
                tsspos.add( feature.iv.start_d_as_pos )
    for p in tsspos: #Problem if ensembl vs UCSC
        if ucsc == True:
            if p.chrom == "MT":
                chrom = "chrM"
            else:
                chrom = "chr" + p.chrom
        else:
            chrom = p.chrom
        start = p.pos - 200
        end = p.pos + 200
        if start < 0:
            start = 0
        output.write("{}\t{}\t{}\n".format(chrom, start, end)),
    output.close()
    if bam_file:
        name = re.sub(".bam", "", bam_file)
        name = re.sub("_sort", "", name)
        command = ["makeTagDirectory", "{}_homer".format(name), bam_file]
        subprocess.call(command)
        command2 = "annotatePeaks.pl {} mm10 -size 10000 -hist 10 -ghist -d {}_homer".format(peaks, name)
        out = name + "_homer_output.txt"
        output = open(out, "w")
        subprocess.call(command2.split(), stdout=output)
        output.close()
    elif tag:
        name = re.sub("_homer", "", tag)
        command2 = "annotatePeaks.pl {} mm10 -size 10000 -hist 10 -ghist -d {}_homer".format(peaks, name)
        out = name + "_homer_output.txt"
        output = open(out, "w")
        subprocess.call(command2.split(), stdout=output)
        output.close()
    return out
|
pdl30/pyngsplot
|
pyngsplot/tools/homer_analysis.py
|
Python
|
mit
| 3,046
|
[
"HTSeq"
] |
85df0c2c20fbdcbe72537d5fe8a91055b907cc2449de35846b43a933910bfb7a
|
# Copyright Anshuman73.
# Visit anshuman73.github.io for more info.
# Released under MIT License.
# Works with Python 2.7
import json
import requests
def main(query):
    """Search musixmatch for *query* and print the top 5 matching tracks.

    Scrapes the search results page and extracts the embedded JSON
    ("mxmProps") blob. Python 2 only (print statement, xrange, raw_input).
    NOTE(review): assumes at least 5 results; fewer raise KeyError below.
    """
    #get the data
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'
               'Chrome/53.0.2785.116 Safari/537.36'}
    raw_data = requests.get("https://www.musixmatch.com/search/%s/lyrics" % query, headers=headers).text.encode('utf-8')
    #raw_data is now a HTML dump, parse it to make it perfect json
    raw_data = raw_data[raw_data.find('{', raw_data.find('mxmProps')) : raw_data.find('</script>', raw_data.find('mxmProps'))]
    data = json.loads(raw_data) # Data ready to be queried
    total_results = data['lyricsTracks']['length']  # currently unused
    print "Top 5 results are:\n"
    for x in xrange(5):
        song = data['lyricsTracks'][str(x)]['attributes']['track_name']
        artist = data['lyricsTracks'][str(x)]['attributes']['artist_name']
        album = data['lyricsTracks'][str(x)]['attributes']['album_name']
        print "\n\nSong Name:", song
        print "\nArtist:", artist
        print "\nAlbum:", album
main(str(raw_input("\nEnter the lyrics: \n\n")))
|
anshuman73/lyric-matcher
|
get_song.py
|
Python
|
mit
| 1,163
|
[
"VisIt"
] |
8f80aa8183e0901bf6425e021d09dc565aa2a63421fb75dea45d0f5a45428ed5
|
import numpy as np
import cv2
from binaryTransform import mag_thresh, dir_sobel_thresh
from helpers import *
import matplotlib.pyplot as plt
# normalize the _lower_ part (containing the street) to the range 0...255
def normstreet(channel):
    """Rescale *channel* so that the brightest pixel in the lower part of
    the frame (rows 420 and below, containing the street) maps to 255."""
    street_peak = np.max(channel[420:, :])
    return 255 * (channel / street_peak)
def binarypipeline(img):
    """Combine a color threshold on the HLS S channel with directional
    Sobel thresholds on the S (HLS) and V (HSV) channels into a single
    binary lane-pixel mask for a BGR input image."""
    hls=bgr_hls(img)
    hsv=bgr_hsv(img)
    # compute binary image based on color threshold on S channel
    hls_bin = np.zeros_like(hls[:,:,2])
    hls_bin[(normstreet(hls[:,:,2]) >= 110) & (normstreet(hls[:,:,2]) <= 240)] = 1
    #b_s, s=dirabs_threshold(hls[:,:,2], sobel_kernel=5, thresh=(0.9, 1.1))
    #b_v, v=dirabs_threshold(hsv[:,:,2], sobel_kernel=5, thresh=(0.9, 1.1))
    b = np.zeros_like(hls_bin)
    #b[(b_s==1) & (b_v==1)]=1
    b_s, s=dir_sobel_thresh(hls[:,:,2], sobel_kernel=11, alpha=0, thresh=(40, 255)) #np.arctan(-400/300)
    b_v, s=dir_sobel_thresh(hsv[:,:,2], sobel_kernel=11, alpha=0, thresh=(30, 255))
    #b_s, s=mag_thresh(hls[:,:,2], sobel_kernel=11, thresh=(80, 255), nrm=normstreet)
    #b_v, s=mag_thresh(hsv[:,:,2], sobel_kernel=11, thresh=(60, 255), nrm=normstreet)
    # OR-combine the two gradient masks, then the color mask
    b[(b==1) | (b_s==1) | (b_v==1)] = 1
    b[(b==1) | (hls_bin==1)] = 1
    return b
# factory for perspective transform from src to dst
def srcdst():
    """Return the (src, dst) float32 point quadrilaterals that define the
    bird's-eye perspective transform for a 720-pixel-high frame."""
    height = 720
    edge = 100  # top offset of the destination rectangle
    src = np.float32([[596, 450],
                      [686, 450],
                      [1027, height - 50],
                      [276, height - 50]])
    dst = np.float32([[305, edge],
                      [1005, edge],
                      [1005, height - 1],
                      [305, height - 1]])
    return src, dst
def warpFactory():
    """Return a callable that warps an image into the bird's-eye view
    defined by srcdst() (src -> dst perspective transform)."""
    src, dst = srcdst()
    matrix = cv2.getPerspectiveTransform(src, dst)
    def warp(image):
        return cv2.warpPerspective(image, matrix, (image.shape[1], image.shape[0]))
    return warp
def unwarpFactory():
    """Return a callable that maps a bird's-eye image back to the camera
    view (inverse of warpFactory: dst -> src perspective transform)."""
    src, dst = srcdst()
    matrix = cv2.getPerspectiveTransform(dst, src)
    def unwarp(image):
        return cv2.warpPerspective(image, matrix, (image.shape[1], image.shape[0]))
    return unwarp
# findLanes_windowed() as described in main.ipynb, section "Identifying lane lines"
def findLanes_windowed(warped, sigma=20, nwindows = 9, margin = 100, minpix = 200, ym_per_pix = 3/50, xm_per_pix = 3.7/700):
    """Detect left/right lane-line pixels in a warped binary image with a
    sliding-window search and fit a 2nd-order polynomial (in meters) to each.

    warped:     top-down (bird's-eye) binary image; lane pixels are nonzero
    sigma:      width parameter of the Gaussian kernel smoothing the histogram
    nwindows:   number of vertical search windows
    margin:     half-width of each search window in pixels
    minpix:     minimum pixels in a window before it is re-centered
    ym_per_pix, xm_per_pix: pixel -> meter conversion factors

    Returns (nonzerox, nonzeroy, left_lane_inds, right_lane_inds,
    left_fit, right_fit); the polynomial fits are in meter units.

    Fix: the removed ``np.int`` alias (NumPy >= 1.24) is replaced by the
    builtin ``int``; the lower-half slice is derived from the image height
    instead of the hard-coded 360 (identical for 720-row input).
    """
    # take a histogram of the lower half of the image:
    histogram = np.sum(warped[warped.shape[0]//2:,:], axis=0)
    # apply some smoothing: convolution with a Gaussian kernel
    s = sigma
    n = np.array(range(3*s), dtype=np.double)
    kernel = np.exp(-((n-1.5*s)/(2*s))**2)
    norm = sum(kernel)
    hc = np.convolve(histogram, np.array(kernel, dtype=np.double)/norm, mode='same')
    # Find the peak of the left and right halves of the histogram
    # These will be the starting point for the left and right lines
    midpoint = int(hc.shape[0]/2)
    leftx_base = np.argmax(hc[:midpoint])
    rightx_base = np.argmax(hc[midpoint:]) + midpoint
    # Set height of windows
    window_height = int(warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = warped.shape[0] - (window+1)*window_height
        win_y_high = warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If you found > minpix pixels, recenter next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial to each, in meters
    left_fit = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
    right_fit = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    return nonzerox, nonzeroy, left_lane_inds, right_lane_inds, left_fit, right_fit
def findLanes_reuse(warped, left_fit, right_fit, margin = 100, ym_per_pix = 3/50, xm_per_pix = 3.7/700):
    """Re-detect lane pixels in a new warped frame by searching within
    *margin* pixels around previously fitted polynomials, then refit.

    left_fit / right_fit: 2nd-order polynomial coefficients in meter
    units (as returned by findLanes_windowed). Returns the same tuple
    layout as findLanes_windowed().
    """
    def marginCalc(fit, margin):
        # constant search margin converted to meters; the commented code
        # below was an experiment clamping the margin at the image borders
        #m=np.ones_like(fit)*margin*xm_per_pix
        #z=np.zeros_like(fit)
        #m=np.maximum(np.minimum(1280*xm_per_pix-fit, m), z)
        #m=np.maximum(np.minimum(fit, m), z)
        return margin*xm_per_pix
    # Assume you now have a new warped binary image
    # from the next frame of video (also called "binary_warped")
    # It's now much easier to find line pixels!
    nonzero = warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # pixel coordinates converted to meters
    nonzeroym = nonzeroy*ym_per_pix
    nonzeroxm = nonzerox*xm_per_pix
    # evaluate the previous fits at every nonzero pixel's y position
    lf=left_fit[0]*(nonzeroym**2) + left_fit[1]*nonzeroym + left_fit[2]
    rf=right_fit[0]*(nonzeroym**2) + right_fit[1]*nonzeroym + right_fit[2]
    lm=marginCalc(lf, margin)
    rm=marginCalc(rf, margin)
    left_lane_inds = ((nonzeroxm > (lf - lm)) &
                      (nonzeroxm < (lf + lm)))
    right_lane_inds = ((nonzeroxm > (rf - rm)) &
                       (nonzeroxm < (rf + rm)))
    # Again, extract left and right line pixel positions in meters
    leftx = nonzeroxm[left_lane_inds]
    lefty = nonzeroym[left_lane_inds]
    rightx = nonzeroxm[right_lane_inds]
    righty = nonzeroym[right_lane_inds]
    # Fit a second order polynomial to each, in meters
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    return nonzerox, nonzeroy, left_lane_inds, right_lane_inds, left_fit, right_fit
def drawLanes_warped(warped, nonzerox, nonzeroy, left_lane_inds, right_lane_inds, left_fit, right_fit, ym_per_pix = 3/50, xpix_per_m = 700/3.7, col_line=(0, 255, 255)):
    """Visualize lane detection on the warped (bird's-eye) image: left
    lane pixels red, right lane pixels blue, fitted polynomial curves
    drawn in col_line. Returns the annotated RGB image.
    """
    # ym_per_pix and xpix_per_m define conversions in x and y from pixels space to meters
    # as function arguments
    # ym_per_pix = 30/720 # meters per pixel in y dimension
    # xpix_per_m = 700/3.7 # pixels per meter in x dimension
    # Generate x and y values for plotting
    ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
    # evaluate the meter-space fits, then convert back to pixels
    left_fitx = (left_fit[0]*(ploty*ym_per_pix)**2 + left_fit[1]*(ploty*ym_per_pix) + left_fit[2])*xpix_per_m
    right_fitx = (right_fit[0]*(ploty*ym_per_pix)**2 + right_fit[1]*(ploty*ym_per_pix) + right_fit[2])*xpix_per_m
    # promote a single-channel binary image to RGB; pass color through
    if (len(warped.shape)==2):
        out_img=np.dstack((warped, warped, warped))*255
    else:
        out_img=np.uint8(warped)
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    pts = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    out_img=cv2.polylines(out_img, np.int32(pts), 0, col_line, 2)
    pts = np.array([np.transpose(np.vstack([right_fitx, ploty]))])
    out_img=cv2.polylines(out_img, np.int32(pts), 0, col_line, 2)
    return out_img
# drawLane3d(): Draw lane marking on an image
# img: image to be drawn on
# left_fit, right_fit: coefficients of 2nd order polynomial which have been
# computed for the lane lines on the image, for example using
# findLanes_windowed()
# unwarpFun: function to perform the inverse perspective transform.
# can be created using unwarpFactory()
# ym_per_pix, xpix_per_m: unit conversions pixels <=> meters
def drawLane3d(img, left_fit, right_fit, unwarpFun, ym_per_pix = 3/50, xpix_per_m = 700/3.7):
    """Overlay the detected lane area (green, 30% alpha) onto the original
    camera image; the polygon is built in bird's-eye space and projected
    back with unwarpFun. See the comment block above for parameters."""
    # Create an image to draw the lines on
    color_warp = np.zeros_like(img).astype(np.uint8)
    # y values
    ploty = np.linspace(0, 719, num=720)
    # convert to meters
    plotym = ym_per_pix * ploty
    # evaluate polynomial & convert to pixels
    left_fitxm = left_fit[0]*plotym**2 + left_fit[1]*plotym + left_fit[2]
    left_fitx = xpix_per_m * left_fitxm
    right_fitxm = right_fit[0]*plotym**2 + right_fit[1]*plotym + right_fit[2]
    right_fitx = xpix_per_m * right_fitxm
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using inverse perspective matrix
    # see unwarpFactory(), pipeline.py
    newwarp = unwarpFun(color_warp)
    # Combine the result with the original image
    result = cv2.addWeighted(img, 1, newwarp, 0.3, 0)
    return result
|
alex-n-braun/curve
|
pipeline.py
|
Python
|
gpl-3.0
| 10,278
|
[
"Gaussian"
] |
aeff957738b5a8fb01fed50b3827eb0049e42a204cc9542af1c700508cd6bd97
|
""" core implementation of testing process: init, session, runtest loop. """
import re
import py
import pytest, _pytest
import os, sys, imp
try:
from collections import MutableMapping as MappingMixin
except ImportError:
from UserDict import DictMixin as MappingMixin
from _pytest.runner import collect_one_node
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
# exitcodes for the command line
EXIT_OK = 0
EXIT_TESTSFAILED = 1
EXIT_INTERRUPTED = 2
EXIT_INTERNALERROR = 3
EXIT_USAGEERROR = 4

# matches a valid python identifier; raw string avoids the invalid "\w"
# escape-sequence warning (an error on newer Python versions)
name_re = re.compile(r"^[a-zA-Z_]\w*$")
def pytest_addoption(parser):
    """Register ini values and command line options for collection and
    the run loop (general, collect and debugconfig option groups)."""
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #            "**/test_*.py", "**/*_test.py"]
    #)
    group = parser.getgroup("general", "running and selection options")
    group._addoption('-x', '--exitfirst', action="store_true", default=False,
               dest="exitfirst",
               help="exit instantly on first error or failed test."),
    group._addoption('--maxfail', metavar="num",
               action="store", type=int, dest="maxfail", default=0,
               help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
               help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
               help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
        help="only collect tests, don't execute them."),
    group.addoption('--pyargs', action="store_true",
        help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
        help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
        metavar="dir",
        help="only load conftest.py's relative to specified dir.")
    group.addoption('--noconftest', action="store_true",
        dest="noconftest", default=False,
        help="Don't load any conftest.py files.")
    group = parser.getgroup("debugconfig",
        "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
        help="base temporary directory for this test run.")
def pytest_namespace():
    """Expose the collection node classes under the ``pytest.collect``
    namespace."""
    return {
        "collect": {
            "Item": Item,
            "Collector": Collector,
            "File": File,
            "Session": Session,
        }
    }
def pytest_configure(config):
    """Store the config on the pytest module (compatibility alias) and
    translate ``-x``/``--exitfirst`` into ``--maxfail=1``."""
    pytest.config = config  # compatibility alias for plugins
    exit_on_first_failure = config.option.exitfirst
    if exit_on_first_failure:
        config.option.maxfail = 1
def wrap_session(config, doit):
    """Skeleton command line program: create a Session, run *doit* inside
    configure/sessionstart/sessionfinish hooks and translate exceptions
    into the EXIT_* status codes. Returns the session exit status."""
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate tracks how far initialization got, so finish hooks only
    # run for stages that actually started
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            doit(config, session)
        except pytest.UsageError:
            # propagated to the caller, which reports usage errors itself
            raise
        except KeyboardInterrupt:
            excinfo = py.code.ExceptionInfo()
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except:
            # any other exception is an internal error
            excinfo = py.code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
        else:
            if session._testsfailed:
                session.exitstatus = EXIT_TESTSFAILED
    finally:
        excinfo = None # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        config._ensure_unconfigure()
    return session.exitstatus
def pytest_cmdline_main(config):
    """Entry point for the default command line run: execute _main inside
    the session wrapper and return the exit status."""
    return wrap_session(config, _main)
def _main(config, session):
    """ default command line protocol for initialization, session,
    running tests and reporting. """
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)
def pytest_collection(session):
    """Default implementation of the collection hook: delegate to the
    session's own collection protocol."""
    return session.perform_collect()
def pytest_runtestloop(session):
    """Run the collected items one by one; honors --collect-only and the
    session's shouldstop flag (set e.g. by --maxfail)."""
    if session.config.option.collectonly:
        return True
    def getnextitem(i):
        # this is a function to avoid python2
        # keeping sys.exc_info set when calling into a test
        # python2 keeps sys.exc_info till the frame is left
        try:
            return session.items[i+1]
        except IndexError:
            return None
    for i, item in enumerate(session.items):
        # nextitem lets the protocol decide which teardowns can run eagerly
        nextitem = getnextitem(i)
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
def pytest_ignore_collect(path, config):
    """Return True if *path* should be skipped during collection, based on
    conftest ``collect_ignore`` lists and the --ignore command line option."""
    p = path.dirpath()
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
    ignore_paths = ignore_paths or []
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend([py.path.local(x) for x in excludeopt])
    return path in ignore_paths
class FSHookProxy:
    """Hook proxy bound to a filesystem path.

    Hook callers are created lazily on first attribute access (with the
    configured plugins removed) and cached in the instance __dict__, so
    __getattr__ runs at most once per hook name.
    """
    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name):
        caller = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        # cache: subsequent lookups hit __dict__ and skip __getattr__
        self.__dict__[name] = caller
        return caller
def compatproperty(name):
    """Return a read-only property forwarding attribute access to
    ``pytest.<name>`` (backward-compatibility shim, e.g. ``node.Module``)."""
    def fget(self):
        # deprecated - use pytest.name
        return getattr(pytest, name)
    return property(fget)
class NodeKeywords(MappingMixin):
    """Mapping of keyword/marker names for a collection node.

    Lookups fall back to the parent node's keywords, so markers set on a
    parent (e.g. a module) are visible on its children (e.g. test items).
    Deletion is disallowed.
    """
    def __init__(self, node):
        self.node = node
        self.parent = node.parent
        # every node implicitly carries its own name as a keyword
        self._markers = {node.name: True}

    def __getitem__(self, key):
        try:
            return self._markers[key]
        except KeyError:
            if self.parent is None:
                raise
            # delegate to the parent node's keywords
            return self.parent.keywords[key]

    def __setitem__(self, key, value):
        self._markers[key] = value

    def __delitem__(self, key):
        raise ValueError("cannot delete key in keywords dict")

    def __iter__(self):
        seen = set(self._markers)
        if self.parent is not None:
            seen.update(self.parent.keywords)
        return iter(seen)

    def __len__(self):
        # bug fix: len() of a set *iterator* raises TypeError; count the
        # merged keys instead of calling len() on __iter__()'s result
        return sum(1 for _ in self)

    def keys(self):
        return list(self)

    def __repr__(self):
        return "<NodeKeywords for node %s>" % (self.node, )
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
    def __init__(self, name, parent=None, config=None, session=None):
        """Initialize a collection-tree node; config and session default
        to the parent's when not given explicitly."""
        #: a unique name within the scope of the parent node
        self.name = name
        #: the parent collector node.
        self.parent = parent
        #: the pytest config object
        self.config = config or parent.config
        #: the session this node is part of
        self.session = session or parent.session
        #: filesystem path where this node was collected from (can be None)
        self.fspath = getattr(parent, 'fspath', None)
        #: keywords/markers collected from all scopes
        self.keywords = NodeKeywords(self)
        #: allow adding of extra keywords to use for matching
        self.extra_keyword_matches = set()
        # used for storing artificial fixturedefs for direct parametrization
        self._name2pseudofixturedef = {}
        #self.extrainit()
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
#def extrainit(self):
# """"extra initialization after Node is initialized. Implemented
# by some subclasses. """
Module = compatproperty("Module")
Class = compatproperty("Class")
Instance = compatproperty("Instance")
Function = compatproperty("Function")
File = compatproperty("File")
Item = compatproperty("Item")
def _getcustomclass(self, name):
cls = getattr(self, name)
if cls != getattr(pytest, name):
py.log._apiwarn("2.0", "use of node.%s is deprecated, "
"use pytest_pycollect_makeitem(...) to create custom "
"collection nodes" % name)
return cls
def __repr__(self):
return "<%s %r>" %(self.__class__.__name__,
getattr(self, 'name', None))
def warn(self, code, message):
""" generate a warning with the given code and message for this
item. """
assert isinstance(code, str)
fslocation = getattr(self, "location", None)
if fslocation is None:
fslocation = getattr(self, "fspath", None)
else:
fslocation = "%s:%s" % fslocation[:2]
self.ihook.pytest_logwarning(code=code, message=message,
nodeid=self.nodeid,
fslocation=fslocation)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
try:
return self._nodeid
except AttributeError:
self._nodeid = x = self._makeid()
return x
def _makeid(self):
return self.parent.nodeid + "::" + self.name
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def _memoizedcall(self, attrname, function):
exattrname = "_ex_" + attrname
failure = getattr(self, exattrname, None)
if failure is not None:
py.builtin._reraise(failure[0], failure[1], failure[2])
if hasattr(self, attrname):
return getattr(self, attrname)
try:
res = function()
except py.builtin._sysex:
raise
except:
failure = sys.exc_info()
setattr(self, exattrname, failure)
raise
setattr(self, attrname, res)
return res
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
"""
from _pytest.mark import MarkDecorator
if isinstance(marker, py.builtin._basestring):
marker = MarkDecorator(marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name. """
val = self.keywords.get(name, None)
if val is not None:
from _pytest.mark import MarkInfo, MarkDecorator
if isinstance(val, (MarkDecorator, MarkInfo)):
return val
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
item = self
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style="long"
else:
self._prunetraceback(excinfo)
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
return excinfo.getrepr(funcargs=True,
showlocals=self.config.option.showlocals,
style=style, tbfilter=tbfilter)
repr_failure = _repr_failure_py
class Collector(Node):
    """ Collector instances create children through collect()
    and thus iteratively build a tree.
    """
    class CollectError(Exception):
        """ an error during collection, contains a custom message. """
    def collect(self):
        """ returns a list of children (items and collectors)
        for this collection node.
        """
        raise NotImplementedError("abstract")
    def repr_failure(self, excinfo):
        """ represent a collection failure. """
        # CollectError carries a user-facing message: show it verbatim
        # instead of rendering a traceback
        if excinfo.errisinstance(self.CollectError):
            exc = excinfo.value
            return str(exc.args[0])
        return self._repr_failure_py(excinfo, style="short")
    def _memocollect(self):
        """ internal helper method to cache results of calling collect(). """
        return self._memoizedcall('_collected', lambda: list(self.collect()))
    def _prunetraceback(self, excinfo):
        # keep only the traceback frames inside this collector's file; if
        # that removes nothing, cut below the pytest internals instead
        # (tracebackcutdir is defined elsewhere in this module)
        if hasattr(self, 'fspath'):
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            excinfo.traceback = ntraceback.filter()
class FSCollector(Collector):
    """Collector whose name and id are derived from a filesystem path."""
    def __init__(self, fspath, parent=None, config=None, session=None):
        fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
        # Prefer the path relative to the parent collector as node name,
        # falling back to the basename; normalize to "/" separators so
        # names are platform independent.
        if parent is None:
            name = fspath.basename
        else:
            relname = fspath.relto(parent.fspath)
            name = (relname or fspath.basename).replace(os.sep, "/")
        super(FSCollector, self).__init__(name, parent, config, session)
        self.fspath = fspath
    def _makeid(self):
        """Node id is the path relative to rootdir with "/" separators."""
        path_id = self.fspath.relto(self.config.rootdir)
        if os.sep != "/":
            path_id = path_id.replace(os.sep, "/")
        return path_id
class File(FSCollector):
    """ base class for collecting tests from a file;
    subclasses implement collect() to produce children. """
class Item(Node):
    """ a basic test invocation item. Note that for a single function
    there might be multiple test invocation items.
    """
    # NOTE(review): presumably assigned externally to point at the next
    # scheduled item; only the default (None) is visible here
    nextitem = None
    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        # accumulated (when, key, content) tuples, see add_report_section()
        self._report_sections = []
    def add_report_section(self, when, key, content):
        """ store a (when, key, content) report section; empty content
        is silently dropped. """
        if content:
            self._report_sections.append((when, key, content))
    def reportinfo(self):
        """ return a (fspath, lineno, domaininfo) tuple; lineno is None
        at this level. """
        return self.fspath, None, ""
    @property
    def location(self):
        """ (relative fspath, lineno, domain) tuple, computed once and
        cached on the instance. """
        try:
            return self._location
        except AttributeError:
            location = self.reportinfo()
            # bestrelpath is a quite slow function
            cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
            try:
                fspath = cache[location[0]]
            except KeyError:
                fspath = self.session.fspath.bestrelpath(location[0])
                cache[location[0]] = fspath
            location = (fspath, location[1], str(location[2]))
            self._location = location
            return location
class NoMatch(Exception):
    """ raised if matching cannot locate matching names
    (internal signal, caught in Session.collect). """
class Session(FSCollector):
    """ root of the collection tree; drives collection of the initial
    arguments and keeps session-wide state. """
    class Interrupted(KeyboardInterrupt):
        """ signals an interrupted test run. """
        __module__ = 'builtins' # for py3
    def __init__(self, config):
        FSCollector.__init__(self, config.rootdir, parent=None,
                             config=config, session=self)
        # cache of per-path hook proxies, see gethookproxy()
        self._fs2hookproxy = {}
        self._testsfailed = 0
        # False, or a string explaining why the run should stop
        self.shouldstop = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        # working directory at invocation time
        self.startdir = py.path.local()
        self.config.pluginmanager.register(self, name="session")
    def _makeid(self):
        # the session is the tree root and has an empty node id
        return ""
    @pytest.hookimpl(tryfirst=True)
    def pytest_collectstart(self):
        # abort collection as soon as a stop was requested
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)
    @pytest.hookimpl(tryfirst=True)
    def pytest_runtest_logreport(self, report):
        """ count failures (excluding xfail) and request a stop once
        the configured maxfail threshold is reached. """
        if report.failed and not hasattr(report, 'wasxfail'):
            self._testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self._testsfailed >= maxfail:
                self.shouldstop = "stopping after %d failures" % (
                    self._testsfailed)
    # collection failures count toward maxfail as well
    pytest_collectreport = pytest_runtest_logreport
    def isinitpath(self, path):
        """ return True if ``path`` was one of the initial collection
        arguments. """
        return path in self._initialpaths
    def gethookproxy(self, fspath):
        """ return (and cache) a hook proxy for ``fspath`` that only
        includes the conftest plugins relevant for that path. """
        try:
            return self._fs2hookproxy[fspath]
        except KeyError:
            # check if we have the common case of running
            # hooks with all conftest.py files active
            pm = self.config.pluginmanager
            my_conftestmodules = pm._getconftestmodules(fspath)
            remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
            if remove_mods:
                # one or more conftests are not in use at this fspath
                proxy = FSHookProxy(fspath, pm, remove_mods)
            else:
                # all plugins are active for this fspath
                proxy = self.config.hook
            self._fs2hookproxy[fspath] = proxy
            return proxy
    def perform_collect(self, args=None, genitems=True):
        """ run the collection protocol and return the collected items;
        pytest_collection_finish is always emitted. """
        hook = self.config.hook
        try:
            items = self._perform_collect(args, genitems)
            hook.pytest_collection_modifyitems(session=self,
                config=self.config, items=items)
        finally:
            hook.pytest_collection_finish(session=self)
        return items
    def _perform_collect(self, args, genitems):
        """ collect ``args`` (defaulting to the configured arguments);
        if ``genitems`` is true, expand collectors into items. """
        if args is None:
            args = self.config.args
        self.trace("perform_collect", self, args)
        self.trace.root.indent += 1
        self._notfound = []
        self._initialpaths = set()
        self._initialparts = []
        self.items = items = []
        for arg in args:
            # each argument becomes a [path, name, name, ...] list
            parts = self._parsearg(arg)
            self._initialparts.append(parts)
            self._initialpaths.add(parts[0])
        rep = collect_one_node(self)
        self.ihook.pytest_collectreport(report=rep)
        self.trace.root.indent -= 1
        if self._notfound:
            # arguments whose names could not be matched become a usage error
            errors = []
            for arg, exc in self._notfound:
                line = "(no name %r in any of %r)" % (arg, exc.args[0])
                errors.append("not found: %s\n%s" % (arg, line))
            #XXX: test this
            raise pytest.UsageError(*errors)
        if not genitems:
            return rep.result
        else:
            if rep.passed:
                for node in rep.result:
                    self.items.extend(self.genitems(node))
            return items
    def collect(self):
        # yield top-level collection nodes for each initial argument
        for parts in self._initialparts:
            arg = "::".join(map(str, parts))
            self.trace("processing argument", arg)
            self.trace.root.indent += 1
            try:
                for x in self._collect(arg):
                    yield x
            except NoMatch:
                # we are inside a make_report hook so
                # we cannot directly pass through the exception
                self._notfound.append((arg, sys.exc_info()[1]))
            self.trace.root.indent -= 1
    def _collect(self, arg):
        """ yield the collectors/items for one argument string. """
        names = self._parsearg(arg)
        path = names.pop(0)
        if path.check(dir=1):
            # a directory cannot carry ``::`` name parts
            assert not names, "invalid arg %r" %(arg,)
            for path in path.visit(fil=lambda x: x.check(file=1),
                                   rec=self._recurse, bf=True, sort=True):
                for x in self._collectfile(path):
                    yield x
        else:
            assert path.check(file=1)
            for x in self.matchnodes(self._collectfile(path), names):
                yield x
    def _collectfile(self, path):
        """ return the collectors produced for a single file. """
        ihook = self.gethookproxy(path)
        # paths given as initial arguments are never ignored
        if not self.isinitpath(path):
            if ihook.pytest_ignore_collect(path=path, config=self.config):
                return ()
        return ihook.pytest_collect_file(path=path, parent=self)
    def _recurse(self, path):
        """ return whether directory ``path`` should be descended into. """
        ihook = self.gethookproxy(path.dirpath())
        if ihook.pytest_ignore_collect(path=path, config=self.config):
            return
        # honor the norecursedirs ini patterns
        for pat in self._norecursepatterns:
            if path.check(fnmatch=pat):
                return False
        ihook = self.gethookproxy(path)
        ihook.pytest_collect_directory(path=path, parent=self)
        return True
    def _tryconvertpyarg(self, x):
        """ convert a dotted module/package name to a filesystem path;
        return ``x`` unchanged if it does not resolve. """
        mod = None
        path = [os.path.abspath('.')] + sys.path
        for name in x.split('.'):
            # ignore anything that's not a proper name here
            # else something like --pyargs will mess up '.'
            # since imp.find_module will actually sometimes work for it
            # but it's supposed to be considered a filesystem path
            # not a package
            if name_re.match(name) is None:
                return x
            try:
                fd, mod, type_ = imp.find_module(name, path)
            except ImportError:
                return x
            else:
                if fd is not None:
                    fd.close()
            if type_[2] != imp.PKG_DIRECTORY:
                # a plain module: continue searching next to it
                path = [os.path.dirname(mod)]
            else:
                # a package: continue searching inside it
                path = [mod]
        return mod
    def _parsearg(self, arg):
        """ return (fspath, names) tuple after checking the file exists. """
        arg = str(arg)
        if self.config.option.pyargs:
            arg = self._tryconvertpyarg(arg)
        parts = str(arg).split("::")
        relpath = parts[0].replace("/", os.sep)
        path = self.config.invocation_dir.join(relpath, abs=True)
        if not path.check():
            if self.config.option.pyargs:
                msg = "file or package not found: "
            else:
                msg = "file not found: "
            raise pytest.UsageError(msg + arg)
        parts[0] = path
        return parts
    def matchnodes(self, matching, names):
        """ resolve ``names`` against the candidate nodes, raising
        NoMatch if nothing could be located. """
        self.trace("matchnodes", matching, names)
        self.trace.root.indent += 1
        nodes = self._matchnodes(matching, names)
        num = len(nodes)
        self.trace("matchnodes finished -> ", num, "nodes")
        self.trace.root.indent -= 1
        if num == 0:
            raise NoMatch(matching, names[:1])
        return nodes
    def _matchnodes(self, matching, names):
        """ recursive worker for matchnodes(). """
        if not matching or not names:
            return matching
        name = names[0]
        assert name
        nextnames = names[1:]
        resultnodes = []
        for node in matching:
            if isinstance(node, pytest.Item):
                if not names:
                    resultnodes.append(node)
                continue
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                has_matched = False
                for x in rep.result:
                    if x.name == name:
                        resultnodes.extend(self.matchnodes([x], nextnames))
                        has_matched = True
                # XXX accept IDs that don't have "()" for class instances
                if not has_matched and len(rep.result) == 1 and x.name == "()":
                    nextnames.insert(0, name)
                    resultnodes.extend(self.matchnodes([x], nextnames))
            node.ihook.pytest_collectreport(report=rep)
        return resultnodes
    def genitems(self, node):
        """ recursively yield all items below ``node``. """
        self.trace("genitems", node)
        if isinstance(node, pytest.Item):
            node.ihook.pytest_itemcollected(item=node)
            yield node
        else:
            assert isinstance(node, pytest.Collector)
            rep = collect_one_node(node)
            if rep.passed:
                for subnode in rep.result:
                    for x in self.genitems(subnode):
                        yield x
            node.ihook.pytest_collectreport(report=rep)
|
Carreau/pytest
|
_pytest/main.py
|
Python
|
mit
| 26,018
|
[
"VisIt"
] |
28f97eb349e1c9eed4bb7ab55558ae6e6b174d9858e2884454eab2b064801f8a
|
"""
set of tools to deal with IRIS observations
"""
import numpy as np
from pkg_resources import resource_filename
def si2dn(wave, band='NUV'):
    """
    Computes the coefficient to convert from flux SI units (W m^2 Hz^-1 sr^-1)
    to IRIS DN (counts). Most of this code came from Viggo's routines.

    Parameters
    ----------
    wave : 1-D array
        Wavelengths of the observations in nm
    band : string, optional
        Band of the observations. At the moment only 'NUV' is supported.

    Returns
    -------
    result : float
        Conversion factor from W / (m2 Hz sr) to IRIS DN.
    """
    from scipy import constants as const
    from scipy import interpolate as interp
    from scipy.io.idl import readsav
    band = band.upper()
    # File with IRIS effective area
    eff_area_file = resource_filename('helita', 'data/iris_sra_20130211.geny')
    sr_per_arcsec2 = (2. * const.pi / 360. / 60.**2)**2
    assert band == 'NUV', 'Only NUV band supported'
    pix_dlambda = {'NUV': 25.46e-3}       # spectral pixel size
    electrons_per_photon = {'NUV': 1.}    # electrons per photon (in CCD)
    res_x = {'NUV': 1 / 3.}               # x resolution in arcsec
    res_y = 1 / 6.                        # y resolution (along slit) in arcsec
    photon_energy = 1.e7 * const.h * const.c / (wave * 1.e-9)  # erg per photon
    # convert from W m-2 Hz-1 sr-1 to erg s-1 cm-2 AA-1 sr-1 (I_nu -> I_lambda)
    intensity_conv = 1e3 * const.c * 1e9 / wave**2 / 10
    # interpolate the tabulated NUV spectrograph effective area onto wave
    eff_area_tbl = readsav(eff_area_file).p0
    area_interp = interp.interp1d(eff_area_tbl['lambda'][0],
                                  eff_area_tbl['area_sg'][0][1])
    effective_area = area_interp(wave)
    pixel_sr = sr_per_arcsec2 * res_x[band] * res_y  # from arcsec^2 to sr
    return (intensity_conv * effective_area * pixel_sr * pix_dlambda[band] *
            electrons_per_photon[band] / photon_energy)
def add_iris_noise(spec, exptime=1.):
    """
    Adds realistic IRIS noise to Mg II spectra from RH, using an approximate
    recipe assuming that the counts/sec at 282.0 nm are as given in the
    instrument description (~3500), and adding shot noise from Poisson and
    from a realistic effective gain and read noise.

    Parameters
    ----------
    spec : 3-D array
        Spectrum from RH (ideally spectrally and spatially convolved).
    exptime : float, optional
        Exposure time in seconds.

    Returns
    -------
    result : 3-D array
        Spectrum with added noise. The input array is not modified.
    """
    rh2cnts = 3500 / 2.6979319e-09  # ad hoc conversion from SI units to counts
    gain1 = 14.5  # photon counts/DN, pristine gain
    gain2 = 16.0  # gain measured with charge spreading
    read_noise = 1.2  # DN
    # expected counts; the multiplication returns a new array, so the
    # caller's spec is left untouched
    spec_cts = spec * rh2cnts * (gain1 / gain2) * exptime
    # Add poisson noise. Parallelise this part?
    spec_cts = np.random.poisson(spec_cts)
    # poisson() returns an integer array: in-place true division (/=) on it
    # raises a casting TypeError on Python 3 / modern numpy, so reassign
    spec_cts = spec_cts / (gain1 / gain2 * exptime)
    # read noise, also drawn from a Poisson distribution around its mean
    rn = np.empty(spec_cts.shape, dtype='f')
    rn[:] = read_noise
    rn = np.random.poisson(rn)
    return (spec_cts + rn) / rh2cnts
def sj_filter(wave, band='IRIS_MGII_CORE', norm=True):
    """
    Returns the Solc filter for a given wavelength grid, for one of the
    NUV slit-jaw bands. Reads effective area.
    Parameters:
    -----------
    wave - 1D array
        Wavelength values (for interpolation), in nm
    band - string, optional
        Band to use, either 'IRIS_MGII_CORE' or 'IRIS_MGII_WING'
    norm - bool
        Defines whether resulting filter is normalised (ie, has unit area,
        NOT sum)
    Returns:
    --------
    wfilt - 1D array
        Array with wavelength filter. Values outside the band's
        wavelength range are set to zero.
    """
    from scipy import interpolate as interp
    from scipy.io.idl import readsav
    # File with IRIS effective area
    CFILE = resource_filename('helita', 'data/iris_sra_20130211.geny')
    ea = readsav(CFILE).p0
    wave_filt = ea['lambda'][0]
    # pick the tabulated SJI effective-area curve for the requested band
    if band.upper() == 'IRIS_MGII_CORE':
        filt = ea['area_sji'][0][2]
    elif band.upper() == 'IRIS_MGII_WING':
        filt = ea['area_sji'][0][3]
    else:
        raise ValueError
    # cubic smoothing-free spline interpolation onto the requested grid
    wfilt = interp.splev(wave, interp.splrep(wave_filt, filt, k=3, s=0))
    # wavelength range over which each band's filter is considered valid
    # (values appear to be in nm, consistent with the other routines here)
    if band.upper() == 'IRIS_MGII_CORE':
        widx = (wave > 277.8) & (wave < 283.5)
    elif band.upper() == 'IRIS_MGII_WING':
        widx = (wave > 281) & (wave < 285)
    if norm:
        # normalise to unit area over the valid range
        wfilt /= np.trapz(wfilt[widx], x=wave[widx])
    # zero the filter outside the valid range
    wfilt[~widx] = 0.
    return wfilt
def make_fits_level3_skel(filename, dtype, naxis, times, waves, wsizes,
                          desc="File from make_fits_level3_skel", descw=None,
                          cwaves=None, header_extra=None):
    """
    Creates a FITS file compliant with IRIS level 3 for use in CRISPEX. The
    file structure will be created and dummy data will be written, so that
    it can be later populated.
    Parameters
    ----------
    filename : string
        Name of file to write.
    dtype : string or numpy.dtype object
        Numpy datatype. Must be one of 'uint8', 'int16', 'int32', 'float32',
        or 'float64'.
    naxis : list of two integers
        Spatial dimensions of the file (nx, ny).
    times : array_like
        Array with times for each raster and slit position. Can either be
        1 dimension (same time for all slit positions), or 2 dimensions as
        (ntime, nsteps).
        The values are the time in seconds since DATE_OBS.
    waves : array_like
        Array with wavelength values (in Angstrom) for all the windows.
    wsizes : array_like of ints
        Sizes of the wavelength window(s). Can have one or more items, but
        the sum of all sizes must be less than len(waves). Each size has to
        be more than 2.
    desc : string, optional
        String describing observations (to go into OBS_DESC).
    descw : list of strings, optional
        Description of each wavelength window, to write in the WDESCx cards.
        Must have same size as wsizes.
    cwaves : list of floats, optional
        Central wavelengths of each wavelength window. IMPORTANT: must be
        increasing (smallest wavelength comes first). If not given, will use
        average from each window.
    header_extra : dictionary or pyfits.hdu.header object, optional
        Extra header information. This should be used to write important
        information such as CDELTx, CRVALx, CPIXx, XCEN, YCEN, DATE_OBS.
    """
    from astropy.io import fits as pyfits
    from datetime import datetime
    VERSION = '001'
    FITSBLOCK = 2880  # FITS blocksize in bytes
    # a mutable default argument would be shared between calls; use None
    # as the default and create a fresh dict per call instead
    if header_extra is None:
        header_extra = {}
    # accept any array_like: plain lists would break .shape and the
    # elementwise comparisons below (e.g. list < 3 on Python 3)
    times = np.asarray(times)
    waves = np.asarray(waves)
    wsizes = np.asarray(wsizes)
    # Consistency checks
    VALID_DTYPES = ['uint8', 'int16', 'int32', 'float32', 'float64']
    if type(dtype) != np.dtype:
        dtype = dtype.lower()
        if dtype not in VALID_DTYPES:
            raise TypeError("dtype %s not one of %s" % (dtype,
                                                        repr(VALID_DTYPES)))
    stime = times.shape
    if len(stime) == 2:
        if stime[1] != naxis[0]:
            raise ValueError("Second dimension of times must be same as nx")
    if np.any(wsizes < 3):
        raise ValueError("All wavelength windows must be bigger than 2.")
    if np.sum(wsizes) != len(waves):
        raise ValueError("wsizes do not add up to len(waves)")
    if np.any(np.diff(waves) < 0):
        raise ValueError("Wavelengths must be increasing!")
    if cwaves is None:
        # default central wavelengths: the mean of each window
        cwaves = []
        s = 0
        for w in wsizes:
            cwaves.append(waves[s:s + w].mean())
            s += w
    if descw is None:
        descw = []
        for i in wsizes:
            descw.append("Some wavelength")
    # Create header from a dummy 4-D HDU; real axis sizes are set below
    tmp = np.zeros((1, 1, 1, 1), dtype=dtype)
    hdu = pyfits.PrimaryHDU(data=tmp)
    hd = hdu.header
    hd['NAXIS1'] = naxis[0]
    hd['NAXIS2'] = naxis[1]
    hd['NAXIS3'] = len(waves)
    hd['NAXIS4'] = len(times)
    hd['EXTEND'] = (True, 'FITS data may contain extensions')
    hd['INSTRUME'] = ('IRIS', 'Data generated in IRIS format')
    hd['DATA_LEV'] = (3., 'Data level')
    hd['LVL_NUM'] = (3., 'Data level')
    hd['VER_RF3'] = (VERSION, 'Version number of make_fits_level3_skel')
    hd['OBJECT'] = ('Sun', 'Type of solar area')
    hd['OBSID'] = (0000000000, 'obsid')
    hd['OBS_DESC'] = (desc, '')
    hd['DATE_OBS'] = (str(np.datetime64(datetime.today()))[:-5], '')
    hd['STARTOBS'] = (str(np.datetime64(datetime.today()))[:-5], '')
    hd['BTYPE'] = ('Intensity', '')
    hd['BUNIT'] = ('Corrected DN', '')
    # default WCS-like cards; callers typically override via header_extra
    hd['CDELT1'] = (0.34920, '[arcsec] x-coordinate increment')
    hd['CDELT2'] = (0.16635, '[arcsec] y-coordinate increment')
    hd['CDELT3'] = (0.02596, '[AA] wavelength increment')
    hd['CDELT4'] = (51.8794, '[s] t-coordinate axis increment')
    hd['CRPIX1'] = (1., 'reference pixel x-coordinate')
    hd['CRPIX2'] = (1., 'reference pixel y-coordinate')
    hd['CRPIX3'] = (1., 'reference pixel lambda-coordinate')
    hd['CRPIX4'] = (1., 'reference pixel t-coordinate')
    hd['CRVAL1'] = (0., '[arcsec] Position refpixel x-coordinate')
    hd['CRVAL2'] = (0., '[arcsec] Position refpixel y-coordinate')
    hd['CRVAL3'] = (1332.7, '[Angstrom] wavelength refpixel lambda-coordina')
    hd['CRVAL4'] = (0., '[s] Time mid-pixel t-coordinate')
    hd['CTYPE1'] = ('x', '[arcsec]')
    hd['CTYPE2'] = ('y', '[arcsec]')
    hd['CTYPE3'] = ('wave', '[Angstrom]')
    hd['CTYPE4'] = ('time', '[s]')
    hd['CUNIT1'] = ('arcsec', '')
    hd['CUNIT2'] = ('arcsec', '')
    hd['CUNIT3'] = ('Angstrom', '')
    hd['CUNIT4'] = ('s', '')
    hd['XCEN'] = (0., '[arcsec] x-coordinate center of FOV 1 raster')
    hd['YCEN'] = (0., '[arcsec] y-coordinate center of FOV 1 raster')
    # HERE PUT WCS STUFF
    # per-window cards describing how the windows are concatenated in NAXIS3
    nwin = len(wsizes)
    hd['NWIN'] = (nwin, 'Number of windows concatenated')
    istart = 0
    for i in range(nwin):
        iss = str(i + 1)
        hd['WSTART' + iss] = (istart, 'Start pixel for subwindow')
        hd['WWIDTH' + iss] = (wsizes[i], 'Width of subwindow')
        hd['WDESC' + iss] = (descw[i], 'Name of subwindow')
        hd['TWAVE' + iss] = (cwaves[i], 'Line center wavelength in subwindow')
        istart += wsizes[i]
    hd['COMMENT'] = 'Index order is (x,y,lambda,t)'
    for key, value in list(header_extra.items()):
        hd[key] = value
    # add some empty cards for contingency
    for i in range(10):
        hd.append()
    hd['DATE'] = (str(np.datetime64(datetime.today()))[:10],
                  'Creation UTC (CCCC-MM-DD) date of FITS header')
    hd.tofile(filename)
    # fill up empty file with correct size (seek + single byte write)
    with open(filename, 'rb+') as fobj:
        fsize = len(hd.tostring()) + (naxis[0] * naxis[1] * len(waves) *
                                      len(times) * np.dtype(dtype).itemsize)
        fobj.seek(int(np.ceil(fsize / float(FITSBLOCK)) * FITSBLOCK) - 1)
        fobj.write(b'\0')
    # put wavelengths and times as extensions
    pyfits.append(filename, waves)
    pyfits.append(filename, times)
    f = pyfits.open(filename, mode='update', memmap=True)
    f[1].header['EXTNAME'] = 'lambda-coordinate'
    f[1].header['BTYPE'] = 'lambda axis'
    f[1].header['BUNIT'] = '[AA]'
    f[2].header['EXTNAME'] = 'time-coordinates'
    f[2].header['BTYPE'] = 't axes'
    f[2].header['BUNIT'] = '[s]'
    f.close()
    return
def transpose_fits_level3(filename, outfile=None):
    """
    Transposes an 'im' level 3 FITS file into 'sp' file
    (ie, transposed). INCOMPLETE, ONLY HEADER SO FAR.
    Returns the transposed header; no file is written yet
    (``outfile`` is currently unused).
    """
    from astropy.io import fits as pyfits
    hdr_in = pyfits.getheader(filename)
    hdr_out = hdr_in.copy()
    # axis sizes of the input cube; 'NAXIS*' is a filtered card view whose
    # index 0 is NAXIS itself, so the per-axis cards start at index 1
    # NOTE(review): nx..nt are captured but not used yet (incomplete code)
    (nx, ny, nz, nt) = [hdr_in['NAXIS*'][i] for i in range(1, 5)]
    # per-axis keyword families that must be permuted for the transposition
    TRANSP_KEYS = ['NAXIS', 'CDELT', 'CRPIX', 'CRVAL', 'CTYPE', 'CUNIT']
    # input axis i becomes output axis ORDER[i]: (x,y,lambda,t) -> (lambda,t,x,y)
    ORDER = [2, 3, 0, 1]
    for item in TRANSP_KEYS:
        for i in range(4):
            hdr_out[item + str(ORDER[i] + 1)] = hdr_in[item + str(i + 1)]
    return hdr_out
def rh_to_fits_level3(filelist, outfile, windows, window_desc, times=None,
                      xsize=24., clean=False, time_collapse_2d=False,
                      cwaves=None, make_sp=False, desc=None, wave2vac=None,
                      wave_select=np.array([False])):
    """
    Converts a sequence of RH netCDF/HDF5 ray files to a FITS file
    compliant with IRIS level 3 for use in CRISPEX.
    Parameters
    ----------
    filelist : list
        Name(s) of RH ray files.
    outfile : string
        Name of output file.
    windows : list of lists / tuples
        List with start/end wavelengths of different windows to write.
        E.g. windows=[(279.0, 281.0), (656.1, 656.4)]. Wavelengths given
        in air and nm. WINDOWS CANNOT BE OVERLAPPING!
    window_desc : list of strings
        List with same number of elements as windows, with description
        strings for the different windows
    times : array_like, optional
        Times for different snapshots in list. If none given, assumed to
        be 10 seconds between snapshots.
    xsize : float, optional.
        Size of x dimension in Mm. Default is 24 Mm.
    clean : bool, optional
        If True, will clean up any masked values using an inpainting
        algorithm. Default is False.
    time_collapse_2d : bool, optional
        If True, will collapse the y dimension into a time dimension.
        Use for 2D models with time as the y dimension. Default is False.
    cwaves : list of floats, optional
        Central wavelengths of each wavelength window, in AA. IMPORTANT: must
        be increasing (smallest wavelength comes first). If not given, will
        use average from each window.
    make_sp : bool, optional
        If True, will also produce an sp cube. Default is False.
        NOTE(review): not yet implemented in this function body.
    desc : string, optional
        Description string (goes into OBS_DESC). If None, derived from
        the atmosphere ID of the first ray file.
    wave2vac : list, optional
        Defines whether to convert the wavelengths from vacuum to air.If not
        None, should be a boolean list with the same number of elements
        as there are windows. Windows that are True will be converted. Default
        is set to None, so no conversion takes place.
    wave_select : array, optional
        If present, will only use wavelengths that are contained in this
        array. Must be exact match. Useful to combine output files that
        have common wavelengths.
    """
    from ..sim import rh15d
    from specutils.utils.wcs_utils import air_to_vac
    from astropy.io import fits as pyfits
    from astropy import units as u
    nt = len(filelist)
    robj = rh15d.Rh15dout()
    robj.read_ray(filelist[0])
    hd = robj.ray.params.copy()
    # Consistency checks to make sure all files are compatible
    if nt > 1:
        for f in filelist[1:]:
            robj.read_ray(f)
            hd_tmp = robj.ray.params
            if wave_select is None:
                assert hd_tmp['nwave'] == hd['nwave']
            assert hd_tmp['nx'] == hd['nx']
            assert hd_tmp['ny'] == hd['ny']
    nx, ny = hd['nx'], hd['ny']
    if time_collapse_2d:
        # 2D models: the y axis of the model becomes the time axis
        nt = ny
        ny = 1
    if wave2vac is None:
        wave2vac = [False] * len(windows)
    wave_full = robj.ray.wavelength[:]
    # restrict to the common wavelengths, if a selection mask was given
    if wave_select.size == robj.ray.wavelength.size:
        wave_full = wave_full[wave_select]
    nwave = len(wave_full)
    waves = np.array([])
    indices = np.zeros(nwave, dtype='bool')
    nwaves = np.array([])
    # build the concatenated wavelength array and selection mask per window
    for (wi, wf), air_conv in zip(windows, wave2vac):
        idx = (wave_full > wi) & (wave_full < wf)
        tmp = wave_full[idx]
        if air_conv:
            # RH converts to air using Edlen (1966) method
            tmp = air_to_vac(tmp * u.nm, method='edlen1966', scheme='iteration').value
        waves = np.append(waves, tmp)
        nwaves = np.append(nwaves, len(tmp))
        indices += idx
    waves *= 10.  # in Angstrom
    if times is None:
        times = np.arange(0., nt) * 10.
    asec2Mm = 696. / 959.5
    xres = xsize / nx / asec2Mm  # in arcsec
    tres = 0
    if nt > 1:
        tres = np.median(np.diff(times))
    header_extra = {"XCEN": 0.0, "YCEN": 0.0,
                    "CRPIX1": ny // 2, "CRPIX2": nx // 2, "CRPIX3": 1,
                    "CRPIX4": 1, "CRVAL1": 0.0, "CRVAL2": 0.0,
                    "CRVAL3": waves[0], "CRVAL4": times[0], "CDELT1": xres,
                    "CDELT2": xres, "CDELT3": np.median(np.diff(waves)),
                    "CDELT4": tres}
    # honor a caller-supplied description; previously this was always
    # overwritten and then never passed on, leaving OBS_DESC at its default
    if desc is None:
        desc = "Calculated from %s" % (robj.ray.params['atmosID'])
    make_fits_level3_skel(outfile, robj.ray.intensity.dtype,
                          (ny, nx), times, waves, nwaves, desc=desc,
                          descw=window_desc, cwaves=cwaves,
                          header_extra=header_extra)
    fobj = pyfits.open(outfile, mode="update", memmap=True)
    if time_collapse_2d:
        robj.read_ray(filelist[0])
        tmp = robj.ray.intensity[:]
        if wave_select.size == robj.ray.wavelength.size:
            tmp = tmp[..., wave_select]
        tmp = tmp[:, :, indices]
        if clean:
            tmp = rh15d.clean_var(tmp, only_positive=True)
        else:  # always clean up for NaNs, Infs, masked, and negative
            idx = (~np.isfinite(tmp)) | (tmp < 0) | (tmp > 9e36)
            tmp[idx] = 0.0
        fobj[0].data[:] = tmp[:, :, np.newaxis].transpose((1, 3, 0, 2))
    else:
        for i, f in enumerate(filelist):
            robj.read_ray(f)
            tmp = robj.ray.intensity[:]
            if wave_select.size == robj.ray.wavelength.size:
                # common wavelengths, if applicable
                tmp = tmp[..., wave_select]
            tmp = tmp[:, ::-1, indices]  # right-handed system
            if clean:
                tmp = rh15d.clean_var(tmp, only_positive=True)
            else:  # always clean up for NaNs, Infs, masked, and negative
                idx = (~np.isfinite(tmp)) | (tmp < 0) | (tmp > 9e36)
                tmp[idx] = 0.0
            fobj[0].data[i] = tmp.T
    fobj.close()
    return
|
ITA-Solar/helita
|
helita/obs/iris.py
|
Python
|
bsd-3-clause
| 17,676
|
[
"NetCDF"
] |
3a772b284833b763327002376f6e10be54f97cab86a429658d5073e5c64d06d5
|
#!/usr/bin/python
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
"""rts2saf_analyze.py performs the offline analysis of many runs and
optionally creates a linear temperature model. class ``Do`` provides the environment.
"""
__author__ = 'markus.wildi@bluewin.ch'
import os
import sys
# if executed in background or without X Window plt.figure() fails
#
# http://stackoverflow.com/questions/1027894/detect-if-x11-is-available-python
import psutil
import matplotlib
# True/False once display availability is decided; None until then
XDISPLAY=None
import pkg_resources
# psutil changed its API between 1.x and 2.x (ppid/name became methods)
# NOTE(review): assumes a three-component version string; a two-component
# version like "2.0" would make this unpacking raise ValueError
p1, p2, p3= pkg_resources.get_distribution("psutil").version.split('.')
if int(p1) >= 2:
    pnm=psutil.Process(psutil.Process(os.getpid()).ppid()).name()
elif int(p1) >= 1:
    pnm=psutil.Process(psutil.Process(os.getpid()).ppid).name
# ok if it dies here
# NOTE(review): with psutil major version 0 pnm is undefined and the next
# line raises NameError — apparently intentional, see comment above
if 'init' in pnm or 'rts2-executor' in pnm:
    # started by init or the RTS2 executor: no X display, use Agg backend
    matplotlib.use('Agg')
    XDISPLAY=False
else:
    from subprocess import Popen, PIPE
    # probe the X server: xset exits 0 only if a display is reachable
    p = Popen(["xset", "-q"], stdout=PIPE, stderr=PIPE)
    p.communicate()
    if p.returncode == 0:
        XDISPLAY=True
    else:
        matplotlib.use('Agg')
        XDISPLAY=False
from rts2saf.analyzeruns import AnalyzeRuns
from rts2saf.temperaturemodel import TemperatureFocPosModel
if __name__ == '__main__':
    # Offline analysis entry point: read FITS images from previous focus
    # runs below --basepath, analyze them and optionally fit a
    # temperature model.
    import argparse
    from rts2saf.config import Configuration
    from rts2saf.environ import Environment
    from rts2saf.log import Logger
    # Default configuration file as installed by rts2saf.
    defaultCfg = '/usr/local/etc/rts2/rts2saf/rts2saf.cfg'
    script=os.path.basename(__file__)
    # Command line definition; every help string echoes its default.
    parser = argparse.ArgumentParser(prog = script, description = 'rts2asaf analysis')
    parser.add_argument('--debug', dest = 'debug', action = 'store_true', default = False, help = ': %(default)s,add more output')
    parser.add_argument('--sxdebug', dest = 'sxDebug', action = 'store_true', default = False, help = ': %(default)s,add more output on SExtract')
    parser.add_argument('--level', dest = 'level', default = 'INFO', help = ': %(default)s, debug level')
    parser.add_argument('--topath', dest = 'toPath', metavar = 'PATH', action = 'store', default = '.', help = ': %(default)s, write log file to path')
    parser.add_argument('--logfile', dest = 'logfile', default = '{0}.log'.format(script), help = ': %(default)s, logfile name')
    parser.add_argument('--toconsole', dest = 'toconsole', action = 'store_true', default = False, help = ': %(default)s, log to console')
    parser.add_argument('--config', dest = 'config', action = 'store', default = defaultCfg, help = ': %(default)s, configuration file path')
    parser.add_argument('--basepath', dest = 'basePath', action = 'store', default = None, help = ': %(default)s, directory where FITS images from possibly many focus runs are stored')
    parser.add_argument('--filternames', dest = 'filterNames', action = 'store', default = None, type = str, nargs = '+', help = ': %(default)s, list of SPACE separated filters to analyzed, None: all')
    #ToDo parser.add_argument('--ds9region', dest = 'ds9region', action = 'store_true', default = False, help = ': %(default)s, create ds9 region files')
    parser.add_argument('--ds9display', dest = 'Ds9Display', action = 'store_true', default = False, help = ': %(default)s, display fits images and region files')
    parser.add_argument('--fitdisplay', dest = 'FitDisplay', action = 'store_true', default = False, help = ': %(default)s, display fit')
    parser.add_argument('--cataloganalysis', dest = 'catalogAnalysis', action = 'store_true', default = False, help = ': %(default)s, analysis is done with CatalogAnalysis')
    parser.add_argument('--criteria', dest = 'criteria', action = 'store', default = 'rts2saf.criteria_radius', help = ': %(default)s, CatalogAnalysis criteria Python module to load at run time')
    parser.add_argument('--associate', dest = 'associate', action = 'store_true', default = False, help = ': %(default)s, let sextractor associate the objects among images')
    parser.add_argument('--flux', dest = 'flux', action = 'store_true', default = False, help = ': %(default)s, do flux analysis')
    parser.add_argument('--model', dest = 'model', action = 'store_true', default = False, help = ': %(default)s, fit temperature model')
    parser.add_argument('--fraction', dest = 'fractObjs', action = 'store', default = 0.5, type = float, help = ': %(default)s, fraction of objects which must be present on each image, base: object number on reference image, this option is used only together with --associate')
    parser.add_argument('--emptySlots', dest = 'emptySlots', action = 'store', default = None, type = str, nargs = '+', help = ': %(default)s, list of SPACE separated names of the empty slots')
    parser.add_argument('--focuser-interval', dest = 'focuserInterval', action = 'store', default = list(), type = int, nargs = '+', help = ': %(default)s, focuser position interval, positions out side this interval will be ignored')
    # ToDo parser.add_argument('--display-failures', dest = 'display_failures', action = 'store_true', default = False, help = ': %(default)s, display focus run where the fit failed')
    parser.add_argument('--means', dest = 'means', action = 'store_true', default = False, help = ': %(default)s, calculate weighted means')
    args = parser.parse_args()
    # --debug implies maximum log level and console output.
    if args.debug:
        args.level = 'DEBUG'
        args.toconsole = True
    # logger
    logger = Logger(debug = args.debug, args = args).logger # if you need to change the log format do it here
    # hint to the user
    if defaultCfg in args.config:
        logger.info('rts2saf_focus: using default configuration file: {0}'.format(args.config))
    # config
    rtc = Configuration(logger = logger)
    if not rtc.readConfiguration(fileName=args.config):
        logger.error('rts2saf_focus: exiting, wrong syntax, check the configuration file: {0}'.format(args.config))
        sys.exit(1)
    # overwrite config defaults with the command line choices
    rtc.cfg['ANALYZE_FLUX'] = args.flux
    rtc.cfg['ANALYZE_ASSOC'] = args.associate
    rtc.cfg['ANALYZE_ASSOC_FRACTION'] = args.fractObjs
    rtc.cfg['FOCUSER_INTERVAL'] = args.focuserInterval
    rtc.cfg['WEIGHTED_MEANS'] = args.means
    if args.FitDisplay:
        rtc.cfg['WITH_MATHPLOTLIB'] = True
    if args.emptySlots is not None:
        rtc.cfg['EMPTY_SLOT_NAMES'] = [ x.strip() for x in args.emptySlots ]
    # ToDo ugly
    if args.filterNames is not None:
        fts = [ x.strip() for x in args.filterNames ]
        args.filterNames = fts
    rtc.checkConfiguration(args=args)
    # environment
    ev = Environment(debug = args.debug, rt = rtc, logger = logger)
    # --basepath is mandatory: it is where the FITS images live.
    if not args.basePath:
        parser.print_help()
        logger.warn('rts2saf_analyze: no --basepath specified')
        sys.exit(1)
    if not args.toconsole:
        print 'you may wish to enable logging to console --toconsole'
        print 'log file is written to: {}'.format(args.logfile)
    # Collect the focus runs below basePath and analyze each of them.
    aRs = AnalyzeRuns(debug = args.debug, basePath = args.basePath, args = args, rt = rtc, ev = ev, logger = logger, xdisplay = XDISPLAY)
    aRs.aggregateRuns()
    if len(aRs.fS) == 0:
        logger.warn('rts2saf_analyze: exiting, no files found in basepath: {}'.format(args.basePath))
        sys.exit(1)
    rFf = aRs.analyzeRuns()
    if len(rFf)==0:
        logger.error('rts2saf_analyze: no results, exiting')
        sys.exit(1)
    # Optionally fit focus position as a function of ambient temperature.
    if args.model:
        if rFf[0].ambientTemp in 'NoTemp':
            logger.warn('rts2saf_analyze: no ambient temperature available in FITS files, no model fitted')
        else:
            # temperature model
            PLOTFN = ev.expandToPlotFileName( plotFn = '{}/temp-model.png'.format(args.basePath))
            dom = TemperatureFocPosModel(showPlot = True, date = ev.startTime[0:19], comment = 'test run', plotFn = PLOTFN, resultFitFwhm = rFf, logger = logger)
            dom.fitData()
            dom.plotData()
            logger.info('rts2saf_analyze: storing plot at: {}'.format(PLOTFN))
|
zguangyu/rts2
|
scripts/rts2saf/rts2saf_analyze.py
|
Python
|
gpl-2.0
| 8,781
|
[
"VisIt"
] |
298113a40a073f05016f9d89a2f91b53af38a7d896b96108f8e4c7b31d3c2686
|
#!/usr/bin/python3
#This file is the back-end for the GUI and CLI programs.
import sys;
class DCTranslator:
    """Encoder/decoder for "Dragon Code" (DC2) creature-description strings.

    The class-level lists are lookup tables.  Every non-empty entry has the
    form ``"<code>|<human readable name>"``: the text before the ``|`` is the
    abbreviation written into a DC2 code and the text after it is the display
    name.  An empty string entry means "unspecified" and emits nothing.
    The nested ``subSpecies``/``subSubSpecies``/``subSubSubSpecies`` tables
    are indexed by the selections made at the level above them.
    """

    # Top-level species codes.
    species = [ "", "D|Dragons", "H|Humanoids", "A|Amphibians", "B|Birds", "C|Crustaceans", "S|Dinosaurs", "E|Extraterrestrials", "F|Fish", "I|Insects", "L|Legends", "M|Mammals", "M|Molluscs", "Y|Mythical", "P|Plants", "R|Reptiles", "Q|Spirits", "U|Undead", "~|Shape Changers" ]
    # Second-level tables, indexed by the species index above.
    subSpecies = [
        [], #Unknown
        [ "", "a|Amphiteres", "c|Draconids", "d|Dragonettes", "e|Eastern Dragons", "f|Faerie Dragons", "h|Hydra", "i|Dimar", "l|Dracolich", "p|Pernese Dragons", "r|Turtle Dragons", "s|Serpents", "t|Tarrasques", "u|Pseudodragons", "v|Wyverns", "w|Western Dragons", "y|Wyrms" ], #Dragons
        [ "", "a|Apes", "d|Dwarves", "e|Elves", "f|Fairies", "i|Giants", "g|Gnomes", "h|Hobbits", "k|Kender", "y|Nymphs", "t|Troll", "w|Wolfman", "?|Unknown" ], #Humanoids
        [ "", "f|Frog", "n|Newts", "s|Salamanders", "t|Toads" ], #Amphibians
        [ "", "c|Crows", "e|Eagles", "h|Hawks", "p|Phoenix", "r|Ravens" ], #Birds
        [ "", "c|Crabs", "l|Lobsters", "s|Shrimps" ], #Crustaceans
        [ "", "a|Allosaurs", "c|Triceratops", "p|Apatosaurs", "s|Stegosaurs", "t|Tyrannosaurs", "v|Velociraptors" ], #Dinosaurs
        [ "", "d|Daleks", "t|Tribbles" ], #Extraterrestrials
        [ "", "h|Sea horses", "f|Freshwater fish", "s|Sharks" ], #Fish
        [ "", "a|Ants", "b|Beetles", "f|Flies", "l|Locusts", "m|Moths", "u|Butterflies" ], #Insects
        [ "", "r|Gargoyles", "l|Gremlins", "g|Griffins or Gryphons", "n|Manticores", "m|Merfolk", "f|Salamanders", "s|Sprites", "t|Treants & Treefolk", "u|Unicorns" ], #Legends
        [ "", "a|Avians/Bats", "b|Bears", "c|Canines", "f|Felines", "h|Horses", "m|Monkeys", "p|Polecats", "r|Rodents", "w|Whales/Cetaceans" ], #Mammals
        [ "", "c|Cuttlefish", "l|Limpets", "o|Octopuses", "y|Oysters", "s|Snails" ], #Molluscs
        [ "", "c|Centaurs", "y|Cyclopses", "g|Golems", "h|Hellhounds", "m|Minotaurs", "p|Pegasi", "t|Satyrs", "s|Sphinxes" ], #Mythical
        [ "", "c|Cacti", "f|Fungi", "t|Trees" ], #Plants
        [ "", "a|Alligators/Crocodiles", "c|Chameleons", "g|Geckos", "k|Komodo Dragons", "l|Lizards", "n|Skinks", "s|Snakes", "t|Turtles" ], #Reptiles
        [ "", "a|Angels", "d|Devils/Demons", "g|Ghosts", "i|Imps", "p|Poltergeists", "s|Spectres", "w|Will-o-the-wisps" ], #Spirits
        [ "", "g|Ghouls", "v|Vampires", "z|Zombies" ], #Undead
        [], #Shape changers
    ]
    # Third-level tables: subSubSpecies[species][subSpecies] -> entries.
    subSubSpecies = [
        [], #Unknown
        [ [], [], [], [], [], [], [], [], [], [], [], [
            "", "s|Sea Serpents", "f|Fire Serpents" #Serpents
        ], [], [], [], [], [] ], #Dragons
        [ [], [], [], [
            "", "w|Wood Elves" #Elves
        ], [], [], [], [], [], [], [], [], [] ], #Humanoids
        [ [], [], [], [], [] ], #Amphibians
        [ [], [], [], [], [], [] ], #Birds
        [ [], [], [], [] ], #Crustaceans
        [ [], [], [], [], [], [], [] ], #Dinosaurs
        [ [], [], [] ], #Extraterrestrials
        [ [], [], [
            "", "g|Goldfish", "t|Trout" #Freshwater fish
        ], [] ], #Fish
        [ [], [], [], [], [], [], [] ], #Insects
        [ [], [], [], [], [], [], [], [], [], [] ], #Legends
        [ [], [], [], [
            "", "d|Domestic dogs", "f|Foxes", "w|Wolves" #Canines
        ], [
            "", "b|Black panthers", "c|Cheetahs", "d|Domestic cats", "p|Leopard", "l|Lions", "x|Lynxes", "a|Panthers", "u|Pumas", "t|Tigers" #Felines
        ], [], [
            "", "g|Gibbons" #Monkeys
        ], [
            "", "f|Ferrets", "m|Mink" #Polecats
        ], [
            "", "g|Gerbils", "h|Hamsters", "m|Mice", "r|Rats", "s|Squirrels" #Rodents
        ], [
            "", "d|Dolphins", "k|Killer Whales", "p|Porpoises" #Cetaceans
        ] ], #Mammals
        [ [], [], [], [], [], [] ], #Molluscs
        [ [], [], [], [], [], [], [], [], [] ], #Mythical
        [ [], [], [], [
            "", "a|Ash", "e|Elm", "o|Oak" #Trees
        ] ], #Plants
        [ [], [], [], [], [], [], [
            "", "f|Fire Skinks" #Skinks
        ], [], [] ], #Reptiles
        [ [], [], [], [], [], [], [], [] ], #Spirits
        [ [], [], [], [] ], #Undead
        [ [] ], #Shape changers
    ]
    # Fourth-level table; only leopards currently have entries.
    subSubSubSpecies = [
        [], #Unknown
        [], #Dragons
        [], #Humanoids
        [], #Amphibians
        [], #Birds
        [], #Crustaceans
        [], #Dinosaurs
        [], #Extraterrestrials
        [], #Fish
        [], #Insects
        [], #Legends
        [
            [], #Blank
            [], #Bats
            [], #Bears
            [], #Canines
            [
                [], #Blank
                [], #Black panthers
                [], #Cheetahs
                [], #Domestic cats
                [
                    "", "s|Snow leopards"
                ], #Leopards
                [], #Lions
                [], #Lynxes
                [], #Panthers
                [], #Pumas
                [] #Tigers
            ], #Felines
            [], #Horses
            [], #Monkeys
            [], #Polecats
            [], #Rodents
            [] #Cetaceans
        ], #Mammals
        [], #Molluscs
        [], #Mythical
        [], #Plants
        [], #Reptiles
        [], #Spirits
        [], #Undead
        [], #Shape changers
    ]
    # Remaining single-level attribute tables (same "code|name" convention).
    gender = [ "", "f|Female", "h|Hermaphrodite", "m|Male", "n|Neuter", "p|Pseudo-hermaphrodite", "~|Variable", "?|Unknown" ]
    lengthMagnitude = [ "+++!|Celestial", "+++|Mistaken for mountain ranges", "++|Can't see own tail on a foggy day", "+|Godzilla-sized", "|Draco-sized", "-|Human-sized", "--|Dog-sized", "---|Pocket Dragon-sized", "---!|Microscopic", "~|Variable", "^|One-Dragon-sized" ]
    lengthUnit = [ "i|inches", "f|feet", "y|yards", "c|centimeters", "m|meters", "k|kilometers" ]
    lengthMod = [ "a|Arms", "l|Legs", "n|Head and neck", "t|Tail", "w|Wingspan" ]
    width = [ "", "+++!|I am Athelind! My belly is now several galaxies wide... while I'm only a few hundred feet long!", "+++|Planets have been known to crack in half with my arrival!", "++|My digestion of food has been known to cause earthquakes.", "+|I move by rolling. Flying has always been an effort for me.", "|What can I say... I'm normal, except for a few feasts here or there.", "-|I'm slightly on the slim side", "--|Ever heard of serpentine?", "---|Whoah! Whaddaya mean I look like a long string with wings?", "---!|I'm one-dimensional - all length and no width or depth. Just one long super-string!", "~|Variable - My girth depends on what I've just eaten!" ]
    weightUnit = [ "c|long hundredweight avoirdupois", "g|grams", "k|kilograms", "l|pounds avoirdupois", "o|ounces avoirdupois", "s|stones avoirdupois", "t|tons avoirdupois OR metric tons" ]
    weightMagnitude = [ "+++!|Black hole", "+++|Massive", "++|Obese", "+|Over-weight", "|Normal", "-|Under-weight", "--|Buoyant", "---|Feather-weight", "---!|Weightless" ]
    appendageType = [ "a|A pair of arms", "f|A pair of fore-limbs (e.g. limbs that can be used as both arms and legs", "h|A head", "k|A crest", "l|A pair of legs", "p|A pair of paddles, flukes, or fins", "t|A tail", "v|A pair of horns or spines on the head", "w|A pair of wings", "w'|A pair of wings that also act as arms, legs, or fore-limbs" ]
    appendageMod = [ "^|Appendage ends in a webbed hand or foot.", "+|One more than normal", "-|One less than normal", "!|I have many of these", "<number>|I have this many of these", "~|Variable" ]
    skinType = [ "", "b|Bark", "c|Cellulose", "e|Exoskeleton (shells, calcium carbonate)", "f|feathers", "h|Hide", "k|Skin", "l|Leather", "m|Metal", "r|Rock (Stone)", "s|Scales", "u|Fur", "x|Crystals", "|None (just bones)" ]
    appendageThatCanHaveDifferentSkin = [ "a|Arms", "b|Belly", "h|Head", "l|Legs", "n|Neck", "t|Tail", "w|Wings" ]
    color = [ "ag|Silver (Argent)", "am|Amber", "aq|Aquamarine", "au|Gold", "bk|Black", "bl|Blue", "br|Brown", "bs|Brass", "bz|Bronze", "ch|Chromium", "cu|Copper (Cuprum)", "cy|Cyan", "eb|Ebony", "fs|Flesh (Human)", "gr|Green", "gy|Gray", "hg|Mercury/Quicksilver (Hydrargyrum)", "in|Indigo", "iv|Ivory", "ja|Jade", "ma|Magenta", "mv|Mauve", "or|Orange", "pi|Pink", "pu|Purple", "rb|Rainbow (violet, indigo, blue, green, yellow, orange, red)", "re|Red", "ta|Tan", "tu|Turquoise", "mb|Umber", "vi|Violet", "wh|White", "ye|Yellow", "~|Chameleonic", "?|Unknown", "|Colorless (ice, crystal, or invisible creatures" ]
    # Color modifiers (not used by encode/decode yet).
    colorMod1 = [ "+", "-", "^", "_", "'", "%", "!" ]
    colorMod2 = [ "|", "=", ":", "*", "@", "\\", "/", "#", "&", "&1", ">" ]

    def decode( self, coded ):
        """Decode a single DC2 code string.

        :param coded: the DC2 code; must be a ``str``.
        :returns: a 2-tuple ``(success, message)`` where ``success`` is a
            boolean and ``message`` carries the decoded text or an error
            description.  Decoding of the individual tags is not implemented
            yet, so on success the message is currently empty.
        """
        # Bug fix: the old code returned a bare False here although the
        # documented contract is a (success, message) tuple.
        if( type( coded ) is not str ):
            result = "Argument must be a string"
            print( result, file = sys.stderr )
            return ( False, result )
        coded = coded.strip()
        tags = coded.split( sep=" " )
        result = str()
        versionIDAndSpeciesTag = tags[ 0 ]
        #Version identifier - currently must be "DC2."
        versionID = versionIDAndSpeciesTag.partition( "." )[ 0 ].upper() #In case we want to deal with multiple versions in some future update
        # Species portion after the dot; kept for the future tag decoding.
        speciesTag = versionIDAndSpeciesTag.partition( "." )[ 2 ]
        if( versionID != "DC2" ):
            result = "Code must start with version identifier"
            print( result, file = sys.stderr )
            return ( False, result )
        return ( True, result )

    #Split this into a separate function so the GUI can display a list of appendages
    def encodeAppendage( self, appendageTypeNum, webbed, oneMore, oneLess, many, thisMany, thisManyNum, variable ):
        """Encode one appendage description into its DC2 tag fragment.

        :param appendageTypeNum: index into ``appendageType``.
        :param webbed/oneMore/oneLess/many/variable: booleans selecting the
            corresponding entry of ``appendageMod``.
        :param thisMany: when true, append the literal count ``thisManyNum``.
        :returns: the encoded fragment, e.g. ``"a^"``.
        """
        result = ""
        result += self.appendageType[ appendageTypeNum ][ :self.appendageType[ appendageTypeNum ].find( "|" ) ]
        if( webbed ):
            result += self.appendageMod[ 0 ][ :self.appendageMod[ 0 ].find( "|" ) ]
        if( oneMore ):
            result += self.appendageMod[ 1 ][ :self.appendageMod[ 1 ].find( "|" ) ]
        if( oneLess ):
            result += self.appendageMod[ 2 ][ :self.appendageMod[ 2 ].find( "|" ) ]
        if( many ):
            result += self.appendageMod[ 3 ][ :self.appendageMod[ 3 ].find( "|" ) ]
        if( thisMany ):
            result += str( thisManyNum )
        if( variable ):
            result += self.appendageMod[ 5 ][ :self.appendageMod[ 5 ].find( "|" ) ]
        return result

    def encode( self, version, speciesNum, subSpeciesNum, subSubSpeciesNum, subSubSubSpeciesNum, genderNum, lengthType, lengthNum, lengthUnitNum, lengthModifiers, widthNum, weightType, weightNum, weightUnitNum, appendages, mainSkinTypeNum, appendageSkins ):
        """Encode the given selections into a DC2 string.

        Species indices < 0 and magnitude/type selectors equal to 0 mean
        "unspecified" and are skipped.  ``lengthModifiers`` and
        ``appendageSkins`` must be lists of 2-tuples; ``appendages`` is a
        list of fragments produced by :meth:`encodeAppendage`.
        Invalid arguments are ignored.

        :returns: the encoded DC2 string, e.g. ``"DC2.Dw"``.
        """
        result = "DC"
        if( version == 2 ):
            #DC version
            result += str( version )
            result += "."
            #Species, Subspecies, Subsubspecies, Subsubsubspecies
            if( speciesNum >= 0 ):
                result += self.species[ speciesNum ][ :1 ]
                if( subSpeciesNum >= 0 ):
                    result += self.subSpecies[ speciesNum ][subSpeciesNum][ :1 ]
                    if( subSubSpeciesNum >= 0 ):
                        result += self.subSubSpecies[ speciesNum ][ subSpeciesNum ][ subSubSpeciesNum ][ :1 ]
                        if( subSubSubSpeciesNum >= 0 ):
                            result += self.subSubSubSpecies[ speciesNum ][ subSpeciesNum ][ subSubSpeciesNum ][ subSubSubSpeciesNum ][ :1 ]
            #Gender
            if( self.gender[ genderNum ] != "" ):
                result += " G"
                result += self.gender[ genderNum ][ :1 ]
            #Length
            if( lengthType != 0 ): #0 is 'Unspecified'
                result += " L"
                if( lengthType == 1 ): #Order of Magnitude
                    result += self.lengthMagnitude[ lengthNum ][ :self.lengthMagnitude[ lengthNum ].find( "|" ) ]
                else: #Exact measure
                    result += str( lengthNum )
                    result += self.lengthUnit[ lengthUnitNum ][ :self.lengthUnit[ lengthUnitNum ].find( "|" ) ]
                for lm in lengthModifiers:
                    if( len( lm ) == 2 ):
                        result += str( lm[ 0 ] )
                        result += self.lengthMod[ lm[ 1 ] ][ :self.lengthMod[ lm[ 1 ] ].find( "|" ) ]
                    else:
                        print( "Error: lengthModifiers must be a list of 2-tuples.", file=sys.stderr )
            #Width
            if( self.width[ widthNum ] != "" ):
                result += " W"
                result += self.width[ widthNum ][ :self.width[ widthNum ].find( "|" ) ]
            #Weight
            if( weightType != 0 ):
                result += " T"
                if( weightType == 1 ): #Order of Magnitude
                    result += self.weightMagnitude[ weightNum ][ :self.weightMagnitude[ weightNum ].find( "|" ) ]
                else: #Exact measure
                    result += str( weightNum )
                    result += self.weightUnit[ weightUnitNum ][ :self.weightUnit[ weightUnitNum ].find( "|" ) ]
            #Appendages
            if( len( appendages ) > 0 ):
                result += " P"
                for a in appendages:
                    result += a
            #Skins
            if( self.skinType[ mainSkinTypeNum ] != "" ):
                result += " Sk"
                result += self.skinType[ mainSkinTypeNum ][ :self.skinType[ mainSkinTypeNum ].find( "|" ) ]
                for askin in appendageSkins:
                    result += ","
                    if( len( askin ) == 2 ):
                        result += self.appendageThatCanHaveDifferentSkin[ askin[ 0 ] ][ :self.appendageThatCanHaveDifferentSkin[ askin[ 0 ] ].find( "|" ) ]
                        result += self.skinType[ askin[ 1 ] ][ :self.skinType[ askin[ 1 ] ].find( "|" ) ]
                    else:
                        print( "Error: appendageSkins must be a list of 2-tuples.", file=sys.stderr )
        elif( version > 2 ):
            # Unknown newer version: emit only the version header.
            result += str( version )
            result += "."
        return result
|
dearingj/PyDC
|
PyDC_backend.py
|
Python
|
gpl-3.0
| 12,874
|
[
"Amber",
"CRYSTAL"
] |
15561357dbe9e4ad9cf2aadc072903b53e19c1c966cc959d7f0a9bcbfd59250c
|
import vtk
from vtk.util.colors import *
import mabdi
import logging
logging.basicConfig(level=logging.DEBUG,
format="%(levelname)s %(module)s @ %(funcName)s: %(message)s")
"""
Script to test SourceEnvironmentTable
SourceEnvironmentTable creates vtkPolyData based on an environment
with a floor, table, and two cups. Also, methods to add and
remove these objects.
"""
# Build the VTK rendering pipeline: source -> mapper -> actor.
source = mabdi.SourceEnvironmentTable()
source.Update()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(source.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Wire up renderer, render window, and interactor.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
renWin.AddRenderer(ren)
ren.Render()
iren.SetRenderWindow(renWin)
ren.AddActor(actor)
def user_event_callback(obj, env):
    """Toggle the visibility of the environment objects on each UserEvent.

    A boolean is stashed on the function itself so successive invocations
    alternate: floor/left_cup are shown when table/right_cup are hidden
    and vice versa.  Re-renders the scene after updating the source.
    """
    logging.debug('')
    # flip the persisted toggle (defaults to True before the first call,
    # so the first invocation yields False)
    toggled = not getattr(user_event_callback, "state", True)
    user_event_callback.state = toggled
    for obj_name, visible in (('floor', not toggled),
                              ('table', toggled),
                              ('left_cup', not toggled),
                              ('right_cup', toggled)):
        source.set_object_state(object_name=obj_name, state=visible)
    source.Modified()
    iren.Render()
# Fire the callback on VTK's UserEvent (the 'u' key in the interactor),
# then hand control to the interactor's event loop.
iren.AddObserver('UserEvent', user_event_callback)
iren.Initialize()
iren.Start()
|
lucasplus/MABDI
|
scripts/TestSourceEnvironmentTable.py
|
Python
|
bsd-3-clause
| 1,630
|
[
"VTK"
] |
8a42f803f09661e8d17e75fa2ae9c9c28bbaf10180b6d297d43d42e8480aa952
|
from datascope_tools.iterators import itergrams, rank_sorted
def test_itergrams():
    """itergrams yields sliding n-grams; pad=True completes the tail with None."""
    letters = ['a', 'b', 'c', 'd']
    # two different window sizes
    assert list(itergrams(letters, 2)) == [('a', 'b'), ('b', 'c'), ('c', 'd')]
    assert list(itergrams(letters, 3)) == [('a', 'b', 'c'), ('b', 'c', 'd')]
    # with padding, a final tuple is filled out with None
    padded = [('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', None)]
    assert list(itergrams(letters, 2, pad=True)) == padded
    # a one-shot generator must work just like a list
    assert list(itergrams(iter(letters), 2)) == [('a', 'b'), ('b', 'c'), ('c', 'd')]
def test_rank_sorted():
    """rank_sorted ranks descending by default; ties share a rank."""
    items = ['a', 'c', 'b', 'd']
    assert list(rank_sorted(items)) == [(1, 'd'), (2, 'c'), (3, 'b'), (4, 'a')]
    # ascending order via reverse=False
    ascending = [(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]
    assert list(rank_sorted(items, reverse=False)) == ascending
    # tied values get the same rank and the following rank is skipped
    scores = [95, 95, 90, 85]
    assert list(rank_sorted(scores)) == [(1, 95), (1, 95), (3, 90), (4, 85)]
def test_rank_sorted_with_key():
    """Ties computed through a key function all receive the same rank."""
    import operator
    by_score = operator.itemgetter(1)
    scores = [
        ('alex', 100), ('bo', 100), ('brian', 100), ('dean', 10),
        ('horatio', 90), ('irmak', 100), ('jess', 100), ('karlota', 90),
        ('melba', 90), ('meridith', 100), ('michael', 100), ('mike', 100),
        ('mollie', 100), ('vlad', 100),
    ]
    # ten-way tie at 100, three-way tie at 90, then dean brings up the rear
    expected = [
        (1, ('alex', 100)), (1, ('bo', 100)), (1, ('brian', 100)),
        (1, ('irmak', 100)), (1, ('jess', 100)), (1, ('meridith', 100)),
        (1, ('michael', 100)), (1, ('mike', 100)), (1, ('mollie', 100)),
        (1, ('vlad', 100)), (11, ('horatio', 90)), (11, ('karlota', 90)),
        (11, ('melba', 90)), (14, ('dean', 10)),
    ]
    assert list(rank_sorted(scores, key=by_score)) == expected
|
datascopeanalytics/datascope-tools
|
tests/test_iterators.py
|
Python
|
mit
| 1,942
|
[
"Brian"
] |
ccd1566b49b4e7c5b0a0a102f7c41367ee8514138959ddf9b8be3aad5d9fcc25
|
"""
Notes/Code for:
<jstor>
The Cadence of English Oratorical Prose
Author(s): Morris W. Croll
Source: Studies in Philology, Vol. 16, No. 1 (Jan., 1919), pp. 1-55
Published by: University of North Carolina Press
Stable URL: http://www.jstor.org/stable/4171740 .
Accessed: 05/07/2011 14:54
</jstor>
Mr.J Shelly, "Church Quarterly Review":
in English Book of Common Prayer:
~50% of all collects end in one of three forms of cursus
~54% of all Sunday collects end in one of three forms of cursus
"""
# Catalogue of the classical cursus (prose-cadence) forms discussed in the
# notes below.  Each entry records:
#   'croll'  - Croll's numerical notation: accented syllable positions
#              counted back from the end of the phrase, e.g. (5, 2) means
#              accents on the fifth and second syllables from the end
#   'cicero' - the corresponding Ciceronian metrical-foot name
#   'eg'     - example phrases (capitalised syllables mark the stresses)
cursus={
'planus1':{
'croll':(5,2),
'cicero':'cretic-trochee',
'eg':['help and defend us']
},
'planus2':{
'croll':(6,2),
'cicero':'peon-trochee',
'eg':['[suppli]CAtions of thy PEOple', 'WRItten for our LEArning']
},
'tardus':{
'croll':(6,3),
'cicero':'dicretic',
'eg':['GOVerned and SANCtified','[vo]CAtion and MINistry']
},
'velox':{
'croll':(7,4,2),
'cicero':'cretic-ditrochee',
'eg':['PUNished for OUR ofFENses']
},
'trispondaic':{
'croll':(9,6,4,2),
'cicero':'',
'eg':['proFItable to OUR salVAtion', 'PASS to our JOYful RESurRECtion', 'such good things as pass man\'s understanding', 'in the midst of so many and great dangers',]
}
}
"""
According to medieval theory, the cursus was used at the ends
of the commata, cola, and periodus (or conclusio), the parts, large
or small, of which a rhetorical period is constructed. (4)
In practice, I will try to show, it was not used
only in the final positions.
One is to include in the number all rhetorical
divisions of a period (all the Sunday collects consist of a single
period, that is, a single articulated sentence), however short, all
the commata and cola, that is, which according to Latin rule might
have cursus-endings.
The only other natural process is
to count those divisions of a prayer which are indicated by semi-
colons in the authorized editions.
There would be advantages in each method; but I choose the latter,
the method of counting the pauses marked by semi-colons, because
it eliminates the necessity of doubtful decisions.
The result, it must be said, is not favorable to Shelly's conclu-
sions. Of the 113 endings occurring at the places described only
43, or 38%
of the whole number, are in the three forms, according
to the strictest possible interpretation of the requirements of these
forms; only 45, or 40%, according to the freest interpretation of
them.9
the cadences occur in English where there are
none in Latin and vice versa.
The purpose of the two following chapters
of this paper will be to show that he has limited too narrowly the
area in which we may properly look for the influence of the cursus
in the Collects, in the first place in his study of the forms of English
cadence, and in the second place in his ideas concerning the places
where cadence may occur.
There are three variations of the regular Latin forms which
would be most likely to appear with frequency if the translators
worked in the free way we have described.
1. The ending
velox would easily become 8 - 4 - 2 in English,
and would
not lose its essential character in so doing. Some ex-
amples are:-carry us through all temptations (4th Sun. aft. Ep.);
defended by thy mighty power (5th Sun. aft. Ep.); partakers of
thy resurrection (Sun. bef. East.); the weakness of our mortal
nature (Tr. Sun.); declarest thy almighty power (11th Sun. aft.
Tr.); continually to be given (17th Sun. aft. Tr.).
2. Velox again could be modified by the addition of a light
syllable at the end, the form thus becoming 8-5-3 instead of
7 - 4- 2, or 9 - 5 - 3 instead of 8 - 4 - 2. This is a very common
ending:-defend us from all adversities (Tr. Sun.) ; serve thee in
all godly quietness (5th Sun. aft. Tr.) ; return into the way of
righteousness (3d Sun. aft. East.) ; always prevent and follow us 14
(17th Sun. aft. Tr.) ; visit us in great humility (lst Sun. in Adv.);
the example of his great humility (Sun. bef. East.); our defence
against all our enemies (3d Sun. in Lent); protection of thy good
Providence (2d Sun. aft. Tr.); hearts of the disobedient (3d Sun.
in Adv.).
3. Tardus would often become 7 - 3 instead of 6 - 3. This is
in fact the commoner form, I believe, in elevated prose; and cer-
tainly some of the most beautiful phrases in the prayer-book owe
their character to it. Examples are:-several necessities (All
Cond. of Men); dangers and adversities (3d Sun. aft. Tr.);
troubles and adversities (Collect in the Litany); free from all ad-
versities (22d Sun. aft. Tr.); acknowledging our wretchedness
(Ash-Wed.); ordered by thy governance (5th Sun. aft. Tr.);
never-failing Providence (8th Sun. aft. Tr.).
What are the reasons then for accepting this principle; that is,
for expecting the three variations, and perhaps still others, to
appear in English as equivalents of the regular Latin forms?
There are two, both derived from differences between the two lan-
guages: the first from a difference in the character of their words,
the second from a difference in their metrical character and cus-
toms.
I. English is far less polysyllabic than Latin. It had been so
even in its classical Anglo-Saxon form, in the period when Anglo-
Saxon was enjoying its highest courtly and literary cultivation;
and with the loss of inflections which attended its rapid decline
The only point to be made here, however, is the more general one
that in as far as this process of Latinization of the vocabulary had
gone on it was possible to have the cadences in English,-and no
further. Native English was not of a character to lend itself to
them, and it had become still more foreign to them during the
period of its decline.
[SECONDARY STRESS:]
This point may first be illustrated by a rather full consideration
of velox. This form is very inadequately represented by the for-
mula 7-4-2, _uuvu-- u for it is of its essence that the accent
on 4 shall be subordinate to that on 2, and the characteristic case
of it is that in which it ends in a four- or five-syllable word, with
the main accent on the penult, and hence (according to Latin rule)
a subordinate accent on the second syllable preceding. Thus-
et ad implenda quae viderint convalescant (lst Sun. aft. Ep.);
misericorditer liberemur (Sept. Sun).
[LATIN WORDS:]
And,
moreover, it is to be observed that there were not so many of them
in the middle of the sixteenth century as there
are now. Not many
can be gathered from the prayer-book
itself :-confirmation, media-
tion, resurrection, supplications, satisfaction, regeneration, circum-
cision, advantageous and a few others, nearly all words in -ion or
else words that are not likely to occur at the ends of phrases.'
[VELOX]
Again: In the case of the two-word phrases ending a velox, as
mortal nature, faithful servants, etc., there is a departure from
the exact Latin effect, but in the opposite direction from that just
mentioned. That is, there is here a tendency to put too strong an
accent on the adjective, and hence to give too much importance to
the minor accent of the cadence
This effect will not be produced, however, if the last accent of the
cadence is followed by two unaccented syllables instead of by one,
because the lengthening of this unaccented part of the period has
the effect of strengthening its accent, and the minor accent of the
preceding period is thus relatively reduced. Defend us from all
adversities, our defence against all our enemies, and serve thee in
all godly quietness are better reproductions of velox than phrases
of the form 7 - 4 - 2 would be in their places.
The syllable-counting custom of medieval Latin gives a
definite inalterable value to each unaccented syllable of a metrical
unit; and a slight difference between the number of such syllables
in one part of a cadence and another, between the two of the first
period of velox, for instance, and the one of each of its other
periods, may be depended upon to produce an effect and establish a
desired relation between the parts.
It follows that English cadence can never be prop-
erly described by a numerical system, and that it can never produce
the same effect as the Latin cadence unless it is allowed a certain
freedom in its use of unaccented syllables.
[THE PERIOD]
1. There is no better definition of the period than Hobbes' curt
translation of Aristotle in his Brief of the Art of Rhetorick
(1681) : 26 " A period is such a part as is perfect in itself, and has
such length as may easily be comprehended by the understanding."
Aristotle's statement in full (Rhetoric, iii, ch. 9) is as follows:
"I call a period a form of words which has independently in itself
a beginning and ending, and a length easily taken in at a glance."
not a syntactic or logical unit, but on the one hand a psycho-
logical, and on the other a rhythmical, unit.28
2. The parts of a divided period are called members (membra)
or cola (in medieval Latin also distinctiones or versus), and the
number of these that may constitute a period is undefined
The " harmony," " number," or " rhythm " of a period depends
chiefly upon the relations between the members of which it consists:
relations of length, form, and sound.
3. Some theorists give a place in the doctrine of the period to a
phenomenon which is very frequent in every oratorical style in
which there is a certain amplitude and dignity, namely, the com-
bination of two members, related to each other syntactically in
certain ways, to form a larger unit within the period. This double
unit, consisting of two members, is called a phrase
Unless there are at least two phrases,
balanced in form, we may describe the period as consisting merely
of members.
4. A colon of a certain length may fall into two (sometimes even
three) parts in utterance, the division between them being indicated
by a pause shorter than that at the end of a colon. One of these
parts, which, however, like the phrases, never occur singly, is called
a comma (caesum, incisum, or sometimes in medieval Latin sub-
distinctio). The division of the colon into commata is not con-
nected apparently with the physiological process of breathing, or at
least is not primarily due to this, but is chiefly the effect of a law
of beauty of sound which seems to demand such a break
It corresponds, that is, to the division of the
line made by the cesura in formal verse.80
The fact is that the neglect of this study has
been due to the tendency to avoid the oratorical models on which
all the theory of rhetoric is formed, and to consider prose chiefly
as it is addressed to the intellect, rather than as language spoken
and heard. The characteristic prose of the nineteenth century
has been the essay, rather than the address; and even in the eigh-
teenth century, the great authority of the Addisonian model of
style, especially as it was described in Blair's widely-used rhetoric,
tended to outweigh the influence of Johnson, Gibbon, Burke, Rob-
ertson, and other writers of the latter part of the century, who
wrote the more copious and sonorous language of oratory.
The English Collects themselves are the best possible corpus for
such an experiment, first, because they fulfill ideally the conditions
of an oral prose, and secondly, because they are made in close
rhetorical imitation of Latin models in which the formal rules of
the period were observed.
We may be sure that a science of the rhythmic period will
never be discovered. And if it is true that even in our older prose,
composed in the regular manner of the rhetorical tradition, we
often find it necessary to defend by an appeal to personal preference
our choice of this or that reading, it is certain that the reader will
find an ever-widening range for the exercise of his artistic gifts of
interpretation as he approaches the prose of our own time.
[PHRASES!]
The end of any phrase felt as having a unitary character may
be cadenced, whether or not it coincides with the end of one of the
divisions of a period.
1. A very simple type is that which consists of a noun preceded
by its adjective.
2. More interesting is the phrase in which two words, often
synonyms, are connected by and.
3. The prepositional phrase, that is, a noun, adjective, or verb
with a prepositional modifier following it, is an equally common
form:
The point urged in this section of my paper certainly does not
tend to simplify the subject of cadence. It tends rather to blur
and disarrange some of the definite lines that have been drawn
about it heretofore.
The same remark may be made, indeed, about
the preceding section; for the doctrine of the period, though it
seems to be the only trustworthy guide through the uncertainties
of cadence-occurrence,
is itself full of uncertainties, difficulties, and
problems
When velox may vary in length from
seven to ten, or even more syllables, and its later accents move about
as freely as we have been asserting they may, the method of scansion
becomes absurd. It is true that a table might still be made of the
forms that produce the required effect, and those that do not, but
it is far simpler to state general rules which will allow for all the
varieties of forms that we have discovered.
The rules, then, are as follows:
1. The English cadence ordinarily begins on one of the syllables
five to ten, counting from the end. It never begins later than the
fifth, but sometimes the long cadence may begin as far back as the
eleventh syllable, as in 11 - 7 - 3, or even on the twelfth, as in
12 - 8 -4. These are, however, extreme cases.
2. The first accent is the strongest in the cadence, as marking
its beginning. It is the climax as to height of pitch and strength
of accent of the member in which the cadence occurs, and indicates
the point at which the tendency to rhythmical form always observ-
able in oratory, but restrained earlier in the phrase by the necessities
of logical statement, is finally allowed to appear without check.
3. At this point a trochaic movement begins which carries
through to the end of the phrase and cadence. The trochaic move-
ment of the English cadence is alone enough to mark the influence
of the classical cadences upon it, for it is not the nature of English
prose, except under this influence, to keep to the same movement
(rising or falling) throughout a phrase.
It inclines to shift from
one to the other, and perhaps prefers, on the whole, to end in a
rising movement rather than a falling one.
4. Each cadence has two accents, of which the first is stronger
than the second, and is followed by a greater number of unaccented
syllables, or by an equal number of syllables which makes the effect
of being greater, than the second. Stated differently, this law is
that there is an effect of decreasing length of period and strength
of accent from the beginning of a cadence to the end
5. If the number of syllables following an accent exceeds three
a secondary subsidiary accent appears. This rule applies in practice
only to the period of the first accent because if the second period
contained more than four syllables it could not seem shorter than
the first (see rule 4); that is, this rule explains the form of the long
cadence
Velox in
Latin is a binary rhythm, the accent on 4 being only of importance
as serving to prop up or carry on the long run of syllables between
the accent on 7 and the accent on 2.
If they are
observed with the utmost freedom allowable to English rhythmical
custom, they still produce cadences which have the essential rhyth-
mical—though not the exact metrical—character of these three
Latin cadences.
Cadence, then, is perhaps the euphonious way of accompanying
in speech this natural fall or subsidence of energy.
Since we have drifted so
far from these actual metrical schemes in following the facts of
English practice, is it not safer to assume that the rules merely
describe a necessary and universal tendency of oratorical style, and
that the frequent occurrence in English of the exact metrical form
of the Latin cursus is due, not to medieval tradition, but to the
fact that these forms are the perfect and simplest manifestation of
this tendency?
"""
|
quadrismegistus/litlab-poetry
|
prosodic/croll/croll.py
|
Python
|
mit
| 16,415
|
[
"VisIt"
] |
44611fa5e849d43dcc63fa42a652c06354b5640f9775c45f59095f1c6cace687
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Instructor Dashboard.
"""
import time
from nose.plugins.attrib import attr
from bok_choy.promise import EmptyPromise
from ..helpers import UniqueCourseTest, get_modal_alert, EventsTestMixin
from ...pages.common.logout import LogoutPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.studio.overview import CourseOutlinePage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.problem import ProblemPage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
class BaseInstructorDashboardTest(EventsTestMixin, UniqueCourseTest):
    """
    Shared helpers for instructor dashboard end-to-end tests.
    """
    def log_in_as_instructor(self):
        """
        Create and sign in a unique course-staff user.

        Returns a ``(username, user_id)`` tuple for the new account.
        """
        instructor_name = "test_instructor_{uuid}".format(uuid=self.unique_id[0:6])
        auth_page = AutoAuthPage(
            self.browser, username=instructor_name, course_id=self.course_id, staff=True
        )
        return instructor_name, auth_page.visit().get_user_id()

    def visit_instructor_dashboard(self):
        """
        Open the instructor dashboard for this course and return its page object.
        """
        dashboard = InstructorDashboardPage(self.browser, self.course_id)
        dashboard.visit()
        return dashboard
@attr('shard_5')
class AutoEnrollmentWithCSVTest(BaseInstructorDashboardTest):
    """
    End-to-end tests for auto-registration and enrollment through a CSV
    upload on the Membership tab of the Instructor Dashboard.
    """
    def setUp(self):
        super(AutoEnrollmentWithCSVTest, self).setUp()
        self.course_fixture = CourseFixture(**self.course_info).install()
        self.log_in_as_instructor()
        dashboard = self.visit_instructor_dashboard()
        self.auto_enroll_section = dashboard.select_membership().select_auto_enroll_section()

    def _assert_notification(self, notification_type, message=None):
        """
        Check that a notification of ``notification_type`` is displayed and,
        when ``message`` is given, that its first message equals ``message``.
        """
        section = self.auto_enroll_section
        self.assertTrue(section.is_notification_displayed(section_type=notification_type))
        if message is not None:
            self.assertEqual(section.first_notification_message(section_type=notification_type), message)

    def test_browse_and_upload_buttons_are_visible(self):
        """
        The 'REGISTER/ENROLL STUDENTS' section shows both the 'Browse'
        and 'Upload' buttons.
        """
        self.assertTrue(self.auto_enroll_section.is_file_attachment_browse_button_visible())
        self.assertTrue(self.auto_enroll_section.is_upload_button_visible())

    def test_clicking_file_upload_button_without_file_shows_error(self):
        """
        Clicking Upload with no CSV attached shows an error notification
        whose message is 'File is not attached.'
        """
        self.auto_enroll_section.click_upload_file_button()
        self._assert_notification(self.auto_enroll_section.NOTIFICATION_ERROR, "File is not attached.")

    def test_uploading_correct_csv_file_results_in_success(self):
        """
        Uploading a CSV with correct data shows a success notification.
        """
        self.auto_enroll_section.upload_correct_csv_file()
        self._assert_notification(self.auto_enroll_section.NOTIFICATION_SUCCESS)

    def test_uploading_csv_file_with_bad_data_results_in_errors_and_warnings(self):
        """
        Uploading a CSV with incorrect data shows an error notification and a
        warning notification, each with its corresponding message.
        """
        self.auto_enroll_section.upload_csv_file_with_errors_warnings()
        self._assert_notification(
            self.auto_enroll_section.NOTIFICATION_ERROR,
            "Data in row #2 must have exactly four columns: email, username, full name, and country"
        )
        self._assert_notification(
            self.auto_enroll_section.NOTIFICATION_WARNING,
            "ename (d@a.com): (An account with email d@a.com exists but the provided username ename is different. Enrolling anyway with d@a.com.)"
        )

    def test_uploading_non_csv_file_results_in_error(self):
        """
        Uploading a non-CSV file (an image) shows an error notification about
        the required CSV format.
        """
        self.auto_enroll_section.upload_non_csv_file()
        self._assert_notification(
            self.auto_enroll_section.NOTIFICATION_ERROR,
            "Make sure that the file you upload is in CSV format with no extraneous characters or rows."
        )
class ProctoredExamsTest(BaseInstructorDashboardTest):
    """
    End-to-end tests for Proctoring Sections of the Instructor Dashboard.

    Builds a course with proctored exams enabled, a verified track, and one
    section/subsection/problem, then exercises the allowance and exam-attempt
    tools on the Proctoring tab.
    """
    # Credentials for the (non-staff) student account used throughout.
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(ProctoredExamsTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_outline = CourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Course fixture with the proctored-exams advanced setting switched on.
        course_fixture = CourseFixture(**self.course_info)
        course_fixture.add_advanced_settings({
            "enable_proctored_exams": {"value": "true"}
        })
        # One chapter -> sequential -> problem, so there is an exam to configure.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            )
        ).install()
        # Page objects for the verified-enrollment purchase flow.
        self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(
            self.browser, self.course_id, entry_point='verify-now'
        )
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)
        self.problem_page = ProblemPage(self.browser)
        # Add a verified mode to the course
        ModeCreationPage(
            self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
            min_price=10, suggested_prices='10,20'
        ).visit()
        # Auto-auth register for the course.
        self._auto_auth(self.USERNAME, self.EMAIL, False)

    def _auto_auth(self, username, email, staff):
        """
        Logout and login with given credentials.

        :param username: account username to authenticate as
        :param email: account email address
        :param staff: whether the account has course-staff privileges
        """
        AutoAuthPage(self.browser, username=username, email=email,
                     course_id=self.course_id, staff=staff).visit()

    def _login_as_a_verified_user(self):
        """
        Log in as the test student and enroll in the verified track,
        completing the (fake) payment flow.
        """
        self._auto_auth(self.USERNAME, self.EMAIL, False)
        # NOTE(review): the track selection page presumably requires the user to
        # be authenticated but not yet verified-enrolled — confirm prereqs in
        # the other tests if this visit starts failing.
        # Navigate to the track selection page
        self.track_selection_page.visit()
        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')
        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()
        # Submit payment
        self.fake_payment_page.submit_payment()

    def _create_a_proctored_exam_and_attempt(self):
        """
        Creates a proctored exam and makes the student attempt it so that
        the associated allowance and attempts are visible on the Instructor Dashboard.
        """
        # Visit the course outline page in studio as staff.
        LogoutPage(self.browser).visit()
        self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
        self.course_outline.visit()
        # Open the exam settings to make it a proctored exam.
        self.course_outline.open_exam_settings_dialog()
        self.course_outline.make_exam_proctored()
        time.sleep(2)  # Wait for 2 seconds to save the settings.
        # Log in as a verified student and visit the courseware.
        LogoutPage(self.browser).visit()
        self._login_as_a_verified_user()
        self.courseware_page.visit()
        # Start the proctored exam so an attempt record exists.
        self.courseware_page.start_proctored_exam()

    def _create_a_timed_exam_and_attempt(self):
        """
        Creates a timed exam and makes the student attempt it so that
        the associated allowance and attempts are visible on the Instructor Dashboard.
        """
        # Visit the course outline page in studio as staff.
        LogoutPage(self.browser).visit()
        self._auto_auth("STAFF_TESTER", "staff101@example.com", True)
        self.course_outline.visit()
        # Open the exam settings to make it a timed exam.
        self.course_outline.open_exam_settings_dialog()
        self.course_outline.make_exam_timed()
        time.sleep(2)  # Wait for 2 seconds to save the settings.
        # Log in as a verified student and visit the courseware.
        LogoutPage(self.browser).visit()
        self._login_as_a_verified_user()
        self.courseware_page.visit()
        # Start the timed exam so an attempt record exists.
        self.courseware_page.start_timed_exam()

    def test_can_add_remove_allowance(self):
        """
        Make sure that allowances can be added and removed.
        """
        # Given that an exam has been configured to be a proctored exam.
        self._create_a_proctored_exam_and_attempt()
        # When I log in as an instructor,
        self.log_in_as_instructor()
        # And visit the Allowance Section of Instructor Dashboard's Proctoring tab
        instructor_dashboard_page = self.visit_instructor_dashboard()
        allowance_section = instructor_dashboard_page.select_proctoring().select_allowance_section()
        # Then I can add Allowance to that exam for a student
        self.assertTrue(allowance_section.is_add_allowance_button_visible)

    def test_can_reset_attempts(self):
        """
        Make sure that Exam attempts are visible and can be reset.
        """
        # Given that an exam has been configured to be a timed exam.
        self._create_a_timed_exam_and_attempt()
        # When I log in as an instructor,
        self.log_in_as_instructor()
        # And visit the Student Proctored Exam Attempts Section of Instructor Dashboard's Proctoring tab
        instructor_dashboard_page = self.visit_instructor_dashboard()
        exam_attempts_section = instructor_dashboard_page.select_proctoring().select_exam_attempts_section()
        # Then I can see the search text field
        self.assertTrue(exam_attempts_section.is_search_text_field_visible)
        # And I can see one attempt by a student.
        self.assertTrue(exam_attempts_section.is_student_attempt_visible)
        # And I can remove the attempt by clicking the "x" at the end of the row.
        exam_attempts_section.remove_student_attempt()
        self.assertFalse(exam_attempts_section.is_student_attempt_visible)
@attr('shard_5')
class EntranceExamGradeTest(BaseInstructorDashboardTest):
    """
    Tests for entrance-exam specific student grading tasks on the
    Student Admin tab of the Instructor Dashboard.
    """
    def setUp(self):
        super(EntranceExamGradeTest, self).setUp()
        self.course_info.update({"settings": {"entrance_exam_enabled": "true"}})
        CourseFixture(**self.course_info).install()
        self.student_identifier = "johndoe_saee@example.com"
        # Create the student account (this automatically logs us in as the student).
        AutoAuthPage(
            self.browser,
            username="johndoe_saee",
            email=self.student_identifier,
            course_id=self.course_id,
            staff=False
        ).visit()
        LogoutPage(self.browser).visit()
        # Switch to an instructor and open the Student Admin tab.
        self.log_in_as_instructor()
        self.student_admin_section = self.visit_instructor_dashboard().select_student_admin()

    def _dismiss_success_alert(self):
        """Dismiss the browser alert shown after a successful action."""
        alert = get_modal_alert(self.student_admin_section.browser)
        alert.dismiss()

    def _assert_error_notification(self):
        """Wait for the pending ajax call and check an error message appeared."""
        self.student_admin_section.wait_for_ajax()
        self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)

    def test_input_text_and_buttons_are_visible(self):
        """
        The student email input box and every entrance-exam action button
        (reset attempts, rescore, delete state, task history) are visible.
        """
        section = self.student_admin_section
        self.assertTrue(section.is_student_email_input_visible())
        self.assertTrue(section.is_reset_attempts_button_visible())
        self.assertTrue(section.is_rescore_submission_button_visible())
        self.assertTrue(section.is_delete_student_state_button_visible())
        self.assertTrue(section.is_background_task_history_button_visible())

    def test_clicking_reset_student_attempts_button_without_email_shows_error(self):
        """
        Resetting attempts without entering a student identifier shows an
        error notification asking for an email address or username.
        """
        self.student_admin_section.click_reset_attempts_button()
        self.assertEqual(
            'Please enter a student email address or username.',
            self.student_admin_section.top_notification.text[0]
        )

    def test_clicking_reset_student_attempts_button_with_success(self):
        """
        Resetting attempts for a valid student shows a success alert.
        """
        self.student_admin_section.set_student_email(self.student_identifier)
        self.student_admin_section.click_reset_attempts_button()
        self._dismiss_success_alert()

    def test_clicking_reset_student_attempts_button_with_error(self):
        """
        Resetting attempts for a non-existing student shows an error message.
        """
        self.student_admin_section.set_student_email('non_existing@example.com')
        self.student_admin_section.click_reset_attempts_button()
        self._assert_error_notification()

    def test_clicking_rescore_submission_button_with_success(self):
        """
        Rescoring submissions for a valid student shows a success alert.
        """
        self.student_admin_section.set_student_email(self.student_identifier)
        self.student_admin_section.click_rescore_submissions_button()
        self._dismiss_success_alert()

    def test_clicking_rescore_submission_button_with_error(self):
        """
        Rescoring submissions for a non-existing student shows an error message.
        """
        self.student_admin_section.set_student_email('non_existing@example.com')
        self.student_admin_section.click_rescore_submissions_button()
        self._assert_error_notification()

    def test_clicking_skip_entrance_exam_button_with_success(self):
        """
        Letting a valid student skip the entrance exam shows a success alert.
        """
        self.student_admin_section.set_student_email(self.student_identifier)
        self.student_admin_section.click_skip_entrance_exam_button()
        # A window.confirm dialog appears first; accept it.
        get_modal_alert(self.student_admin_section.browser).accept()
        # Then an alert confirms the action; dismiss it.
        self._dismiss_success_alert()

    def test_clicking_skip_entrance_exam_button_with_error(self):
        """
        Letting a non-existing student skip the entrance exam shows an
        error message.
        """
        self.student_admin_section.set_student_email('non_existing@example.com')
        self.student_admin_section.click_skip_entrance_exam_button()
        # A window.confirm dialog appears first; accept it.
        get_modal_alert(self.student_admin_section.browser).accept()
        self._assert_error_notification()

    def test_clicking_delete_student_attempts_button_with_success(self):
        """
        Deleting entrance-exam state for a valid student shows a success alert.
        """
        self.student_admin_section.set_student_email(self.student_identifier)
        self.student_admin_section.click_delete_student_state_button()
        self._dismiss_success_alert()

    def test_clicking_delete_student_attempts_button_with_error(self):
        """
        Deleting entrance-exam state for a non-existing student shows an
        error message.
        """
        self.student_admin_section.set_student_email('non_existing@example.com')
        self.student_admin_section.click_delete_student_state_button()
        self._assert_error_notification()

    def test_clicking_task_history_button_with_success(self):
        """
        Requesting the background task history for a valid student shows
        the table of background tasks.
        """
        self.student_admin_section.set_student_email(self.student_identifier)
        self.student_admin_section.click_task_history_button()
        self.assertTrue(self.student_admin_section.is_background_task_history_table_visible())
class DataDownloadsTest(BaseInstructorDashboardTest):
    """
    Bok Choy tests for the "Data Downloads" tab.

    Each test requests one report type, waits for it to become available,
    and verifies both the analytics events and the download link.
    """
    def setUp(self):
        super(DataDownloadsTest, self).setUp()
        self.course_fixture = CourseFixture(**self.course_info).install()
        self.instructor_username, self.instructor_id = self.log_in_as_instructor()
        instructor_dashboard_page = self.visit_instructor_dashboard()
        self.data_download_section = instructor_dashboard_page.select_data_download()

    def verify_report_requested_event(self, report_type):
        """
        Verifies that the correct event is emitted when a report is requested.

        :param report_type: the ``report_type`` value expected on the
            ``edx.instructor.report.requested`` event.
        """
        self.assert_matching_events_were_emitted(
            event_filter={'name': u'edx.instructor.report.requested', 'report_type': report_type}
        )

    def verify_report_downloaded_event(self, report_url):
        """
        Verifies that the correct event is emitted when a report is downloaded.

        :param report_url: the ``report_url`` value expected on the
            ``edx.instructor.report.downloaded`` event.
        """
        self.assert_matching_events_were_emitted(
            event_filter={'name': u'edx.instructor.report.downloaded', 'report_url': report_url}
        )

    def verify_report_download(self, report_name):
        """
        Verifies that a report can be downloaded and an event fired.

        Expects exactly one report link whose URL contains ``report_name``.
        """
        download_links = self.data_download_section.report_download_links
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(len(download_links), 1)
        download_links[0].click()
        expected_url = download_links.attrs('href')[0]
        self.assertIn(report_name, expected_url)
        self.verify_report_downloaded_event(expected_url)

    def _request_and_verify_report(self, report_name, generate_button):
        """
        Shared flow for all report types: click ``generate_button``, wait for
        the report to appear, then verify the request event and the download.
        """
        generate_button.click()
        self.data_download_section.wait_for_available_report()
        self.verify_report_requested_event(report_name)
        self.verify_report_download(report_name)

    def test_student_profiles_report_download(self):
        """
        Scenario: Verify that an instructor can download a student profiles report
            Given that I am an instructor
            And I visit the instructor dashboard's "Data Downloads" tab
            And I click on the "Download profile information as a CSV" button
            Then a report should be generated
            And a report requested event should be emitted
            When I click on the report
            Then a report downloaded event should be emitted
        """
        self._request_and_verify_report(
            u"student_profile_info",
            self.data_download_section.generate_student_report_button
        )

    def test_grade_report_download(self):
        """
        Scenario: Verify that an instructor can download a grade report
            Given that I am an instructor
            And I visit the instructor dashboard's "Data Downloads" tab
            And I click on the "Generate Grade Report" button
            Then a report should be generated
            And a report requested event should be emitted
            When I click on the report
            Then a report downloaded event should be emitted
        """
        self._request_and_verify_report(
            u"grade_report",
            self.data_download_section.generate_grade_report_button
        )

    def test_problem_grade_report_download(self):
        """
        Scenario: Verify that an instructor can download a problem grade report
            Given that I am an instructor
            And I visit the instructor dashboard's "Data Downloads" tab
            And I click on the "Generate Problem Grade Report" button
            Then a report should be generated
            And a report requested event should be emitted
            When I click on the report
            Then a report downloaded event should be emitted
        """
        self._request_and_verify_report(
            u"problem_grade_report",
            self.data_download_section.generate_problem_report_button
        )
@attr('shard_5')
class CertificatesTest(BaseInstructorDashboardTest):
    """
    Tests for the Certificates tab of the instructor dashboard.
    """
    def setUp(self):
        super(CertificatesTest, self).setUp()
        self.course_fixture = CourseFixture(**self.course_info).install()
        self.log_in_as_instructor()
        self.certificates_section = self.visit_instructor_dashboard().select_certificates()

    def test_generate_certificates_buttons_is_visible(self):
        """
        With instructor-generation enabled, the 'Generate Certificates' button
        is shown; clicking it (and accepting the confirmation dialog) displays
        a status message and disables the button.
        """
        section = self.certificates_section
        self.assertTrue(section.generate_certificates_button.visible)
        section.generate_certificates_button.click()
        # Accept the browser confirmation dialog that guards generation.
        get_modal_alert(section.browser).accept()
        section.wait_for_ajax()
        EmptyPromise(
            lambda: section.certificate_generation_status.visible,
            'Certificate generation status shown'
        ).fulfill()
        self.assertEqual(section.generate_certificates_button.attrs('disabled')[0], 'true')

    def test_pending_tasks_section_is_visible(self):
        """
        The 'Pending Instructor Tasks' section is visible on the tab.
        """
        self.assertTrue(self.certificates_section.pending_tasks_section.visible)
|
tiagochiavericosta/edx-platform
|
common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py
|
Python
|
agpl-3.0
| 29,971
|
[
"VisIt"
] |
74394b16b7ea216d7ecb09ca155aa5da6df6a3a04682a9c371107babf24a13d8
|
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# Copyright (c) 2010-2013 Advanced Micro Devices, Inc.
# Copyright (c) 2013 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Nathan Binkert
# Andreas Hansson
import sys
from types import FunctionType, MethodType, ModuleType
import m5
from m5.util import *
# Have to import params up top since Param is referenced on initial
# load (when SimObject class references Param to create a class
# variable, the 'name' param)...
from m5.params import *
# There are a few things we need that aren't in params.__all__ since
# normal users don't need them
from m5.params import ParamDesc, VectorParamDesc, \
isNullPointer, SimObjectVector, Port
from m5.proxy import *
from m5.proxy import isproxy
#####################################################################
#
# M5 Python Configuration Utility
#
# The basic idea is to write simple Python programs that build Python
# objects corresponding to M5 SimObjects for the desired simulation
# configuration. For now, the Python emits a .ini file that can be
# parsed by M5. In the future, some tighter integration between M5
# and the Python interpreter may allow bypassing the .ini file.
#
# Each SimObject class in M5 is represented by a Python class with the
# same name. The Python inheritance tree mirrors the M5 C++ tree
# (e.g., SimpleCPU derives from BaseCPU in both cases, and all
# SimObjects inherit from a single SimObject base class). To specify
# an instance of an M5 SimObject in a configuration, the user simply
# instantiates the corresponding Python object. The parameters for
# that SimObject are given by assigning to attributes of the Python
# object, either using keyword assignment in the constructor or in
# separate assignment statements. For example:
#
# cache = BaseCache(size='64KB')
# cache.hit_latency = 3
# cache.assoc = 8
#
# The magic lies in the mapping of the Python attributes for SimObject
# classes to the actual SimObject parameter specifications. This
# allows parameter validity checking in the Python code. Continuing
# the example above, the statements "cache.blurfl=3" or
# "cache.assoc='hello'" would both result in runtime errors in Python,
# since the BaseCache object has no 'blurfl' parameter and the 'assoc'
# parameter requires an integer, respectively. This magic is done
# primarily by overriding the special __setattr__ method that controls
# assignment to object attributes.
#
# Once a set of Python objects have been instantiated in a hierarchy,
# calling 'instantiate(obj)' (where obj is the root of the hierarchy)
# will generate a .ini file.
#
#####################################################################
# list of all SimObject classes; filled in by MetaSimObject.__new__ for
# every class body that declares a 'type' keyword (maps name -> class)
allClasses = {}
# dict to look up SimObjects based on path
instanceDict = {}
# Did any of the SimObjects lack a header file?
noCxxHeader = False
def public_value(key, value):
    """Decide whether a class-body (key, value) pair is a real class
    attribute rather than a param/port declaration or assignment.

    True for underscore-prefixed ("private") names and for values that
    are functions, methods, modules, classmethods or classes; these go
    straight into the class __dict__ in MetaSimObject.__new__ instead
    of being routed through the param/port machinery.
    """
    if key.startswith('_'):
        return True
    passthrough_types = (FunctionType, MethodType, ModuleType,
                         classmethod, type)
    return isinstance(value, passthrough_types)
def createCxxConfigDirectoryEntryFile(code, name, simobj, is_header):
    """Generate the C++ CxxConfig glue for one SimObject type.

    Emits either the declaration (is_header=True) or the out-of-line
    definitions (is_header=False) of <name>CxxConfigParams -- a class
    deriving from both CxxConfigParams and <name>Params, with a nested
    DirectoryEntry factory -- which lets gem5 construct and connect
    this SimObject from a .ini file in pure C++ (no Python).

    'code' is a code_formatter-style sink: calling it appends a line
    (with ${...} interpolation against this scope's locals) and
    indent()/dedent() shift the emitted block.  'simobj' is the
    SimObject class whose _params and _ports are enumerated.
    """
    entry_class = 'CxxConfigDirectoryEntry_%s' % name   # (currently unused)
    param_class = '%sCxxConfigParams' % name
    code('#include "params/%s.hh"' % name)
    if not is_header:
        # The .cc file needs the headers of every SimObject-typed param
        # plus the ini-parse predecls of every plain param type.
        for param in simobj._params.values():
            if isSimObjectClass(param.ptype):
                code('#include "%s"' % param.ptype._value_dict['cxx_header'])
                code('#include "params/%s.hh"' % param.ptype.__name__)
            else:
                param.ptype.cxx_ini_predecls(code)
    if is_header:
        # Header mode: open the class declaration; each member below is
        # emitted unqualified and terminated with ';'.
        member_prefix = ''
        end_of_decl = ';'
        code('#include "sim/cxx_config.hh"')
        code()
        code('class ${param_class} : public CxxConfigParams,'
            ' public ${name}Params')
        code('{')
        code('  private:')
        code.indent()
        code('class DirectoryEntry : public CxxConfigDirectoryEntry')
        code('{')
        code('  public:')
        code.indent()
        code('DirectoryEntry();');
        code()
        code('CxxConfigParams *makeParamsObject() const')
        code('{ return new ${param_class}; }')
        code.dedent()
        code('};')
        code()
        code.dedent()
        code('  public:')
        code.indent()
    else:
        # Body mode: members become out-of-line definitions qualified
        # with '<param_class>::' and no trailing ';' on the signature.
        member_prefix = '%s::' % param_class
        end_of_decl = ''
        code('#include "%s"' % simobj._value_dict['cxx_header'])
        code('#include "base/str.hh"')
        code('#include "cxx_config/${name}.hh"')
        if simobj._ports.values() != []:
            code('#include "mem/mem_object.hh"')
            code('#include "mem/port.hh"')
        code()
        code('${member_prefix}DirectoryEntry::DirectoryEntry()');
        code('{')
        def cxx_bool(b):
            # Render a Python bool as a C++ literal.
            return 'true' if b else 'false'
        code.indent()
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            code('parameters["%s"] = new ParamDesc("%s", %s, %s);' %
                (param.name, param.name, cxx_bool(is_vector),
                cxx_bool(is_simobj)));
        for port in simobj._ports.values():
            is_vector = isinstance(port, m5.params.VectorPort)
            is_master = port.role == 'MASTER'
            code('ports["%s"] = new PortDesc("%s", %s, %s);' %
                (port.name, port.name, cxx_bool(is_vector),
                cxx_bool(is_master)))
        code.dedent()
        code('}')
        code()
    # setSimObject: bind a scalar SimObject-valued param by name.
    code('bool ${member_prefix}setSimObject(const std::string &name,')
    code('    SimObject *simObject)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if is_simobj and not is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('this->${{param.name}} = '
                    'dynamic_cast<${{param.ptype.cxx_type}}>(simObject);')
                code('if (simObject && !this->${{param.name}})')
                code('    ret = false;')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setSimObjectVector: bind a vector SimObject-valued param by name.
    code('bool ${member_prefix}setSimObjectVector('
        'const std::string &name,')
    code('    const std::vector<SimObject *> &simObjects)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if is_simobj and is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('this->${{param.name}}.clear();')
                code('for (auto i = simObjects.begin(); '
                    'ret && i != simObjects.end(); i ++)')
                code('{')
                code.indent()
                code('${{param.ptype.cxx_type}} object = '
                    'dynamic_cast<${{param.ptype.cxx_type}}>(*i);')
                code('if (*i && !object)')
                code('    ret = false;')
                code('else')
                code('    this->${{param.name}}.push_back(object);')
                code.dedent()
                code('}')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setName: record the instance name (pyobj stays NULL in C++ mode).
    code('void ${member_prefix}setName(const std::string &name_)'
        '${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('this->name = name_;')
        code('this->pyobj = NULL;')
        code.dedent()
        code('}')
    if is_header:
        code('const std::string &${member_prefix}getName()')
        code('{ return this->name; }')
    code()
    # setParam: parse a scalar non-SimObject param from its ini string.
    code('bool ${member_prefix}setParam(const std::string &name,')
    code('    const std::string &value, const Flags flags)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if not is_simobj and not is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                param.ptype.cxx_ini_parse(code,
                    'value', 'this->%s' % param.name, 'ret =')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setParamVector: parse a vector non-SimObject param element-wise.
    code('bool ${member_prefix}setParamVector('
        'const std::string &name,')
    code('    const std::vector<std::string> &values,')
    code('    const Flags flags)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false) {')
        for param in simobj._params.values():
            is_vector = isinstance(param, m5.params.VectorParamDesc)
            is_simobj = issubclass(param.ptype, m5.SimObject.SimObject)
            if not is_simobj and is_vector:
                code('} else if (name == "${{param.name}}") {')
                code.indent()
                code('${{param.name}}.clear();')
                code('for (auto i = values.begin(); '
                    'ret && i != values.end(); i ++)')
                code('{')
                code.indent()
                code('${{param.ptype.cxx_type}} elem;')
                param.ptype.cxx_ini_parse(code,
                    '*i', 'elem', 'ret =')
                code('if (ret)')
                code('    this->${{param.name}}.push_back(elem);')
                code.dedent()
                code('}')
                code.dedent()
        code('} else {')
        code('    ret = false;')
        code('}')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # setPortConnectionCount: size a port's connection array by name.
    code('bool ${member_prefix}setPortConnectionCount('
        'const std::string &name,')
    code('    unsigned int count)${end_of_decl}')
    if not is_header:
        code('{')
        code.indent()
        code('bool ret = true;')
        code()
        code('if (false)')
        code('    ;')
        for port in simobj._ports.values():
            code('else if (name == "${{port.name}}")')
            code('    this->port_${{port.name}}_connection_count = count;')
        code('else')
        code('    ret = false;')
        code()
        code('return ret;')
        code.dedent()
        code('}')
    code()
    # simObjectCreate: construct the SimObject (NULL for abstract types).
    code('SimObject *${member_prefix}simObjectCreate()${end_of_decl}')
    if not is_header:
        code('{')
        if hasattr(simobj, 'abstract') and simobj.abstract:
            code('    return NULL;')
        else:
            code('    return this->create();')
        code('}')
    if is_header:
        code()
        code('static CxxConfigDirectoryEntry'
            ' *${member_prefix}makeDirectoryEntry()')
        code('{ return new DirectoryEntry; }')
    if is_header:
        code.dedent()
        code('};')
# The metaclass for SimObject. This class controls how new classes
# that derive from SimObject are instantiated, and provides inherited
# class behavior (just like a class controls how instances of that
# class are instantiated, and provides inherited instance behavior).
class MetaSimObject(type):
    """Metaclass for SimObject and all of its subclasses.

    Splits each class body into real class attributes versus param/port
    declarations and value assignments (the latter kept in per-class
    multidicts that inherit through the SimObject class hierarchy), and
    provides the class-level attribute magic (__setattr__/__getattr__)
    plus the SWIG and C++ param-struct code-generation hooks.
    """
    # Attributes that can be set only at initialization time
    init_keywords = { 'abstract' : bool,
                      'cxx_class' : str,
                      'cxx_type' : str,
                      'cxx_header' : str,
                      'type' : str,
                      'cxx_bases' : list }
    # Attributes that can be set any time
    keywords = { 'check' : FunctionType }

    # __new__ is called before __init__, and is where the statements
    # in the body of the class definition get loaded into the class's
    # __dict__.  We intercept this to filter out parameter & port assignments
    # and only allow "private" attributes to be passed to the base
    # __new__ (starting with underscore).
    def __new__(mcls, name, bases, dict):
        assert name not in allClasses, "SimObject %s already present" % name
        # Copy "private" attributes, functions, and classes to the
        # official dict.  Everything else goes in _init_dict to be
        # filtered in __init__.
        cls_dict = {}
        value_dict = {}
        for key,val in dict.items():
            if public_value(key, val):
                cls_dict[key] = val
            else:
                # must be a param/port setting
                value_dict[key] = val
        if 'abstract' not in value_dict:
            value_dict['abstract'] = False
        if 'cxx_bases' not in value_dict:
            value_dict['cxx_bases'] = []
        cls_dict['_value_dict'] = value_dict
        cls = super(MetaSimObject, mcls).__new__(mcls, name, bases, cls_dict)
        if 'type' in value_dict:
            allClasses[name] = cls
        return cls

    # subclass initialization
    def __init__(cls, name, bases, dict):
        # calls type.__init__()... I think that's a no-op, but leave
        # it here just in case it's not.
        super(MetaSimObject, cls).__init__(name, bases, dict)
        # initialize required attributes

        # class-only attributes
        cls._params = multidict() # param descriptions
        cls._ports = multidict()  # port descriptions

        # class or instance attributes
        cls._values = multidict()   # param values
        cls._hr_values = multidict() # human readable param values
        cls._children = multidict() # SimObject children
        cls._port_refs = multidict() # port ref objects
        cls._instantiated = False # really instantiated, cloned, or subclassed

        # We don't support multiple inheritance of sim objects.  If you want
        # to, you must fix multidict to deal with it properly. Non sim-objects
        # are ok, though
        bTotal = 0
        for c in bases:
            if isinstance(c, MetaSimObject):
                bTotal += 1
        if bTotal > 1:
            raise TypeError, "SimObjects do not support multiple inheritance"

        base = bases[0]

        # Set up general inheritance via multidicts.  A subclass will
        # inherit all its settings from the base class.  The only time
        # the following is not true is when we define the SimObject
        # class itself (in which case the multidicts have no parent).
        if isinstance(base, MetaSimObject):
            cls._base = base
            cls._params.parent = base._params
            cls._ports.parent = base._ports
            cls._values.parent = base._values
            cls._hr_values.parent = base._hr_values
            cls._children.parent = base._children
            cls._port_refs.parent = base._port_refs
            # mark base as having been subclassed
            base._instantiated = True
        else:
            cls._base = None

        # default keyword values
        if 'type' in cls._value_dict:
            if 'cxx_class' not in cls._value_dict:
                cls._value_dict['cxx_class'] = cls._value_dict['type']

            cls._value_dict['cxx_type'] = '%s *' % cls._value_dict['cxx_class']

            if 'cxx_header' not in cls._value_dict:
                global noCxxHeader
                noCxxHeader = True
                warn("No header file specified for SimObject: %s", name)

        # Export methods are automatically inherited via C++, so we
        # don't want the method declarations to get inherited on the
        # python side (and thus end up getting repeated in the wrapped
        # versions of derived classes).  The code below basically
        # suppresses inheritance by substituting in the base (null)
        # versions of these methods unless a different version is
        # explicitly supplied.
        for method_name in ('export_methods', 'export_method_cxx_predecls',
                            'export_method_swig_predecls'):
            if method_name not in cls.__dict__:
                base_method = getattr(MetaSimObject, method_name)
                m = MethodType(base_method, cls, MetaSimObject)
                setattr(cls, method_name, m)

        # Now process the _value_dict items.  They could be defining
        # new (or overriding existing) parameters or ports, setting
        # class keywords (e.g., 'abstract'), or setting parameter
        # values or port bindings.  The first 3 can only be set when
        # the class is defined, so we handle them here.  The others
        # can be set later too, so just emulate that by calling
        # setattr().
        for key,val in cls._value_dict.items():
            # param descriptions
            if isinstance(val, ParamDesc):
                cls._new_param(key, val)

            # port objects
            elif isinstance(val, Port):
                cls._new_port(key, val)

            # init-time-only keywords
            elif cls.init_keywords.has_key(key):
                cls._set_keyword(key, val, cls.init_keywords[key])

            # default: use normal path (ends up in __setattr__)
            else:
                setattr(cls, key, val)

    def _set_keyword(cls, keyword, val, kwtype):
        # Validate and install an init-time keyword (e.g. 'abstract');
        # plain functions are promoted to classmethods.
        if not isinstance(val, kwtype):
            raise TypeError, 'keyword %s has bad type %s (expecting %s)' % \
                  (keyword, type(val), kwtype)
        if isinstance(val, FunctionType):
            val = classmethod(val)
        type.__setattr__(cls, keyword, val)

    def _new_param(cls, name, pdesc):
        # each param desc should be uniquely assigned to one variable
        assert(not hasattr(pdesc, 'name'))
        pdesc.name = name
        cls._params[name] = pdesc
        if hasattr(pdesc, 'default'):
            cls._set_param(name, pdesc.default, pdesc)

    def _set_param(cls, name, value, param):
        # Convert and store a param value at class level, keeping the
        # original (pre-conversion) value for human-readable display.
        assert(param.name == name)
        try:
            hr_value = value
            value = param.convert(value)
        except Exception, e:
            msg = "%s\nError setting param %s.%s to %s\n" % \
                  (e, cls.__name__, name, value)
            e.args = (msg, )
            raise
        cls._values[name] = value
        # if param value is a SimObject, make it a child too, so that
        # it gets cloned properly when the class is instantiated
        if isSimObjectOrVector(value) and not value.has_parent():
            cls._add_cls_child(name, value)
        # update human-readable values of the param if it has a literal
        # value and is not an object or proxy.
        if not (isSimObjectOrVector(value) or\
                isinstance(value, m5.proxy.BaseProxy)):
            cls._hr_values[name] = hr_value

    def _add_cls_child(cls, name, child):
        # It's a little funky to have a class as a parent, but these
        # objects should never be instantiated (only cloned, which
        # clears the parent pointer), and this makes it clear that the
        # object is not an orphan and can provide better error
        # messages.
        child.set_parent(cls, name)
        cls._children[name] = child

    def _new_port(cls, name, port):
        # each port should be uniquely assigned to one variable
        assert(not hasattr(port, 'name'))
        port.name = name
        cls._ports[name] = port

    # same as _get_port_ref, effectively, but for classes
    def _cls_get_port_ref(cls, attr):
        # Return reference that can be assigned to another port
        # via __setattr__.  There is only ever one reference
        # object per port, but we create them lazily here.
        ref = cls._port_refs.get(attr)
        if not ref:
            ref = cls._ports[attr].makeRef(cls)
            cls._port_refs[attr] = ref
        return ref

    # Set attribute (called on foo.attr = value when foo is an
    # instance of class cls).
    def __setattr__(cls, attr, value):
        # normal processing for private attributes
        if public_value(attr, value):
            type.__setattr__(cls, attr, value)
            return

        if cls.keywords.has_key(attr):
            cls._set_keyword(attr, value, cls.keywords[attr])
            return

        if cls._ports.has_key(attr):
            cls._cls_get_port_ref(attr).connect(value)
            return

        if isSimObjectOrSequence(value) and cls._instantiated:
            raise RuntimeError, \
                  "cannot set SimObject parameter '%s' after\n" \
                  "    class %s has been instantiated or subclassed" \
                  % (attr, cls.__name__)

        # check for param
        param = cls._params.get(attr)
        if param:
            cls._set_param(attr, value, param)
            return

        if isSimObjectOrSequence(value):
            # If RHS is a SimObject, it's an implicit child assignment.
            cls._add_cls_child(attr, coerceSimObjectOrVector(value))
            return

        # no valid assignment... raise exception
        raise AttributeError, \
              "Class %s has no parameter \'%s\'" % (cls.__name__, attr)

    def __getattr__(cls, attr):
        # Derived cxx_class views, then param values, then children.
        if attr == 'cxx_class_path':
            return cls.cxx_class.split('::')

        if attr == 'cxx_class_name':
            return cls.cxx_class_path[-1]

        if attr == 'cxx_namespaces':
            return cls.cxx_class_path[:-1]

        if cls._values.has_key(attr):
            return cls._values[attr]

        if cls._children.has_key(attr):
            return cls._children[attr]

        raise AttributeError, \
              "object '%s' has no attribute '%s'" % (cls.__name__, attr)

    def __str__(cls):
        return cls.__name__

    # See ParamValue.cxx_predecls for description.
    def cxx_predecls(cls, code):
        code('#include "params/$cls.hh"')

    # See ParamValue.swig_predecls for description.
    def swig_predecls(cls, code):
        code('%import "python/m5/internal/param_$cls.i"')

    # Hook for exporting additional C++ methods to Python via SWIG.
    # Default is none, override using @classmethod in class definition.
    def export_methods(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be compiled in the _wrap.cc
    # file.  Typically generates one or more #include statements.  If
    # any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_cxx_predecls(cls, code):
        pass

    # Generate the code needed as a prerequisite for the C++ methods
    # exported via export_methods() to be processed by SWIG.
    # Typically generates one or more %include or %import statements.
    # If any methods are exported, typically at least the C++ header
    # declaring the relevant SimObject class must be included.
    def export_method_swig_predecls(cls, code):
        pass

    # Generate the declaration for this object for wrapping with SWIG.
    # Generates code that goes into a SWIG .i file.  Called from
    # src/SConscript.
    def swig_decl(cls, code):
        class_path = cls.cxx_class.split('::')
        classname = class_path[-1]
        namespaces = class_path[:-1]

        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here). Sort the params based on their key
        params = map(lambda (k, v): v, sorted(cls._params.local.items()))
        ports = cls._ports.local

        code('%module(package="m5.internal") param_$cls')
        code()
        code('%{')
        code('#include "sim/sim_object.hh"')
        code('#include "params/$cls.hh"')
        for param in params:
            param.cxx_predecls(code)
        code('#include "${{cls.cxx_header}}"')
        cls.export_method_cxx_predecls(code)
        code('''\
/**
 * This is a workaround for bug in swig. Prior to gcc 4.6.1 the STL
 * headers like vector, string, etc. used to automatically pull in
 * the cstddef header but starting with gcc 4.6.1 they no longer do.
 * This leads to swig generated a file that does not compile so we
 * explicitly include cstddef. Additionally, including version 2.0.4,
 * swig uses ptrdiff_t without the std:: namespace prefix which is
 * required with gcc 4.6.1. We explicitly provide access to it.
 */
#include <cstddef>
using std::ptrdiff_t;
''')
        code('%}')
        code()

        for param in params:
            param.swig_predecls(code)
        cls.export_method_swig_predecls(code)

        code()
        if cls._base:
            code('%import "python/m5/internal/param_${{cls._base}}.i"')
        code()

        for ns in namespaces:
            code('namespace $ns {')

        if namespaces:
            # Rename to a flat identifier to avoid nested-namespace
            # name conflicts in the generated wrapper.
            code('// avoid name conflicts')
            sep_string = '_COLONS_'
            flat_name = sep_string.join(class_path)
            code('%rename($flat_name) $classname;')

        code()
        code('// stop swig from creating/wrapping default ctor/dtor')
        code('%nodefault $classname;')
        code('class $classname')
        if cls._base:
            bases = [ cls._base.cxx_class ] + cls.cxx_bases
        else:
            bases = cls.cxx_bases
        base_first = True
        for base in bases:
            if base_first:
                code('    : public ${{base}}')
                base_first = False
            else:
                code('    , public ${{base}}')
        code('{')
        code('  public:')
        cls.export_methods(code)
        code('};')

        for ns in reversed(namespaces):
            code('} // namespace $ns')

        code()
        code('%include "params/$cls.hh"')

    # Generate the C++ declaration (.hh file) for this SimObject's
    # param struct.  Called from src/SConscript.
    def cxx_param_decl(cls, code):
        # The 'local' attribute restricts us to the params declared in
        # the object itself, not including inherited params (which
        # will also be inherited from the base class's param struct
        # here). Sort the params based on their key
        params = map(lambda (k, v): v, sorted(cls._params.local.items()))
        ports = cls._ports.local
        try:
            ptypes = [p.ptype for p in params]
        except:
            print cls, p, p.ptype_str
            print params
            raise

        class_path = cls._value_dict['cxx_class'].split('::')

        code('''\
#ifndef __PARAMS__${cls}__
#define __PARAMS__${cls}__
''')

        # A forward class declaration is sufficient since we are just
        # declaring a pointer.
        for ns in class_path[:-1]:
            code('namespace $ns {')
        code('class $0;', class_path[-1])
        for ns in reversed(class_path[:-1]):
            code('} // namespace $ns')
        code()

        # The base SimObject has a couple of params that get
        # automatically set from Python without being declared through
        # the normal Param mechanism; we slip them in here (needed
        # predecls now, actual declarations below)
        if cls == SimObject:
            code('''
#ifndef PY_VERSION
struct PyObject;
#endif
#include <string>
''')
        for param in params:
            param.cxx_predecls(code)
        for port in ports.itervalues():
            port.cxx_predecls(code)
        code()

        if cls._base:
            code('#include "params/${{cls._base.type}}.hh"')
            code()

        for ptype in ptypes:
            if issubclass(ptype, Enum):
                code('#include "enums/${{ptype.__name__}}.hh"')
                code()

        # now generate the actual param struct
        code("struct ${cls}Params")
        if cls._base:
            code("    : public ${{cls._base.type}}Params")
        code("{")
        if not hasattr(cls, 'abstract') or not cls.abstract:
            if 'type' in cls.__dict__:
                code("    ${{cls.cxx_type}} create();")

        code.indent()
        if cls == SimObject:
            code('''
    SimObjectParams() {}
    virtual ~SimObjectParams() {}
    std::string name;
    PyObject *pyobj;
''')
        for param in params:
            param.cxx_decl(code)
        for port in ports.itervalues():
            port.cxx_decl(code)

        code.dedent()
        code('};')

        code()
        code('#endif // __PARAMS__${cls}__')
        return code

    # Generate the C++ declaration/definition files for this SimObject's
    # param struct to allow C++ initialisation
    def cxx_config_param_file(cls, code, is_header):
        createCxxConfigDirectoryEntryFile(code, cls.__name__, cls, is_header)
        return code
# This *temporary* definition is required to support calls from the
# SimObject class definition to the MetaSimObject methods (in
# particular _set_param, which gets called for parameters with default
# values defined on the SimObject class itself). It will get
# overridden by the permanent definition (which requires that
# SimObject be defined) lower in this file.
def isSimObjectOrVector(value):
    # Placeholder: always False here.  The real test is installed later
    # in the file once SimObject itself has been defined (see the
    # comment above).
    return False
# This class holds information about each simobject parameter
# that should be displayed on the command line for use in the
# configuration system.
class ParamInfo(object):
    """Write-once record describing a command-line-settable SimObject
    parameter: its type, description, printable type name, example
    string, default value, and the python access path used to set it.
    Once construction finishes, all attribute writes are ignored,
    making instances effectively const."""
    def __init__(self, type, desc, type_str, example, default_val, access_str):
        self.type = type
        self.desc = desc
        self.type_str = type_str
        self.example_str = example
        self.default_val = default_val
        # The string representation used to access this param through
        # python.  The method to access this parameter presented on the
        # command line may be different, so it is stored for later use.
        self.access_str = access_str
        # Setting this flag last freezes the object (see __setattr__).
        self.created = True

    def __setattr__(self, name, value):
        # Writes are honored only until the 'created' marker appears in
        # the instance dict; afterwards they are silently dropped.
        if "created" in self.__dict__:
            return
        self.__dict__[name] = value
# The SimObject class is the root of the special hierarchy. Most of
# the code in this class deals with the configuration hierarchy itself
# (parent/child node relationships).
class SimObject(object):
    """Root of the SimObject configuration hierarchy; deals mostly with
    parent/child node relationships (see comment above)."""
    # Specify metaclass.  Any class inheriting from SimObject will
    # get this metaclass.
    __metaclass__ = MetaSimObject
    # Init-time keywords consumed by MetaSimObject (see init_keywords).
    type = 'SimObject'
    abstract = True
    cxx_header = "sim/sim_object.hh"
    # Extra C++ base classes exposed through the SWIG wrapper.
    cxx_bases = [ "Drainable", "Serializable" ]
    # Event queue index param, inherited from the parent by default.
    eventq_index = Param.UInt32(Parent.eventq_index, "Event Queue Index")
    @classmethod
    def export_method_swig_predecls(cls, code):
        # SWIG predecls required by the methods listed in
        # export_methods(): std::string support plus the Drainable and
        # Serializable interface wrappers.
        code('''
%include <std_string.i>
%import "python/swig/drain.i"
%import "python/swig/serialize.i"
''')
    @classmethod
    def export_methods(cls, code):
        # C++ SimObject methods exposed to Python via SWIG on every
        # SimObject (lifecycle, stats, and probe registration hooks).
        code('''
    void init();
    void loadState(CheckpointIn &cp);
    void initState();
    void memInvalidate();
    void memWriteback();
    void regStats();
    void resetStats();
    void regProbePoints();
    void regProbeListeners();
    void startup();
''')
# Returns a dict of all the option strings that can be
# generated as command line options for this simobject instance
# by tracing all reachable params in the top level instance and
# any children it contains.
def enumerateParams(self, flags_dict = {},
cmd_line_str = "", access_str = ""):
if hasattr(self, "_paramEnumed"):
print "Cycle detected enumerating params"
else:
self._paramEnumed = True
# Scan the children first to pick up all the objects in this SimObj
for keys in self._children:
child = self._children[keys]
next_cmdline_str = cmd_line_str + keys
next_access_str = access_str + keys
if not isSimObjectVector(child):
next_cmdline_str = next_cmdline_str + "."
next_access_str = next_access_str + "."
flags_dict = child.enumerateParams(flags_dict,
next_cmdline_str,
next_access_str)
# Go through the simple params in the simobject in this level
# of the simobject hierarchy and save information about the
# parameter to be used for generating and processing command line
# options to the simulator to set these parameters.
for keys,values in self._params.items():
if values.isCmdLineSettable():
type_str = ''
ex_str = values.example_str()
ptype = None
if isinstance(values, VectorParamDesc):
type_str = 'Vector_%s' % values.ptype_str
ptype = values
else:
type_str = '%s' % values.ptype_str
ptype = values.ptype
if keys in self._hr_values\
and keys in self._values\
and not isinstance(self._values[keys], m5.proxy.BaseProxy):
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc, type_str, ex_str,
values.pretty_print(self._hr_values[keys]),
acc_str)
elif not keys in self._hr_values\
and not keys in self._values:
# Empty param
cmd_str = cmd_line_str + keys
acc_str = access_str + keys
flags_dict[cmd_str] = ParamInfo(ptype,
self._params[keys].desc,
type_str, ex_str, '', acc_str)
return flags_dict
# Initialize new instance. For objects with SimObject-valued
# children, we need to recursively clone the classes represented
# by those param values as well in a consistent "deep copy"-style
# fashion. That is, we want to make sure that each instance is
# cloned only once, and that if there are multiple references to
# the same original object, we end up with the corresponding
# cloned references all pointing to the same cloned instance.
    def __init__(self, **kwargs):
        """Instantiate (deep-clone) this SimObject.

        Internal keyword args: '_ancestor' is the object/class being
        cloned from, '_memo' the memoization dict ensuring each original
        is cloned exactly once; remaining kwargs become attribute
        assignments on the new instance.
        """
        ancestor = kwargs.get('_ancestor')
        memo_dict = kwargs.get('_memo')
        if memo_dict is None:
            # prepare to memoize any recursively instantiated objects
            memo_dict = {}
        elif ancestor:
            # memoize me now to avoid problems with recursive calls
            memo_dict[ancestor] = self
        if not ancestor:
            ancestor = self.__class__
        ancestor._instantiated = True
        # initialize required attributes
        self._parent = None
        self._name = None
        self._ccObject = None  # pointer to C++ object
        self._ccParams = None
        self._instantiated = False # really "cloned"
        # Clone children specified at class level. No need for a
        # multidict here since we will be cloning everything.
        # Do children before parameter values so that children that
        # are also param values get cloned properly.
        self._children = {}
        for key,val in ancestor._children.iteritems():
            self.add_child(key, val(_memo=memo_dict))
        # Inherit parameter values from class using multidict so
        # individual value settings can be overridden but we still
        # inherit late changes to non-overridden class values.
        self._values = multidict(ancestor._values)
        self._hr_values = multidict(ancestor._hr_values)
        # clone SimObject-valued parameters
        for key,val in ancestor._values.iteritems():
            val = tryAsSimObjectOrVector(val)
            if val is not None:
                self._values[key] = val(_memo=memo_dict)
        # clone port references.  no need to use a multidict here
        # since we will be creating new references for all ports.
        self._port_refs = {}
        for key,val in ancestor._port_refs.iteritems():
            self._port_refs[key] = val.clone(self, memo_dict)
        # apply attribute assignments from keyword args, if any
        for key,val in kwargs.iteritems():
            setattr(self, key, val)
# "Clone" the current instance by creating another instance of
# this instance's class, but that inherits its parameter values
# and port mappings from the current instance. If we're in a
# "deep copy" recursive clone, check the _memo dict to see if
# we've already cloned this instance.
    def __call__(self, **kwargs):
        """Clone this instance (see comment above): create a new object
        of the same class inheriting this instance's parameter values
        and port mappings, memoized via '_memo' during deep copies."""
        memo_dict = kwargs.get('_memo')
        if memo_dict is None:
            # no memo_dict: must be top-level clone operation.
            # this is only allowed at the root of a hierarchy
            if self._parent:
                raise RuntimeError, "attempt to clone object %s " \
                      "not at the root of a tree (parent = %s)" \
                      % (self, self._parent)
            # create a new dict and use that.
            memo_dict = {}
            kwargs['_memo'] = memo_dict
        elif memo_dict.has_key(self):
            # clone already done & memoized
            return memo_dict[self]
        return self.__class__(_ancestor = self, **kwargs)
def _get_port_ref(self, attr):
# Return reference that can be assigned to another port
# via __setattr__. There is only ever one reference
# object per port, but we create them lazily here.
ref = self._port_refs.get(attr)
if ref == None:
ref = self._ports[attr].makeRef(self)
self._port_refs[attr] = ref
return ref
    def __getattr__(self, attr):
        """Attribute lookup fallback: ports, then param values, then
        children, then (if constructed) the wrapped C++ object."""
        if self._ports.has_key(attr):
            return self._get_port_ref(attr)
        if self._values.has_key(attr):
            return self._values[attr]
        if self._children.has_key(attr):
            return self._children[attr]
        # If the attribute exists on the C++ object, transparently
        # forward the reference there.  This is typically used for
        # SWIG-wrapped methods such as init(), regStats(),
        # resetStats(), startup(), drain(), and
        # resume().
        if self._ccObject and hasattr(self._ccObject, attr):
            return getattr(self._ccObject, attr)
        err_string = "object '%s' has no attribute '%s'" \
              % (self.__class__.__name__, attr)
        if not self._ccObject:
            err_string += "\n  (C++ object is not yet constructed," \
                          " so wrapped C++ methods are unavailable.)"
        raise AttributeError, err_string
# Set attribute (called on foo.attr = value when foo is an
# instance of class cls).
    def __setattr__(self, attr, value):
        """Attribute assignment: private attrs pass through, port attrs
        connect, param attrs convert-and-store, SimObject values become
        children; anything else is an error."""
        # normal processing for private attributes
        if attr.startswith('_'):
            object.__setattr__(self, attr, value)
            return
        if self._ports.has_key(attr):
            # set up port connection
            self._get_port_ref(attr).connect(value)
            return
        param = self._params.get(attr)
        if param:
            try:
                hr_value = value
                value = param.convert(value)
            except Exception, e:
                # re-raise with the param's identity prepended for context
                msg = "%s\nError setting param %s.%s to %s\n" % \
                      (e, self.__class__.__name__, attr, value)
                e.args = (msg, )
                raise
            self._values[attr] = value
            # implicitly parent unparented objects assigned as params
            if isSimObjectOrVector(value) and not value.has_parent():
                self.add_child(attr, value)
            # set the human-readable value dict if this is a param
            # with a literal value and is not being set as an object
            # or proxy.
            if not (isSimObjectOrVector(value) or\
                    isinstance(value, m5.proxy.BaseProxy)):
                self._hr_values[attr] = hr_value
            return
        # if RHS is a SimObject, it's an implicit child assignment
        if isSimObjectOrSequence(value):
            self.add_child(attr, value)
            return
        # no valid assignment... raise exception
        raise AttributeError, "Class %s has no parameter %s" \
              % (self.__class__.__name__, attr)
    # this hack allows tacking a '[0]' onto parameters that may or may
    # not be vectors, and always getting the first element (e.g. cpus)
    def __getitem__(self, key):
        """Let a scalar SimObject be indexed like a one-element vector."""
        if key == 0:
            return self
        raise IndexError, "Non-zero index '%s' to SimObject" % key
    # this hack allows us to iterate over a SimObject that may
    # not be a vector, so we can call a loop over it and get just one
    # element.
    def __len__(self):
        """A scalar SimObject behaves as a length-1 sequence."""
        return 1
    # Also implemented by SimObjectVector
    def clear_parent(self, old_parent):
        """Detach from old_parent (asserted to be the current parent)."""
        assert self._parent is old_parent
        self._parent = None
    # Also implemented by SimObjectVector
    def set_parent(self, parent, name):
        """Attach this object to `parent` under attribute name `name`."""
        self._parent = parent
        self._name = name
    # Return parent object of this SimObject, not implemented by SimObjectVector
    # because the elements in a SimObjectVector may not share the same parent
    def get_parent(self):
        """Return the parent SimObject (None when unparented)."""
        return self._parent
    # Also implemented by SimObjectVector
    def get_name(self):
        """Return the attribute name this object has in its parent."""
        return self._name
    # Also implemented by SimObjectVector
    def has_parent(self):
        """True when this object has been attached to a parent."""
        return self._parent is not None
    # clear out child with given name. This code is not likely to be exercised.
    # See comment in add_child.
    def clear_child(self, name):
        """Remove the named child and sever its back link to us."""
        child = self._children[name]
        child.clear_parent(self)
        del self._children[name]
    # Add a new child to this object.
    def add_child(self, name, child):
        """Register `child` (coerced to SimObject/Vector) under `name`."""
        child = coerceSimObjectOrVector(child)
        if child.has_parent():
            warn("add_child('%s'): child '%s' already has parent", name,
                 child.get_name())
        if self._children.has_key(name):
            # This code path had an undiscovered bug that would make it fail
            # at runtime.  It had been here for a long time and was only
            # exposed by a buggy script.  Changes here will probably not be
            # exercised without specialized testing.
            self.clear_child(name)
        child.set_parent(self, name)
        self._children[name] = child
    # Take SimObject-valued parameters that haven't been explicitly
    # assigned as children and make them children of the object that
    # they were assigned to as a parameter value.  This guarantees
    # that when we instantiate all the parameter objects we're still
    # inside the configuration hierarchy.
    def adoptOrphanParams(self):
        """Parent any SimObject-valued params that are still orphans."""
        for key,val in self._values.iteritems():
            if not isSimObjectVector(val) and isSimObjectSequence(val):
                # need to convert raw SimObject sequences to
                # SimObjectVector class so we can call has_parent()
                val = SimObjectVector(val)
                self._values[key] = val
            if isSimObjectOrVector(val) and not val.has_parent():
                warn("%s adopting orphan SimObject param '%s'", self, key)
                self.add_child(key, val)
    def path(self):
        """Return the dotted config path from the root; 'root' is elided."""
        if not self._parent:
            return '<orphan %s>' % self.__class__
        ppath = self._parent.path()
        if ppath == 'root':
            return self._name
        return ppath + "." + self._name
    def __str__(self):
        """The string form of a SimObject is its config path."""
        return self.path()
    def config_value(self):
        """Value used in the config dict: the object's path."""
        return self.path()
    def ini_str(self):
        """Value written to the .ini file: the object's path."""
        return self.path()
    def find_any(self, ptype):
        """Find the unique descendant child or param of type ptype.

        Returns (object_or_None, found_flag); raises AttributeError if
        more than one distinct object matches.
        """
        if isinstance(self, ptype):
            return self, True
        found_obj = None
        for child in self._children.itervalues():
            # children tagged _visited are skipped (presumably set by an
            # earlier traversal -- TODO confirm against callers)
            visited = False
            if hasattr(child, '_visited'):
                visited = getattr(child, '_visited')
            if isinstance(child, ptype) and not visited:
                if found_obj != None and child != found_obj:
                    raise AttributeError, \
                          'parent.any matched more than one: %s %s' % \
                          (found_obj.path, child.path)
                found_obj = child
        # search param space
        for pname,pdesc in self._params.iteritems():
            if issubclass(pdesc.ptype, ptype):
                match_obj = self._values[pname]
                if found_obj != None and found_obj != match_obj:
                    raise AttributeError, \
                          'parent.any matched more than one: %s and %s' % (found_obj.path, match_obj.path)
                found_obj = match_obj
        return found_obj, found_obj != None
    def find_all(self, ptype):
        """Recursively collect all children/params of type ptype.

        Returns (sorted_list_of_objects, True); proxies and null
        pointers are excluded.
        """
        all = {}
        # search children
        for child in self._children.itervalues():
            # a child could be a list, so ensure we visit each item
            if isinstance(child, list):
                children = child
            else:
                children = [child]
            for child in children:
                if isinstance(child, ptype) and not isproxy(child) and \
                   not isNullPointer(child):
                    all[child] = True
                if isSimObject(child):
                    # also add results from the child itself
                    child_all, done = child.find_all(ptype)
                    all.update(dict(zip(child_all, [done] * len(child_all))))
        # search param space
        for pname,pdesc in self._params.iteritems():
            if issubclass(pdesc.ptype, ptype):
                match_obj = self._values[pname]
                if not isproxy(match_obj) and not isNullPointer(match_obj):
                    all[match_obj] = True
        # Also make sure to sort the keys based on the objects' path to
        # ensure that the order is the same on all hosts
        return sorted(all.keys(), key = lambda o: o.path()), True
    def unproxy(self, base):
        """A concrete SimObject is already resolved; return self."""
        return self
    def unproxyParams(self):
        """Resolve all proxy param values and port references in place."""
        for param in self._params.iterkeys():
            value = self._values.get(param)
            if value != None and isproxy(value):
                try:
                    value = value.unproxy(self)
                except:
                    print "Error in unproxying param '%s' of %s" % \
                          (param, self.path())
                    raise
                setattr(self, param, value)
        # Unproxy ports in sorted order so that 'append' operations on
        # vector ports are done in a deterministic fashion.
        port_names = self._ports.keys()
        port_names.sort()
        for port_name in port_names:
            port = self._port_refs.get(port_name)
            if port != None:
                port.unproxy(self)
    def print_ini(self, ini_file):
        """Write this object's .ini section and register it in instanceDict."""
        print >>ini_file, '[' + self.path() + ']'       # .ini section header
        instanceDict[self.path()] = self
        if hasattr(self, 'type'):
            print >>ini_file, 'type=%s' % self.type
        if len(self._children.keys()):
            print >>ini_file, 'children=%s' % \
                  ' '.join(self._children[n].get_name() \
                  for n in sorted(self._children.keys()))
        for param in sorted(self._params.keys()):
            value = self._values.get(param)
            if value != None:
                print >>ini_file, '%s=%s' % (param,
                                             self._values[param].ini_str())
        for port_name in sorted(self._ports.keys()):
            port = self._port_refs.get(port_name, None)
            if port != None:
                print >>ini_file, '%s=%s' % (port_name, port.ini_str())
        print >>ini_file        # blank line between objects
    # generate a tree of dictionaries expressing all the parameters in the
    # instantiated system for use by scripts that want to do power, thermal
    # visualization, and other similar tasks
    def get_config_as_dict(self):
        """Return a nested attrdict describing this object and its subtree."""
        d = attrdict()
        if hasattr(self, 'type'):
            d.type = self.type
        if hasattr(self, 'cxx_class'):
            d.cxx_class = self.cxx_class
        # Add the name and path of this object to be able to link to
        # the stats
        d.name = self.get_name()
        d.path = self.path()
        for param in sorted(self._params.keys()):
            value = self._values.get(param)
            if value != None:
                d[param] = value.config_value()
        for n in sorted(self._children.keys()):
            child = self._children[n]
            # Use the name of the attribute (and not get_name()) as
            # the key in the JSON dictionary to capture the hierarchy
            # in the Python code that assembled this system
            d[n] = child.get_config_as_dict()
        for port_name in sorted(self._ports.keys()):
            port = self._port_refs.get(port_name, None)
            if port != None:
                # Represent each port with a dictionary containing the
                # prominent attributes
                d[port_name] = port.get_config_as_dict()
        return d
    def getCCParams(self):
        """Build (once) and return the SWIG-wrapped C++ params struct.

        Copies every param value and each port's connection count into
        the struct; the result is cached in self._ccParams.
        """
        if self._ccParams:
            return self._ccParams
        cc_params_struct = getattr(m5.internal.params, '%sParams' % self.type)
        cc_params = cc_params_struct()
        cc_params.pyobj = self
        cc_params.name = str(self)
        param_names = self._params.keys()
        param_names.sort()
        for param in param_names:
            value = self._values.get(param)
            if value is None:
                fatal("%s.%s without default or user set value",
                      self.path(), param)
            value = value.getValue()
            if isinstance(self._params[param], VectorParamDesc):
                assert isinstance(value, list)
                vec = getattr(cc_params, param)
                assert not len(vec)
                for v in value:
                    vec.append(v)
            else:
                setattr(cc_params, param, value)
        port_names = self._ports.keys()
        port_names.sort()
        for port_name in port_names:
            port = self._port_refs.get(port_name, None)
            if port != None:
                port_count = len(port)
            else:
                port_count = 0
            setattr(cc_params, 'port_' + port_name + '_connection_count',
                    port_count)
        self._ccParams = cc_params
        return self._ccParams
    # Get C++ object corresponding to this object, calling C++ if
    # necessary to construct it.  Does *not* recursively create
    # children.
    def getCCObject(self):
        """Lazily construct and return the wrapped C++ object.

        The sentinel -1 marks construction-in-progress so that a cycle
        in the configuration hierarchy is reported instead of causing
        infinite recursion.
        """
        if not self._ccObject:
            # Make sure this object is in the configuration hierarchy
            if not self._parent and not isRoot(self):
                raise RuntimeError, "Attempt to instantiate orphan node"
            # Cycles in the configuration hierarchy are not supported. This
            # will catch the resulting recursion and stop.
            self._ccObject = -1
            if not self.abstract:
                params = self.getCCParams()
                self._ccObject = params.create()
        elif self._ccObject == -1:
            raise RuntimeError, "%s: Cycle found in configuration hierarchy." \
                  % self.path()
        return self._ccObject
    def descendants(self):
        """Yield self and every SimObject below it, pre-order."""
        yield self
        # The order of the dict is implementation dependent, so sort
        # it based on the key (name) to ensure the order is the same
        # on all hosts
        for (name, child) in sorted(self._children.iteritems()):
            for obj in child.descendants():
                yield obj
    # Call C++ to create C++ object corresponding to this object
    def createCCObject(self):
        """Force creation of the C++ params struct and the C++ object."""
        self.getCCParams()
        self.getCCObject() # force creation
    def getValue(self):
        """Param-interface accessor: a SimObject's value is its C++ object."""
        return self.getCCObject()
    # Create C++ port connections corresponding to the connections in
    # _port_refs
    def connectPorts(self):
        """Realize all recorded port connections on the C++ side."""
        # Sort the ports based on their attribute name to ensure the
        # order is the same on all hosts
        for (attr, portRef) in sorted(self._port_refs.iteritems()):
            portRef.ccConnect()
# Function to provide to C++ so it can look up instances based on paths
def resolveSimObject(name):
    """Map a config path to the instantiated C++ object (C++ callback)."""
    obj = instanceDict[name]
    return obj.getCCObject()
def isSimObject(value):
    """True when value is a SimObject instance."""
    return isinstance(value, SimObject)
def isSimObjectClass(value):
    """True when value is a SimObject subclass (not an instance)."""
    return issubclass(value, SimObject)
def isSimObjectVector(value):
    """True when value is a SimObjectVector instance."""
    return isinstance(value, SimObjectVector)
def isSimObjectSequence(value):
    """True for a non-empty list/tuple whose items are all SimObjects
    (null pointers are tolerated as items)."""
    if not isinstance(value, (list, tuple)) or len(value) == 0:
        return False
    return all(isNullPointer(item) or isSimObject(item) for item in value)
def isSimObjectOrSequence(value):
    """True for a SimObject or a sequence of SimObjects."""
    return isSimObject(value) or isSimObjectSequence(value)
def isRoot(obj):
    """True when obj is the singleton Root instance (falsy obj => falsy)."""
    from m5.objects import Root
    return obj and obj is Root.getInstance()
def isSimObjectOrVector(value):
    """True for a SimObject or a SimObjectVector."""
    return isSimObject(value) or isSimObjectVector(value)
def tryAsSimObjectOrVector(value):
    """Coerce value to SimObject/SimObjectVector, or return None."""
    if isSimObjectOrVector(value):
        return value
    if isSimObjectSequence(value):
        return SimObjectVector(value)
    return None
def coerceSimObjectOrVector(value):
    """Like tryAsSimObjectOrVector, but raise TypeError on failure."""
    value = tryAsSimObjectOrVector(value)
    if value is None:
        raise TypeError, "SimObject or SimObjectVector expected"
    return value
# Snapshot the registries right after import so clear() can restore the
# interpreter to its pristine post-import state.
baseClasses = allClasses.copy()
baseInstances = instanceDict.copy()
def clear():
    """Reset the global SimObject registries to their initial contents."""
    global allClasses, instanceDict, noCxxHeader
    allClasses = baseClasses.copy()
    instanceDict = baseInstances.copy()
    noCxxHeader = False
# __all__ defines the list of symbols that get exported when
# 'from config import *' is invoked.  Try to keep this reasonably
# short to avoid polluting other namespaces.
__all__ = [ 'SimObject' ]
|
alianmohammad/pd-gem5-latest
|
src/python/m5/SimObject.py
|
Python
|
bsd-3-clause
| 57,440
|
[
"VisIt"
] |
f12f4f00bb5d6cea325204339d4ad20f73a8e7de79d9c8043619810b8d27b633
|
'''
Functions in this module create rays emanating from a variety
of sources in the form [opd, x, y, z, l, m, n, ux, uy, uz].
Ray Parameters:
* opd : vector tracking the optical path traveled by each ray;
only surfaces with an OPD flag in their definition will
update this vector.
* x, y, z : ray positions in the Cartesian coordinate system [mm].
* l, m, n : ray direction cosines, indicating the direction in which
each ray is moving; magnitude is unity.
* ux, uy, uz : the surface normal of the last surface to which the
ray was traced; magnitude is unity.
'''
import numpy as np
def pointsource(ang, num):
    '''
    Point source at the origin with uniform angular divergence.

    Rays propagate in the +z direction within a cone of half
    angle `ang`.

    Parameters
    ----------
    ang : float
        Angular divergence of rays. (half angle)
    num : int
        Number of rays to create.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    # sqrt of a uniform deviate gives a uniform density over the cone.
    rho = np.sqrt(np.random.rand(num))*np.sin(ang)
    theta = np.random.rand(num)*2*np.pi
    l = rho*np.cos(theta)
    m = rho*np.sin(theta)
    n = np.sqrt(1. - l**2 - m**2)
    # Positions, surface normals, and path lengths all start at zero.
    x = np.zeros(num)
    y = np.zeros(num)
    z = np.zeros(num)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def circularbeam(rad, num):
    '''
    Uniform circular beam of radius `rad` in the z = 0 plane.

    All rays point in the +z direction.

    Parameters
    ----------
    rad : int / float
        Radius of circular beam.
    num : int
        Number of rays to create.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    # sqrt of a uniform deviate yields uniform areal density.
    radius = np.sqrt(np.random.rand(num))*rad
    azimuth = np.random.rand(num)*2*np.pi
    x = radius*np.cos(azimuth)
    y = radius*np.sin(azimuth)
    z = np.zeros(num)
    l = np.zeros(num)
    m = np.zeros(num)
    n = np.ones(num)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def annulus(rin, rout, num, zhat=-1.):
    '''
    Annulus of rays with specified inner and outer radii.
    Note: Default has rays pointing in -z direction.
    Parameters
    ----------
    rin : int / float
        Inner radius of annulus.
    rout : int / float
        Outer radius of annulus.
    num : int
        Number of rays to create.
    zhat : float
        Direction in which rays point. Default is zhat = -1.
    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    # Uniform in rho**2 gives uniform areal density over the annulus.
    rho = np.sqrt(rin**2+np.random.rand(num)*(rout**2-rin**2))
    theta = np.random.rand(num)*2*np.pi
    x = rho*np.cos(theta)
    y = rho*np.sin(theta)
    z = np.repeat(0., num)
    l = np.repeat(0., num)
    m = np.repeat(0., num)
    n = np.repeat(zhat, num)
    ux = np.copy(l)
    uy = np.copy(l)
    uz = np.copy(l)
    opd = np.copy(l)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def subannulus(rin, rout, dphi, num, zhat=1.):
    '''
    Subapertured annulus source with given radial and angular extent.

    The wedge is centered about theta = 0 (the +x direction) and
    spans a full angular width of dphi.

    Parameters
    ----------
    rin : int / float
        Inner radius of annulus.
    rout : int / float
        Outer radius of annulus.
    dphi : int / float
        Full angular width of subannulus.
    num : int
        Number of rays to create.
    zhat : float
        Direction in which rays point. Default is zhat = +1.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    # Uniform in rho**2 gives uniform areal density over the wedge.
    rho = np.sqrt(rin**2+np.random.rand(num)*(rout**2-rin**2))
    theta = np.random.rand(num)*dphi - dphi/2.
    x = rho*np.cos(theta)
    y = rho*np.sin(theta)
    z = np.zeros(num)
    l = np.zeros(num)
    m = np.zeros(num)
    n = np.full(num, zhat)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def xslit(xin, xout, num, zhat=-1.):
    '''
    Linear slit of rays along the x-axis.

    Rays are evenly spaced from xin to xout (inclusive) at y = z = 0.

    Parameters
    ----------
    xin : int / float
        Inner x-position of slit.
    xout : int / float
        Outer x-position of slit.
    num : int
        Number of rays to create.
    zhat : float
        Direction in which rays point. Default is zhat = -1.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    x = np.linspace(xin, xout, num)
    y = np.zeros(num)
    z = np.zeros(num)
    l = np.zeros(num)
    m = np.zeros(num)
    n = np.full(num, zhat)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def rectArray(xsize, ysize, num):
    '''
    Rectangular grid of rays on the x-y plane, pointing along +z.

    A num x num grid is laid out with numpy.meshgrid over
    [-xsize, xsize] x [-ysize, ysize], so num**2 rays are produced.

    Parameters
    ----------
    xsize : int / float
        1/2 of rectangle size in the x-dimension.
    ysize : int / float
        1/2 of rectangle size in the y-dimension.
    num : int
        Number of rays per grid edge (num**2 rays total).

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    xgrid, ygrid = np.meshgrid(np.linspace(-xsize, xsize, num),
                               np.linspace(-ysize, ysize, num))
    total = num**2
    x = xgrid.flatten()
    y = ygrid.flatten()
    opd = np.zeros(total)
    z = np.zeros(total)
    l = np.zeros(total)
    m = np.zeros(total)
    n = np.ones(total)
    ux = np.zeros(total)
    uy = np.zeros(total)
    uz = np.zeros(total)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def convergingbeam(zset, rin, rout, tmin, tmax, num, lscat):
    '''
    Converging sub-apertured annulus beam placed at the nominal focus.

    Rays sit on an annular wedge at z = zset and point back toward the
    origin, with an optional Lorentzian-distributed angular scatter.

    Parameters
    ----------
    zset : int / float
        Z-dimension which defines the convergence of beam.
    rin : int / float
        Inner radius of sub-apertured annulus beam.
    rout : int / float
        Outer radius of sub-apertured annulus beam.
    tmin : int / float
        Minimum angular extent of sub-apertured annulus beam.
    tmax : int / float
        Maximum angular extent of sub-apertured annulus beam.
    num : int
        Number of rays to create.
    lscat : int / float
        Scatter in the angular convergence [arcsec].

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    rho = np.sqrt(rin**2+np.random.rand(num)*(rout**2-rin**2))
    theta = tmin + np.random.rand(num)*(tmax-tmin)
    x = rho*np.cos(theta)
    y = rho*np.sin(theta)
    z = np.full(num, zset)
    # Lorentzian scatter angle, then converted arcsec -> radians.
    lscat = lscat * np.tan((np.random.rand(num) - .5)*np.pi)
    lscat = lscat/60**2 * np.pi/180.
    n = -np.cos(np.arctan(rho/zset)+lscat)
    l = -np.sqrt(1-n**2)*np.cos(theta)
    m = -np.sqrt(1-n**2)*np.sin(theta)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def convergingbeam2(zset, xmin, xmax, ymin, ymax, num, lscat):
    '''
    Converging rectangular beam placed at the nominal focus.

    Rays sit on a rectangle at z = zset and point back toward the
    origin, with an optional Lorentzian-distributed angular scatter.

    Parameters
    ----------
    zset : int / float
        Z-dimension which defines the convergence of beam.
    xmin : int / float
        Minimum extent in the x-dimension.
    xmax : int / float
        Maximum extent in the x-dimension.
    ymin : int / float
        Minimum extent in the y-dimension.
    ymax : int / float
        Maximum extent in the y-dimension.
    num : int
        Number of rays to create.
    lscat : int / float
        Scatter in the angular convergence [arcsec].

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    x = xmin + np.random.rand(num)*(xmax-xmin)
    y = ymin + np.random.rand(num)*(ymax-ymin)
    rho = np.sqrt(x**2+y**2)
    theta = np.arctan2(y, x)
    z = np.full(num, zset)
    # Lorentzian scatter angle, then converted arcsec -> radians.
    lscat = lscat * np.tan((np.random.rand(num) - .5)*np.pi)
    lscat = lscat/60**2 * np.pi/180.
    n = -np.cos(np.arctan(rho/zset)+lscat)
    l = -np.sqrt(1-n**2)*np.cos(theta)
    m = -np.sqrt(1-n**2)*np.sin(theta)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def rectbeam(xhalfwidth, yhalfwidth, num):
    '''
    Uniform random rectangular beam on the x-y plane, pointing in
    the +z direction.

    Parameters
    ----------
    xhalfwidth : int / float
        1/2 of rectangle extent in the x-dimension.
    yhalfwidth : int / float
        1/2 of rectangle extent in the y-dimension.
    num : int
        Number of rays to create.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    x = (np.random.rand(num)-.5)*2*xhalfwidth
    y = (np.random.rand(num)-.5)*2*yhalfwidth
    z = np.zeros(num)
    n = np.ones(num)
    l = np.zeros(num)
    m = np.zeros(num)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
def gaussianBeam(ang, num):
    '''
    Point source with Gaussian angular divergence, pointing along +z.

    The transverse direction cosines l and m are independent normal
    deviates scaled so that the total radial divergence has a
    1-sigma value of sin(ang).

    Parameters
    ----------
    ang : float
        Angular divergence of rays. (1-sigma)
    num : int
        Number of rays to create.

    Returns
    -------
    rays : list
        List of ray parameters [opd, x, y, z, l, m, n, ux, uy, uz].
    '''
    # Split the 1-sigma divergence evenly between the two transverse axes.
    l = np.random.randn(num)*np.sin(ang)/np.sqrt(2)
    m = np.random.randn(num)*np.sin(ang)/np.sqrt(2)
    n = np.sqrt(1.-l**2-m**2)
    x = np.zeros(num)
    y = np.zeros(num)
    z = np.zeros(num)
    ux = np.zeros(num)
    uy = np.zeros(num)
    uz = np.zeros(num)
    opd = np.zeros(num)
    return [opd, x, y, z, l, m, n, ux, uy, uz]
|
rallured/PyXFocus
|
sources.py
|
Python
|
mit
| 10,919
|
[
"Gaussian"
] |
37fdc87e87691ea328f75475d250714ef1bceb8b0800e7973a8e2f8ec6139419
|
'''This module holds the code that allows to analyze the alignment search
result analysis.
It can deal with blasts, iprscan or ssaha2 results.
This results can be parsed, filtered and analyzed.
This module revolves around a memory structure that represents a blast or
an iprscan result. The schema of this structure is:
result = {'query':the_query_sequence,
'matches': [a_list_of_matches(hits in the blast terminology)]
}
The sequence can have: name, description, annotations={'database':some db} and
len(sequence).
Every match is a dict.
match = {'subject':the subject sequence
'start' :match start position in bp in query
'end' :match end position in bp in query
'subject_start' : match start position in bp in subject
'subject_end' :match end position in bp in subject
'scores' :a dict with the scores
'match_parts': [a list of match_parts(hsps in the blast lingo)]
'evidences' : [a list of tuples for the iprscan]
}
All the scores are held in a dict
scores = {'key1': value1, 'key2':value2}
For instance the keys could be expect, similarity and identity for the blast
match_part is a dict:
match_part = {'query_start' : the query start in the alignment in bp
'query_end' : the query end in the alignment in bp
'query_strand' : 1 or -1
'subject_start' : the subject start in the alignment in bp
'subject_end' : the subject end in the alignment in bp
'subject_strand' : 1 or -1
'scores' :a dict with the scores
}
Iprscan has several evidences generated by different programs and databases
for every match. Every evidence is similar to a match.
'''
# Copyright 2009 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of franklin.
# franklin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# franklin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with franklin. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import itertools
import copy
import os
from math import log10
from crumbs.utils.optional_modules import NCBIXML
from crumbs.utils.tags import SUBJECT, QUERY, ELONGATED
from crumbs.utils.segments_utils import merge_overlaping_segments
def _text_blasts_in_file(fhand):
'It returns from Query= to Query'
cache = ''
first_time = True
for line in fhand:
if line.startswith('Query='):
if first_time:
cache = ''
first_time = False
else:
yield cache
cache = ''
cache += line
else:
if not first_time:
yield cache
def _split_description(string):
'It splits the description'
items = string.split(' ', 1)
name = items[0]
desc = items[1] if len(items) == 2 else None
return name, desc
def _text_blast_parser(fhand):
    '''It parses plain-text blast output and yields one result dict per
    query, in the module's standard result schema (see module docstring).

    Results are post-processed by _fix_matches before being yielded.
    '''
    result = None
    previous_query = None
    for blast in _text_blasts_in_file(fhand):
        in_query_def = False
        in_subject_def = False
        for line in blast.splitlines():
            line = line.strip()
            if not line:
                continue
            if line.startswith('Query='):
                query_name = line.split('=')[-1].strip()
                query_name, query_desc = _split_description(query_name)
                in_query_def = True
                subject_name = None
            if line.startswith('Subject=') or line.startswith('>'):
                if line.startswith('>'):
                    subject_name = line[1:].strip()
                else:
                    subject_name = line.split('=')[-1].strip()
                subject_name, subject_desc = _split_description(subject_name)
                in_subject_def = True
                # reset the per-hsp accumulators for the new subject
                query_start, query_end = None, None
                subject_start, subject_end = None, None
                query_strand, subject_strand = None, None
                score, expect, identity = None, None, None
            if line.startswith('Length='):
                # 'Length=' follows either a query or a subject definition;
                # the flags decide which one we are completing here.
                length = int(line.split('=')[-1].strip())
                if in_query_def and query_name != previous_query:
                    if result is not None and result['matches']:
                        result = _fix_matches(result, score_keys=['expect',
                                                                  'score'])
                        if result:
                            yield result
                    query_length = length
                    in_query_def = False
                    if query_desc:
                        query = {'name': query_name, 'description': query_desc,
                                 'length': query_length}
                    else:
                        query = {'name': query_name, 'length': query_length}
                    matches = []
                    result = {'query': query,
                              'matches': matches}
                    previous_query = query_name
                elif in_subject_def:
                    subject_length = length
                    if subject_desc:
                        subject = {'name': subject_name,
                                   'description': subject_desc,
                                   'length': subject_length}
                    else:
                        subject = {'name': subject_name,
                                   'length': subject_length}
                    in_subject_def = False
                    matches.append({'subject': subject, 'match_parts': []})
            if subject_name is None:
                continue
            if line.startswith('Score') or line.startswith('Effective'):
                # a new 'Score' (or the trailing 'Effective' section)
                # terminates the previous hsp, so flush it first
                if score is not None:
                    match_part = {'subject_start': subject_start,
                                  'subject_end': subject_end,
                                  'subject_strand': subject_strand,
                                  'query_start': query_start,
                                  'query_end': query_end,
                                  'query_strand': query_strand,
                                  'scores': {'expect': expect,
                                             'identity': identity,
                                             'score': score}}
                    matches[-1]['match_parts'].append(match_part)
                    score, expect, identity = None, None, None
                    query_strand, subject_strand = None, None
                    query_start, query_end = None, None
                    subject_start, subject_end = None, None
                if line.startswith('Score'):
                    items = line.split()
                    score = float(items[2])
                    expect = float(items[-1])
            elif line.startswith('Identities'):
                items = line.split()
                identity = float(items[3].strip('(')[:-3])
            elif line.startswith('Strand'):
                strands = line.split('=')[-1]
                strands = strands.split('/')
                query_strand = 1 if strands[0] == 'Plus' else -1
                subject_strand = 1 if strands[1] == 'Plus' else -1
            if query_strand and line.startswith('Query'):
                # alignment coordinate lines; convert to 0-based positions
                items = line.split()
                if query_start is None:
                    query_start = int(items[1]) - 1
                query_end = int(items[-1]) - 1
            if query_strand and line.startswith('Sbjct'):
                items = line.split()
                if subject_start is None:
                    subject_start = int(items[1]) - 1
                subject_end = int(items[-1]) - 1
    else:
        # flush the result for the last query in the file
        if result is not None and result['matches']:
            result = _fix_matches(result, score_keys=['expect', 'score'])
            if result:
                yield result
class TextBlastParser(object):
    'It parses the plain-text output of a blast result'
    def __init__(self, fhand):
        'The init requires a file to be parsed'
        self._gen = _text_blast_parser(fhand)
    def __iter__(self):
        'Part of the iterator protocol'
        return self
    def next(self):
        'It returns the next blast result'
        return self._gen.next()
# Column order of blast's default tabular output (-outfmt 6).
DEFAULT_TABBLAST_FORMAT = ('query', 'subject', 'identity', 'alignment_length',
                           'mismatches', 'gap_open', 'query_start',
                           'query_end', 'subject_start', 'subject_end',
                           'expect', 'score')
def _lines_for_every_tab_blast(fhand, line_format):
    '''It yields (query_name, query_length, match_parts) for every query
    in a tabular blast file.

    line_format names the columns of each line; a line with a different
    number of columns raises RuntimeError.
    '''
    ongoing_query = None
    match_parts = []
    for line in fhand:
        items = line.strip().split()
        if len(line_format) != len(items):
            msg = 'Malformed line. The line has an unexpected number of items.'
            msg += '\nExpected format was: ' + ' '.join(line_format) + '\n'
            msg += 'Line was: ' + line + '\n'
            raise RuntimeError(msg)
        items = dict(zip(line_format, items))
        query = items['query']
        subject = items['subject']
        if 'query_length' in items:
            query_len = int(items['query_length'])
        else:
            query_len = None
        if 'subject_length' in items:
            subject_len = int(items['subject_length'])
        else:
            subject_len = None
        locations = ('query_start', 'query_end', 'subject_start',
                     'subject_end')
        match_part = {}
        for field in locations:
            if field in items:
                # blast coordinates are 1-based; store them 0-based
                match_part[field] = int(items[field]) - 1
        score_fields = ('expect', 'score', 'identity')
        scores = {}
        for field in score_fields:
            if field in items:
                scores[field] = float(items[field])
        if scores:
            match_part['scores'] = scores
        if ongoing_query is None:
            ongoing_query = query
            match_parts.append({'subject': subject, 'match_part': match_part,
                                'subject_length': subject_len})
        elif query == ongoing_query:
            match_parts.append({'subject': subject, 'match_part': match_part,
                                'subject_length': subject_len})
        else:
            # a new query starts; flush the accumulated one
            yield ongoing_query, query_len, match_parts
            match_parts = [{'subject':subject, 'match_part':match_part,
                            'subject_length': subject_len}]
            ongoing_query = query
    if ongoing_query:
        yield ongoing_query, query_len, match_parts
def _group_match_parts_by_subject(match_parts):
    '''It yields (subject, subject_length, parts) groups of consecutive
    match parts that share the same subject.

    NOTE(review): the for-else flush runs even for an empty input, which
    would reference unset locals -- presumably callers never pass an
    empty list; confirm before reuse.
    '''
    parts = []
    ongoing_subject = None
    for match_part in match_parts:
        subject = match_part['subject']
        subject_length = match_part['subject_length']
        if ongoing_subject is None:
            parts.append(match_part['match_part'])
            ongoing_subject = subject
            ongoing_subject_length = subject_length
        elif ongoing_subject == subject:
            parts.append(match_part['match_part'])
        else:
            # subject changed; flush the accumulated group
            yield ongoing_subject, ongoing_subject_length, parts
            parts = [match_part['match_part']]
            ongoing_subject = subject
            ongoing_subject_length = subject_length
    else:
        yield ongoing_subject, ongoing_subject_length, parts
def _tabular_blast_parser(fhand, line_format):
    'Parses the tabular output of a blast result and yields Alignment result'
    if hasattr(fhand, 'seek'):
        fhand.seek(0)
    for qname, qlen, match_parts in _lines_for_every_tab_blast(fhand,
                                                               line_format):
        matches = []
        # pylint: disable=C0301
        for sname, slen, match_parts in _group_match_parts_by_subject(match_parts):
            # match start and end: the extreme coordinates over all hsps
            match_start, match_end = None, None
            match_subject_start, match_subject_end = None, None
            for match_part in match_parts:
                if (match_start is None or
                    match_part['query_start'] < match_start):
                    match_start = match_part['query_start']
                if match_end is None or match_part['query_end'] > match_end:
                    match_end = match_part['query_end']
                if (match_subject_start is None or
                    match_part['subject_start'] < match_subject_start):
                    match_subject_start = match_part['subject_start']
                if (match_subject_end is None or
                    match_part['subject_end'] > match_subject_end):
                    match_subject_end = match_part['subject_end']
            subject = {'name': sname}
            if slen:
                subject['length'] = slen
            # the match-level expect is taken from the first hsp
            match = {'subject': subject,
                     'start': match_start,
                     'end': match_end,
                     'subject_start': match_subject_start,
                     'subject_end': match_subject_end,
                     'scores': {'expect': match_parts[0]['scores']['expect']},
                     'match_parts': match_parts}
            matches.append(match)
        if matches:
            query = {'name': qname}
            if qlen:
                query['length'] = qlen
            yield {'query': query, 'matches': matches}
class TabularBlastParser(object):
    'It parses the tabular output of a blast result'
    def __init__(self, fhand, line_format=DEFAULT_TABBLAST_FORMAT):
        'The init requires a file to be parsed'
        self._gen = _tabular_blast_parser(fhand, line_format)
    def __iter__(self):
        'Part of the iterator protocol'
        return self
    def next(self):
        'It returns the next blast result'
        return self._gen.next()
class BlastParser(object):
'''An iterator blast parser that yields the blast results in a
multiblast file'''
    def __init__(self, fhand, subj_def_as_accesion=None):
        '''The init requires an XML blast file to be parsed.

        It sniffs the blast version to decide whether accessions should
        be taken from the definition lines, then hands the file to
        Biopython's NCBIXML parser.
        '''
        fhand.seek(0, 0)
        sample = fhand.read(10)
        if sample and 'xml' not in sample:
            raise ValueError('Not a xml file')
        fhand.seek(0, 0)
        self._blast_file = fhand
        metadata = self._get_blast_metadata()
        blast_version = metadata['version']
        plus = metadata['plus']
        self.db_name = metadata['db_name']
        self._blast_file.seek(0, 0)
        # blast+ (or classic blast newer than 2.2.21) writes real
        # accessions into the definition lines
        if ((blast_version and plus) or
            (blast_version and blast_version > '2.2.21')):
            self.use_query_def_as_accession = True
            self.use_subject_def_as_accession = True
        else:
            self.use_query_def_as_accession = True
            self.use_subject_def_as_accession = False
        if subj_def_as_accesion is not None:
            self.use_subject_def_as_accession = subj_def_as_accesion
        # we use the biopython parser
        # if there are no results we put None in our blast_parse results
        self._blast_parse = None
        if fhand.read(1) == '<':
            fhand.seek(0)
            self._blast_parse = NCBIXML.parse(fhand)
    def __iter__(self):
        'Part of the iterator protocol'
        return self
def _create_result_structure(self, bio_result):
'Given a BioPython blast result it returns our result structure'
# the query name and definition
definition = bio_result.query
if self.use_query_def_as_accession:
items = definition.split(' ', 1)
name = items[0]
if len(items) > 1:
definition = items[1]
else:
definition = None
else:
name = bio_result.query_id
definition = definition
if definition is None:
definition = "<unknown description>"
# length of query sequence
length = bio_result.query_letters
# now we can create the query sequence
query = {'name': name, 'description': definition, 'length': length}
# now we go for the hits (matches)
matches = []
for alignment in bio_result.alignments:
# the subject sequence
if self.use_subject_def_as_accession:
items = alignment.hit_def.split(' ', 1)
name = items[0]
if len(items) > 1:
definition = items[1]
else:
definition = None
else:
name = alignment.accession
definition = alignment.hit_def
if definition is None:
definition = "<unknown description>"
length = alignment.length
id_ = alignment.hit_id
subject = {'name': name, 'description': definition,
'length': length, 'id': id_}
# the hsps (match parts)
match_parts = []
match_start, match_end = None, None
match_subject_start, match_subject_end = None, None
for hsp in alignment.hsps:
expect = hsp.expect
subject_start = hsp.sbjct_start
subject_end = hsp.sbjct_end
query_start = hsp.query_start
query_end = hsp.query_end
hsp_length = len(hsp.query)
# We have to check the subject strand
if subject_start < subject_end:
subject_strand = 1
else:
subject_strand = -1
subject_start, subject_end = (subject_end,
subject_start)
# Also the query strand
if query_start < query_end:
query_strand = 1
else:
query_strand = -1
query_start, query_end = query_end, query_start
try:
similarity = hsp.positives * 100.0 / float(hsp_length)
except TypeError:
similarity = None
try:
identity = hsp.identities * 100.0 / float(hsp_length)
except TypeError:
identity = None
match_parts.append({'subject_start': subject_start,
'subject_end': subject_end,
'subject_strand': subject_strand,
'query_start': query_start,
'query_end': query_end,
'query_strand': query_strand,
'scores': {'similarity': similarity,
'expect': expect,
'identity': identity}
})
# It takes the first loc and the last loc of the hsp to
# determine hit start and end
if match_start is None or query_start < match_start:
match_start = query_start
if match_end is None or query_end > match_end:
match_end = query_end
if (match_subject_start is None or
subject_start < match_subject_start):
match_subject_start = subject_start
if (match_subject_end is None or
subject_end > match_subject_end):
match_subject_end = subject_end
matches.append({
'subject': subject,
'start': match_start,
'end': match_end,
'subject_start': match_subject_start,
'subject_end': match_subject_end,
'scores': {'expect': match_parts[0]['scores']['expect']},
'match_parts': match_parts})
result = {'query': query, 'matches': matches}
return result
def _get_blast_metadata(self):
'It gets blast parser version'
tell_ = self._blast_file.tell()
version = None
db_name = None
plus = False
for line in self._blast_file:
line = line.strip()
if line.startswith('<BlastOutput_version>'):
version = line.split('>')[1].split('<')[0].split()[1]
if line.startswith('<BlastOutput_db>'):
db_name = line.split('>')[1].split('<')[0]
db_name = os.path.basename(db_name)
if version is not None and db_name is not None:
break
if version and '+' in version:
plus = True
version = version[:-1]
self._blast_file.seek(tell_)
return {'version': version, 'plus': plus, 'db_name': db_name}
def next(self):
'It returns the next blast result'
if self._blast_parse is None:
raise StopIteration
else:
bio_result = self._blast_parse.next()
# now we have to change this biopython blast_result in our
# structure
our_result = self._create_result_structure(bio_result)
return our_result
class ExonerateParser(object):
    '''Exonerate parser, it is an iterator that yields the result for each
    query separately.

    It reads the custom 'cigar_like:' lines of an exonerate output and
    builds the same dict based result structure used by the blast parsers.
    '''

    def __init__(self, fhand):
        'The init requires a file to be parsed'
        self._fhand = fhand
        self._exonerate_results = self._results_query_from_exonerate()

    def __iter__(self):
        'Part of the iterator protocol'
        return self

    def _results_query_from_exonerate(self):
        '''It takes the exonerate cigar output file and yields the result for
        each query. The result is a list of match_parts '''
        self._fhand.seek(0, 0)
        # group every cigar_like line by its query id
        cigar_dict = {}
        for line in self._fhand:
            if not line.startswith('cigar_like:'):
                continue
            items = line.split(':', 1)[1].strip().split()
            query_id = items[0]
            if query_id not in cigar_dict:
                cigar_dict[query_id] = []
            cigar_dict[query_id].append(items)
        for query_id, values in cigar_dict.items():
            yield values

    @staticmethod
    def _create_structure_result(query_result):
        '''It creates the result dictionary structure giving a list of
        match_parts of a query_id '''
        # TODO add to the match the match subject start and end
        struct_dict = {}
        query_name = query_result[0][0]
        query_length = int(query_result[0][9])
        query = {'name': query_name, 'length': query_length}
        struct_dict['query'] = query
        struct_dict['matches'] = []
        for match_part_ in query_result:
            # the cigar_like line has 12 whitespace separated fields
            (query_name, query_start, query_end, query_strand, subject_name,
             subject_start, subject_end, subject_strand, score, query_length,
             subject_length, similarity) = match_part_
            query_start = int(query_start)
            # they number the positions between symbols
            # A C G T
            # 0 1 2 3 4
            # Hence the subsequence "CG" would have start=1, end=3, and length=2
            # but we would say start=1 and end=2
            query_end = int(query_end) - 1
            subject_start = int(subject_start)
            subject_end = int(subject_end) - 1
            query_strand = _strand_transform(query_strand)
            subject_strand = _strand_transform(subject_strand)
            score = int(score)
            similarity = float(similarity)
            # For each line, it creates a match part dict
            match_part = {}
            match_part['query_start'] = query_start
            match_part['query_end'] = query_end
            match_part['query_strand'] = query_strand
            match_part['subject_start'] = subject_start
            match_part['subject_end'] = subject_end
            match_part['subject_strand'] = subject_strand
            match_part['scores'] = {'score': score, 'similarity': similarity}
            # Check if the match is already added to the struct. A match is
            # defined by a list of part matches between a query and a subject
            match_num = _match_num_if_exists_in_struc(subject_name,
                                                      struct_dict)
            if match_num is not None:
                # extend the existing match with this match_part and widen
                # its query span and best score if needed
                match = struct_dict['matches'][match_num]
                if match['start'] > query_start:
                    match['start'] = query_start
                if match['end'] < query_end:
                    match['end'] = query_end
                if match['scores']['score'] < score:
                    match['scores']['score'] = score
                match['match_parts'].append(match_part)
            else:
                match = {}
                match['subject'] = {'name': subject_name,
                                    'length': int(subject_length)}
                match['start'] = query_start
                match['end'] = query_end
                match['scores'] = {'score': score}
                match['match_parts'] = []
                match['match_parts'].append(match_part)
                struct_dict['matches'].append(match)
        return struct_dict

    def next(self):
        '''It returns the next exonerate hit'''
        query_result = self._exonerate_results.next()
        return self._create_structure_result(query_result)
def _strand_transform(strand):
'''It transfrom the +/- strand simbols in our user case 1/-1 caracteres '''
if strand == '-':
return -1
elif strand == '+':
return 1
def _match_num_if_exists_in_struc(subject_name, struct_dict):
'It returns the match number of the list of matches that is about subject'
for i, match in enumerate(struct_dict['matches']):
if subject_name == match['subject']['name']:
return i
return None
def get_alignment_parser(kind):
    '''It returns the parser class that matches the given aligner kind.

    Unknown kinds raise a KeyError, as the original dict lookup did.
    '''
    if kind == 'blast_tab':
        return TabularBlastParser
    if kind == 'blast_text':
        return TextBlastParser
    if 'blast' in kind:
        return BlastParser
    return {'exonerate': ExonerateParser}[kind]
def get_match_score(match, score_key, query=None, subject=None):
    '''Given a match it returns its score.

    It tries to get the score from the match, if it's not there it goes for
    the first match_part.
    It can also be a derived score like the incompatibility. All derived scores
    begin with d_
    '''
    match_scores = match['scores']
    if score_key in match_scores:
        return match_scores[score_key]
    # fall back to the best hsp, which is the first match_part
    return match['match_parts'][0]['scores'][score_key]
def get_match_scores(match, score_keys, query, subject):
    '''It returns the scores for one match.

    score_keys should be a list and a list of scores is returned, one per
    requested key.
    '''
    # a comprehension replaces the explicit accumulator loop
    return [get_match_score(match, score_key, query, subject)
            for score_key in score_keys]
def alignment_results_scores(results, scores, filter_same_query_subject=True):
    '''It returns the list of scores for all results.

    For instance, for a blast a list with all e-values can be gathered.
    By default, the results with the same query and subject will be filtered
    out.
    The scores can be a single one or a list of them; with a single score a
    flat list is returned, otherwise one list per score.
    '''
    # one accumulator list per requested score
    collected = [[] for _ in scores]
    for result in results:
        query = result['query']
        for match in result['matches']:
            subject = match['subject']
            self_hit = (query is not None and subject is not None and
                        query['name'] == subject['name'])
            if filter_same_query_subject and self_hit:
                continue
            # all the scores for this match, appended column-wise
            for index, value in enumerate(get_match_scores(match, scores,
                                                           query, subject)):
                collected[index].append(value)
    return collected[0] if len(collected) == 1 else collected
def build_relations_from_aligment(fhand, query_name, subject_name):
    '''It returns a relations dict given an alignment in markx10 format.

    The alignment must be only between two sequences, query against subject.
    The result maps each sequence name to the list of (start, end) segments
    that are aligned without gaps in both sequences.
    '''
    # --- parse the two aligned sequences out of the markx10 report ---
    # in_seq_section counts how many '>' sequence headers have been seen
    in_seq_section = 0
    seq, seq_len, al_start = None, None, None
    for line in fhand:
        line = line.strip()
        if not line:
            continue
        # a single '>' starts a sequence section ('>>' lines are headers)
        if line[0] == '>' and line[1] != '>':
            if in_seq_section:
                # close the previous section
                # NOTE(review): the name here is always query_name; for the
                # first section that is correct (seq0 is the query), the
                # dict built for later sections is discarded below
                seq = {'seq': seq,
                       'length': seq_len,
                       'al_start': al_start - 1,
                       'name': query_name}
                if in_seq_section == 1:
                    seq0 = seq
            in_seq_section += 1
            seq = ''
            continue
        if not in_seq_section:
            continue
        # '; key: value' annotation lines
        if '; sq_len:' in line:
            seq_len = int(line.split(':')[-1])
        if '; al_display_start:' in line:
            al_start = int(line.split(':')[-1])
        # plain lines are sequence residues
        if line[0] not in (';', '#'):
            seq += line
    # the last section read is the subject
    seq1 = {'seq': seq,
            'length': seq_len,
            'al_start': al_start - 1,
            'name': subject_name}
    # --- now we get the segments of gap-free alignment ---
    gap = '-'
    pos_seq0 = seq0['al_start']
    pos_seq1 = seq1['al_start']
    segment_start = None
    segments = []
    for ali_pos in range(len(seq1['seq'])):
        try:
            # NOTE(review): the + 1 skips alignment column 0 and the
            # IndexError on the last iteration is used as the loop
            # terminator -- confirm this offset is intended
            nucl0, nucl1 = seq0['seq'][ali_pos + 1], seq1['seq'][ali_pos + 1]
            if (nucl0 == gap or nucl1 == gap) and segment_start:
                # a gap closes the currently open segment
                do_segment = True
                segment_end = pos_seq0 - 1, pos_seq1 - 1
            else:
                do_segment = False
        except IndexError:
            # end of the alignment closes whatever segment is open
            # NOTE(review): if no segment is open here segment_start is
            # None and the indexing below would raise TypeError -- confirm
            # inputs always end inside a segment
            do_segment = True
            segment_end = pos_seq0, pos_seq1
        if do_segment:
            segment = {seq0['name']: (segment_start[0], segment_end[0]),
                       seq1['name']: (segment_start[1], segment_end[1]), }
            segments.append(segment)
            segment_start = None
        # open a new segment on the first gap-free column
        if nucl0 != gap and nucl1 != gap and segment_start is None:
            segment_start = pos_seq0, pos_seq1
        # advance each sequence coordinate only on non-gap columns
        if nucl0 != gap:
            pos_seq0 += 1
        if nucl1 != gap:
            pos_seq1 += 1
    # --- group the segment limits per sequence name ---
    relations = {}
    for seg in segments:
        for seq_name, limits in seg.items():
            if seq_name not in relations:
                relations[seq_name] = []
            relations[seq_name].append(limits)
    return relations
def _get_match_score(match, score_key, query=None, subject=None):
'''Given a match it returns its score.
It tries to get the score from the match, if it's not there it goes for
the first match_part.
'''
# the score can be in the match itself or in the first
# match_part
if score_key in match['scores']:
score = match['scores'][score_key]
else:
# the score is taken from the best hsp (the first one)
score = match['match_parts'][0]['scores'][score_key]
return score
def _score_above_threshold(score, min_score, max_score, log_tolerance,
log_best_score):
'It checks if the given score is a good one'
if log_tolerance is None:
if min_score is not None and score >= min_score:
match_ok = True
elif max_score is not None and score <= max_score:
match_ok = True
else:
match_ok = False
else:
if max_score is not None and score == 0.0:
match_ok = True
elif min_score is not None and score <= min_score:
match_ok = False
elif max_score is not None and score >= max_score:
match_ok = False
elif abs(log10(score) - log_best_score) < log_tolerance:
match_ok = True
else:
match_ok = False
return match_ok
def _create_scores_mapper_(score_key, score_tolerance=None,
                           max_score=None, min_score=None):
    '''It creates a mapper that keeps only the matches with good scores.

    :param score_key: name of the score to look at (e.g. 'expect')
    :param score_tolerance: optional factor; when given only scores within
        log10(score_tolerance) decades of the best match's score are kept
    :param max_score: scores above it are dropped
    :param min_score: scores below it are dropped
    '''
    if score_tolerance is not None:
        log_tolerance = log10(score_tolerance)
    else:
        log_tolerance = None

    def map_(alignment):
        '''It returns the alignment with only the acceptable matches.

        The alignment is modified in place; match_parts that fail the
        threshold are removed before the match itself is judged.
        '''
        if alignment is None:
            return None
        if log_tolerance is None:
            log_best_score = None
        else:
            # score of the best match (matches are assumed sorted best
            # first, as blast reports them)
            try:
                best_match = alignment['matches'][0]
                best_score = _get_match_score(best_match, score_key)
                if best_score == 0.0:
                    log_best_score = 0.0
                else:
                    log_best_score = log10(best_score)
            except IndexError:
                log_best_score = None
        filtered_matches = []
        for match in alignment['matches']:
            filtered_match_parts = []
            for match_part in match['match_parts']:
                score = match_part['scores'][score_key]
                if _score_above_threshold(score, min_score, max_score,
                                          log_tolerance, log_best_score):
                    filtered_match_parts.append(match_part)
            match['match_parts'] = filtered_match_parts
            if not len(match['match_parts']):
                continue
            # is this match ok?
            # NOTE(review): this uses the public get_match_score while the
            # best score above uses _get_match_score; both behave the same
            match_score = get_match_score(match, score_key)
            if _score_above_threshold(match_score, min_score, max_score,
                                      log_tolerance, log_best_score):
                filtered_matches.append(match)
        alignment['matches'] = filtered_matches
        return alignment
    return map_
def _create_best_scores_mapper(score_key, score_tolerance=None,
                               max_score=None, min_score=None):
    '''It creates a mapper that keeps only the best matches.

    Thin wrapper around _create_scores_mapper_; it exists as the public
    factory registered under 'best_scores' in FILTER_COLLECTION.
    '''
    return _create_scores_mapper_(score_key, score_tolerance=score_tolerance,
                                  max_score=max_score, min_score=min_score)
def _create_scores_mapper(score_key, max_score=None, min_score=None):
'It creates a mapper that keeps only the best matches'
if max_score is None and min_score is None:
raise ValueError('Either max_score or min_score should be given')
return _create_scores_mapper_(score_key, max_score=max_score,
min_score=min_score)
def _create_deepcopy_mapper():
'It creates a mapper that does a deepcopy of the alignment'
def map_(alignment):
'It does the deepcopy'
return copy.deepcopy(alignment)
return map_
def _create_empty_filter():
'It creates a filter that removes the false items'
def filter_(alignment):
'It filters the empty alignments'
if alignment:
return True
else:
return False
return filter_
def _fix_match_scores(match, score_keys):
'Given a match it copies the given scores from the first match_part'
scores = {}
if not match['match_parts']:
return
match_part = match['match_parts'][0]
for key in score_keys:
scores[key] = match_part['scores'][key]
match['scores'] = scores
def _fix_match_start_end(match):
'Given a match it fixes the start and end based on the match_parts'
match_start, match_end = None, None
match_subject_start, match_subject_end = None, None
for match_part in match['match_parts']:
if ('query_start' in match_part and
(match_start is None or
match_part['query_start'] < match_start)):
match_start = match_part['query_start']
if ('query_end' in match_part and
(match_end is None or match_part['query_end'] > match_end)):
match_end = match_part['query_end']
if ('subject_start' in match_part and
(match_subject_start is None or
match_part['subject_start'] < match_subject_start)):
match_subject_start = match_part['subject_start']
if ('subject_end' in match_part and
(match_subject_end is None or
match_part['subject_end'] > match_subject_end)):
match_subject_end = match_part['subject_end']
if match_start is not None:
match['start'] = match_start
if match_end is not None:
match['end'] = match_end
if match_subject_start is not None:
match['subject_start'] = match_subject_start
if match_subject_end is not None:
match['subject_end'] = match_subject_end
def _fix_matches(alignment, score_keys=None):
    '''It removes the empty match_parts and the alignments with no matches.

    Surviving matches get their scores (optionally) and their start/end
    recomputed from their match_parts.  None is returned when nothing is
    left.
    '''
    if alignment is None:
        return None
    kept = []
    for match in alignment['matches']:
        if not match['match_parts']:
            continue
        if score_keys:
            _fix_match_scores(match, score_keys)
        _fix_match_start_end(match)
        kept.append(match)
    if not kept:
        return None
    alignment['matches'] = kept
    return alignment
def _create_fix_matches_mapper():
    '''It creates a function that removes alignments with no matches.

    It also removes matches with no match_parts.
    '''
    # _fix_matches already has the one-argument mapper signature
    # (score_keys defaults to None), so it is returned as is
    return _fix_matches
def covered_segments_from_match_parts(match_parts, in_query=True,
                                      merge_segments_closer=1):
    '''Given a list of match_parts it returns the covered segments.

    match_part 1  -------        ----->   -----------
    match_part 2      ------

    It returns the list of segments covered by the match parts either in the
    query or in the subject.

    merge_segments_closer is an integer. Segments closer than the given
    number of residues will be merged.
    '''
    # the coordinate keys are hoisted out of the loop
    start_key = 'query_start' if in_query else 'subject_start'
    end_key = 'query_end' if in_query else 'subject_end'
    segments = []
    for part in match_parts:
        start, end = part[start_key], part[end_key]
        if start > end:  # a reversed item
            start, end = end, start
        segments.append((start, end))
    return merge_overlaping_segments(
        segments, merge_segments_closer=merge_segments_closer)
def elongate_match_part_till_global(match_part, query_length, subject_length,
                                    align_completely):
    '''It stretches the match_part to convert it in a global alignment.

    We assume that the subject or the query should be completely aligned and
    we stretch the match part to do it.  The match_part dict is modified IN
    PLACE (nothing is returned); stretched match_parts get an ELONGATED key
    with the number of residues added.

    :param align_completely: SUBJECT or QUERY, the sequence that must end up
        fully aligned
    '''
    assert align_completely in (SUBJECT, QUERY)
    # start and ends, normalized so start <= end while remembering the
    # original orientation
    if match_part['subject_start'] <= match_part['subject_end']:
        subject_start = match_part['subject_start']
        subject_end = match_part['subject_end']
        subject_rev = False
    else:
        subject_start = match_part['subject_end']
        subject_end = match_part['subject_start']
        subject_rev = True
    if match_part['query_start'] <= match_part['query_end']:
        query_start = match_part['query_start']
        query_end = match_part['query_end']
        query_rev = False
    else:
        query_start = match_part['query_end']
        query_end = match_part['query_start']
        query_rev = True
    # how much do we elongate? the stretch is what the sequence to align
    # completely still misses, capped by what the other sequence can offer
    if align_completely == SUBJECT:
        stretch_left = subject_start
        max_left_strecth = query_start
        stretch_right = subject_length - subject_end - 1
        max_right_stretch = query_length - query_end - 1
    else:
        stretch_left = query_start
        max_left_strecth = subject_start
        stretch_right = query_length - query_end - 1
        max_right_stretch = subject_length - subject_end - 1
    if stretch_left > max_left_strecth:
        stretch_left = max_left_strecth
    if stretch_right > max_right_stretch:
        stretch_right = max_right_stretch
    # The elongation; reversed coordinates move their end, forward ones
    # their start (and vice versa on the right side)
    if subject_rev:
        match_part['subject_end'] -= stretch_left
    else:
        match_part['subject_start'] -= stretch_left
    if query_rev:
        match_part['query_end'] -= stretch_left
    else:
        match_part['query_start'] -= stretch_left
    if subject_rev:
        match_part['subject_start'] += stretch_right
    else:
        match_part['subject_end'] += stretch_right
    if query_rev:
        match_part['query_start'] += stretch_right
    else:
        match_part['query_end'] += stretch_right
    # The tagging: mark the match_part as elongated when anything was added
    streched_length = stretch_left + stretch_right
    if streched_length:
        match_part[ELONGATED] = streched_length
    # reverse
def elongate_match_parts_till_global(match_parts, query_length,
                                     subject_length, align_completely):
    '''It stretches the match_parts to convert them into global alignments.

    We assume that the subject (or query) should be completely aligned and
    we stretch every match part to do it.  The match_parts are modified in
    place and the same (now elongated) match_parts are returned.

    Bug fix: elongate_match_part_till_global mutates its match_part in
    place and returns None, so the previous list comprehension returned a
    list of Nones instead of the elongated match_parts.
    '''
    for match_part in match_parts:
        elongate_match_part_till_global(match_part, query_length,
                                        subject_length,
                                        align_completely=align_completely)
    return match_parts
def _match_length(match, length_from_query):
    '''It returns the match length.

    It does take into account only the length covered by match_parts.
    '''
    segments = covered_segments_from_match_parts(match['match_parts'],
                                                 length_from_query)
    # segments are inclusive (start, end) pairs
    return sum(end - start + 1 for start, end in segments)
def _match_part_length(match_part, length_in_query):
'It calculates the length of the match part'
if length_in_query:
return abs(match_part['query_end'] - match_part['query_start'])
else:
return abs(match_part['subject_end'] - match_part['subject_start'])
def _match_long_enough(match_length, total_length, min_num_residues,
min_percentage, length_in_query):
'It returns a boolean if the criteria is met'
if min_num_residues is not None:
if match_length >= min_num_residues:
match_ok = True
else:
match_ok = False
else:
percentage = (match_length / total_length) * 100.0
if percentage >= min_percentage:
match_ok = True
else:
match_ok = False
return match_ok
def _create_min_length_mapper(length_in_query, min_num_residues=None,
                              min_percentage=None, filter_match_parts=False):
    '''It creates a mapper that removes short matches.

    The length can be given in percentage or in number of residues.
    The length can be from the query or the subject.
    filter_match_parts determines if every individual match_part is to be
    filtered against the length requirement.

    :raises ValueError: if length_in_query is not a bool or if not exactly
        one of min_num_residues/min_percentage is given
    '''
    if not isinstance(length_in_query, bool):
        raise ValueError('length_in_query should be a boolean')
    if min_num_residues is None and min_percentage is None:
        raise ValueError('min_num_residues or min_percentage should be given')
    elif min_num_residues is not None and min_percentage is not None:
        msg = 'Both min_num_residues or min_percentage can not be given at the'
        msg += ' same time'
        raise ValueError(msg)

    def map_(alignment):
        '''It returns an alignment with the matches that span long enough.

        The alignment is modified in place.
        '''
        if alignment is None:
            return None
        filtered_matches = []
        query = alignment.get('query', None)
        for match in alignment['matches']:
            if match is None:
                continue
            # the reference length for the percentage criterion comes from
            # the query or the subject sequence
            if min_num_residues is None:
                if length_in_query:
                    mol_length = query['length']
                else:
                    mol_length = match['subject']['length']
            else:
                mol_length = None  # it doesn't matter because we're after an
                # absolute value
            if filter_match_parts:
                # every match_part must pass on its own; matches left with
                # no match_parts are dropped
                filtered_match_parts = []
                for match_part in match['match_parts']:
                    match_part_length = _match_part_length(match_part,
                                                           length_in_query)
                    match_part_ok = _match_long_enough(match_part_length,
                                                       mol_length,
                                                       min_num_residues,
                                                       min_percentage,
                                                       length_in_query)
                    if match_part_ok:
                        filtered_match_parts.append(match_part)
                match['match_parts'] = filtered_match_parts
                if not len(match['match_parts']):
                    continue
                filtered_matches.append(match)
            else:
                # the whole match is measured as the length covered by all
                # of its match_parts together
                match_length = _match_length(match, length_in_query)
                match_ok = _match_long_enough(match_length, mol_length,
                                              min_num_residues,
                                              min_percentage,
                                              length_in_query)
                if match_ok:
                    filtered_matches.append(match)
        alignment['matches'] = filtered_matches
        return alignment
    return map_
# kinds of pipeline steps: a MAPPER transforms each alignment, a FILTER
# decides whether an alignment is kept at all
MAPPER = 1
FILTER = 2

# registry used by filter_alignments: maps every configuration 'kind' name
# to the factory that builds the mapper/filter function and to its step kind
FILTER_COLLECTION = {'best_scores':
                     {'funct_factory': _create_best_scores_mapper,
                      'kind': MAPPER},
                     'score_threshold':
                     {'funct_factory': _create_scores_mapper,
                      'kind': MAPPER},
                     'min_length': {'funct_factory': _create_min_length_mapper,
                                    'kind': MAPPER},
                     'deepcopy': {'funct_factory': _create_deepcopy_mapper,
                                  'kind': MAPPER},
                     'fix_matches':
                     {'funct_factory': _create_fix_matches_mapper,
                      'kind': MAPPER},
                     'filter_empty':
                     {'funct_factory': _create_empty_filter,
                      'kind': FILTER},
                     }
def filter_alignments(alignments, config):
    '''It filters and maps the given alignments.

    The filters and maps to use will be decided based on the configuration,
    a list of dicts each holding a 'kind' key (a FILTER_COLLECTION name)
    plus the keyword arguments for that step's factory.  A lazy iterator is
    returned; nothing runs until it is consumed.
    '''
    # deepcopy because the 'kind' keys are deleted from the confs below,
    # and a deepcopy/fix_matches/filter_empty sandwich is always added
    config = copy.deepcopy(config)
    config.insert(0, {'kind': 'deepcopy'})
    config.append({'kind': 'fix_matches'})
    config.append({'kind': 'filter_empty'})
    # create the pipeline
    for conf in config:
        funct_fact = FILTER_COLLECTION[conf['kind']]['funct_factory']
        kind = FILTER_COLLECTION[conf['kind']]['kind']
        del conf['kind']
        # the remaining conf items are the factory's keyword arguments
        function = funct_fact(**conf)
        # Python 2 lazy itertools: steps are stacked, not executed here
        if kind == MAPPER:
            alignments = itertools.imap(function, alignments)
        else:
            alignments = itertools.ifilter(function, alignments)
    return alignments
|
JoseBlanca/seq_crumbs
|
crumbs/seq/alignment_result.py
|
Python
|
gpl-3.0
| 48,822
|
[
"BLAST",
"Biopython"
] |
07827ca955caf49f951d5b03ecc7f427e58a14e16307428d5c26840c069c3fdf
|
''' Significant lifting from https://jmetzen.github.io/2015-11-27/vae.html '''
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn
import random
import matplotlib.pyplot as plt
import re, string
from sklearn.feature_extraction.text import CountVectorizer
from collections import defaultdict
import pickle as pkl
import itertools
import ctc_loss
import os
# vocabulary size minus the two reserved ids (the <UNK> slot and the
# end/start token slot)
n = 50000 - 2


def map_lambda():
    # default id for out-of-vocabulary words (n + 1 is the <UNK> slot);
    # presumably a named function rather than a lambda so the defaultdicts
    # built on it can be pickled -- TODO confirm
    return n + 1


def rev_map_lambda():
    # default token returned for unknown ids in the reverse map
    return "<UNK>"
def load_text(n, num_samples=None):
    '''It loads the (word, definition) training data, one-hot encodes it and
    saves the encoded arrays to disk.

    NOTE(review): this function calls exit() after saving, so the final
    return statement is dead code -- confirm whether the early exit is a
    leftover from data preparation.
    '''
    # fname = 'Oxford_English_Dictionary.txt'
    # txt = []
    # with open(fname,'rb') as f:
    # txt = f.readlines()
    # txt = [x.decode('utf-8').strip() for x in txt]
    # txt = [re.sub(r'[^a-zA-Z ]+', '', x) for x in txt if len(x) > 1]
    # List of words
    # word_list = [x.split(' ', 1)[0].strip() for x in txt]
    # # List of definitions
    # def_list = [x.split(' ', 1)[1].strip()for x in txt]
    with open('./training_data/training_data.pkl', 'rb') as raw:
        word_list, dl = pkl.load(raw)
    def_list = []
    # def_list=[' '.join(defi) for defi in def_list]
    i = 0
    # words={}
    # drop the (word, definition) pairs whose definition is empty; dl is
    # mutated in place so indices are only advanced when nothing is popped
    while i < len(dl):
        defi = dl[i]
        if len(defi) > 0:
            def_list += [' '.join(defi)]
            i += 1
        else:
            dl.pop(i)
            word_list.pop(i)
    # for w,d in zip(word_list,def_list):
    # if w not in words:
    # words[w]=[]
    # words[w].append(d)
    # word_list=[]
    # def_list=[]
    # for word in words:
    # word_list.append(word)
    # # def_list.append(random.choice(words[word]))
    # def_list.append(words[word][0])
    # report the min/max definition length before clamping to 30 tokens
    maxlen = 0
    minlen = 100
    for defi in def_list:
        minlen = min(minlen, len(defi.split()))
        maxlen = max(maxlen, len(defi.split()))
    print(minlen)
    print(maxlen)
    maxlen = 30
    # # Initialize the "CountVectorizer" object, which is scikit-learn's
    # # bag of words tool.
    # vectorizer = CountVectorizer(analyzer = "word", \
    # tokenizer = None, \
    # preprocessor = None, \
    # stop_words = None, \
    # max_features = None, \
    # token_pattern='\\b\\w+\\b') # Keep single character words
    # _map,rev_map=get_one_hot_map(word_list,def_list,n)
    # pkl.dump(_map,open('mapaoh.pkl','wb'))
    # pkl.dump(rev_map,open('rev_mapaoh.pkl','wb'))
    # the vocabulary maps are loaded from a previous run
    _map = pkl.load(open('mapaoh.pkl', 'rb'))
    rev_map = pkl.load(open('rev_mapaoh.pkl', 'rb'))
    # exit()
    # NOTE(review): this condition looks inverted -- it overwrites a caller
    # supplied num_samples with the full corpus size; 'is None' was
    # probably intended
    if num_samples is not None:
        num_samples = len(word_list)
    # X = (36665, 56210)
    X, to_drop = map_one_hot(word_list[:num_samples], _map, 1, n)
    # y = (36665, 56210)
    # print _map
    y, mask = map_one_hot(def_list[:num_samples], _map, maxlen, n, to_drop=to_drop)
    np.save('Xaohex', X)
    np.save('yaohex', y)
    np.save('maskaohex', mask)
    exit()
    # X=np.load('Xaoh.npy','r')
    # y=np.load('yaoh.npy','r')
    # mask=np.load('maskaoh.npy','r')
    print (np.max(y))
    return X, y, mask, rev_map
def get_one_hot_map(to_def, corpus, n):
    '''It builds the word -> id map and its reverse for the top n words.

    Words are ranked by frequency over both the defined words and the
    definition corpus; ids start at 1, id n+1 is reserved for <UNK> and id
    0 for the end/start token.
    '''
    # words={}
    # for line in to_def:
    # if line:
    # words[line.split()[0]]=1
    # counts=defaultdict(int)
    # uniq=defaultdict(int)
    # for line in corpus:
    # for word in line.split():
    # if word not in words:
    # counts[word]+=1
    # words=list(words.keys())
    words = []
    counts = defaultdict(int)
    uniq = defaultdict(int)
    for line in to_def + corpus:
        for word in line.split():
            # NOTE(review): words is empty here, so this membership test is
            # always True (vestigial from the commented-out variant above)
            if word not in words:
                counts[word] += 1
    # defaultdicts with module-level factories: unknown words map to the
    # <UNK> id, unknown ids map back to the '<UNK>' token
    _map = defaultdict(map_lambda)
    rev_map = defaultdict(rev_map_lambda)
    # words=words[:25000]
    # uniq is a histogram of frequencies (how many words occur i times)
    for i in counts.values():
        uniq[i] += 1
    print (len(words))
    # random.shuffle(words)
    # keep the n most frequent words, in descending frequency order
    words += list(map(lambda z: z[0], reversed(sorted(counts.items(), key=lambda x: x[1]))))[:n - len(words)]
    print (len(words))
    i = 0
    # random.shuffle(words)
    # for num_bits in range(binary_dim):
    # for bit_config in itertools.combinations_with_replacement(range(binary_dim),num_bits+1):
    # bitmap=np.zeros(binary_dim)
    # bitmap[np.array(bit_config)]=1
    # num=bitmap*(2** np.arange(binary_dim ))
    # num=np.sum(num)
    # num=int(num)
    # word=words[i]
    # _map[word]=num
    # rev_map[num]=word
    # i+=1
    # if i>=len(words):
    # break
    # if i>=len(words):
    # break
    # i+=1
    # assign ids 1..len(words) in frequency order
    for word in words:
        i += 1
        _map[word] = i
        rev_map[i] = word
    rev_map[n + 1] = '<UNK>'
    # the reserved id 0 depends on the end-token scheme (zero_end_tok is a
    # module-level flag defined elsewhere)
    if zero_end_tok:
        rev_map[0] = '.'
    else:
        # NOTE(review): id 2 is also assigned to a vocabulary word above,
        # so 'End' clobbers it -- confirm this branch is ever used
        rev_map[0] = 'Start'
        rev_map[2] = 'End'
    print (list(reversed(sorted(uniq.items()))))
    print (len(list(uniq.items())))
    print (len(rev_map.keys()))
    print(len(_map.keys()))
    print ('heyo')
    # print rev_map
    return _map, rev_map
def map_word_emb(corpus, _map):
    '''It maps every word of the corpus to its id.

    When the module-level flag get_rand_vec is set a second array with a
    random *different* word id per input word is returned too (negative
    samples).
    '''
    ### NOTE: ONLY WORKS ON TARGET WORD (DOES NOT HANDLE UNK PROPERLY)
    rtn = []
    rtn2 = []
    num_failed = 0
    num_counted = 0
    for word in corpus:
        w = word.lower()
        num_counted += 1
        # count lookups that will fall back to the defaultdict <UNK> id
        if w not in _map:
            num_failed += 1
        mapped = _map[w]
        rtn.append(mapped)
        if get_rand_vec:
            # draw a random word until it differs from the current one
            mapped_rand = random.choice(list(_map.keys()))
            while mapped_rand == word:
                mapped_rand = random.choice(list(_map.keys()))
            mapped_rand = _map[mapped_rand]
            rtn2.append(mapped_rand)
    # reports the fraction of out-of-vocabulary words (Python 2 print)
    print 'fuck', num_failed / float(num_counted)
    if get_rand_vec:
        return np.array(rtn), np.array(rtn2)
    return np.array(rtn)
def map_one_hot(corpus, _map, maxlen, n, to_drop=None):
    '''It encodes a corpus with the word->id map.

    maxlen == 1 encodes single words (one row per word); otherwise whole
    definitions are encoded as id sequences of maxlen+2 slots (position 0
    is the start slot, the sequence is closed with an end token) together
    with a float mask marking the used slots.  to_drop lists corpus indices
    to skip (rows dropped because their word was out of vocabulary).
    form2 and zero_end_tok are module-level flags defined elsewhere.
    '''
    if maxlen == 1:
        if not form2:
            # dense one-hot rows, one per word; the last column flags empty
            # lines
            total_not = 0
            rtn = np.zeros([len(corpus), n + 3], dtype=np.float32)
            for l, line in enumerate(corpus):
                if len(line) == 0:
                    rtn[l, -1] = 1
                else:
                    mapped = _map[line]
                    # NOTE(review): 75001 looks like a stale hard-coded
                    # <UNK> id from an earlier vocabulary size -- confirm
                    if mapped == 75001:
                        total_not += 1
                    rtn[l, mapped] = 1
            print (total_not, len(corpus))
            return rtn
        else:
            # sparse form: keep only the in-vocabulary word ids and record
            # the dropped (OOV) row indices in to_drop
            total_not = 0
            rtn = np.zeros([len(corpus)], dtype=np.float32)
            rtn = []
            to_drop = []
            for l, line in enumerate(corpus):
                if len(line) == 0:
                    # NOTE(review): rtn was just rebound to a list, so this
                    # 2-D indexing would raise TypeError on an empty line --
                    # confirm empty lines cannot reach this branch
                    rtn[l, -1] = 1
                else:
                    mapped = _map[line]
                    if mapped == n + 1:
                        total_not += 1
                    if mapped != n + 1:
                        rtn.append(mapped)
                    else:
                        to_drop.append(l)
            print (total_not, len(corpus))
            return np.array(rtn, dtype=np.int32), to_drop
    else:
        # sequence encoding: one row of maxlen+2 id slots per kept line
        num_samples = len(corpus)
        if to_drop is not None:
            num_samples -= len(to_drop)
        if form2:
            rtn = np.zeros([num_samples, maxlen + 2], dtype=np.float32)
        else:
            rtn = np.zeros([num_samples, maxlen + 2], dtype=np.int32)
        print (rtn.shape)
        mask = np.zeros([num_samples, maxlen + 2], dtype=np.float32)
        print (mask.shape)
        mask[:, 1] = 1.0
        totes = 0
        nopes = 0
        wtf = 0
        dropped = 0
        for _l, _line in enumerate(corpus):
            # skip rows whose word was dropped; l is the compacted row index
            if to_drop is not None and _l in to_drop:
                if dropped % 100 == 0:
                    print _l
                dropped += 1
                continue
            l = _l - dropped
            x = 0
            line = _line.split()
            # fill slots 1..maxlen-1 with word ids (slot 0 is the start slot)
            for i in range(min(len(line), maxlen - 1)):
                # if line[i] not in _map:
                # nopes+=1
                mapped = _map[line[i]]
                rtn[l, i + 1] = mapped
                # wtf counts <UNK> ids written into the sequences
                if mapped == n + 1:
                    wtf += 1
                mask[l, i + 1] = 1.0
                totes += 1
                x = i + 1
            # close the sequence with the end token (0 or n+2 depending on
            # the zero_end_tok scheme)
            to_app = n + 2
            if zero_end_tok:
                to_app = 0
            rtn[l, x + 1] = to_app
            mask[l, x + 1] = 1.0
        print (nopes, totes, wtf)
        return rtn, mask
def xavier_init(fan_in, fan_out, constant=1e-4):
    """Xavier (Glorot) uniform initialization of network weights.

    Returns a (fan_in, fan_out) float32 tensor drawn uniformly from
    [-bound, bound] with bound = constant * sqrt(6 / (fan_in + fan_out)).
    NOTE(review): canonical Xavier uses constant=1; the 1e-4 default here
    scales the weights far smaller -- confirm it is intentional.
    """
    # https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
    bound = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out),
                             minval=-bound, maxval=bound,
                             dtype=tf.float32)
class VariationalAutoencoder(object):
""" Variation Autoencoder (VAE) with an sklearn-like interface implemented using TensorFlow.
This implementation uses probabilistic encoders and decoders using Gaussian
distributions and realized by multi-layer perceptrons. The VAE can be learned
end-to-end.
See "Auto-Encoding Variational Bayes" by Kingma and Welling for more details.
"""
def __init__(self, network_architecture, transfer_fct=tf.nn.softplus,
learning_rate=0.001, batch_size=100,generative=False,ctrain=False,test=False,global_step=None):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.learning_rate = learning_rate
print self.learning_rate
self.batch_size = batch_size
if global_step is None:
global_step=tf.Variable(0,trainiable=False)
self.global_step=global_step
# tf Graph input
self.n_words=network_architecture['n_input']
if not form2:
self.x = tf.placeholder(tf.float32, [None,self.n_words],name='x_in')
else:
self.x = tf.placeholder(tf.int32, [None],name='x_in')
self.intype=type(self.x)
if not form2:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size,network_architecture["maxlen"]],name='caption_placeholder')
else:
self.caption_placeholder = tf.placeholder(tf.int32, [self.batch_size, network_architecture["maxlen"]],name='caption_placeholder')
print self.caption_placeholder.shape
self.mask=tf.placeholder(tf.float32, [None, network_architecture["maxlen"]],name='mask')
self.timestep=tf.placeholder(tf.float32,[],name='timestep')
# Create autoencoder network
to_restore=None
with tf.device('/cpu:0'):
print network_architecture['n_input']
self.embw=tf.Variable(xavier_init(network_architecture['n_input'],network_architecture['n_z']),name='embw')
self.embb=tf.Variable(tf.zeros([network_architecture['n_z']]),name='embb')
if not generative:
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
to_restore=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
self._create_loss_optimizer()
self.test=test
else:
self._build_gen()
# Initializing the tensor flow variables
init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.InteractiveSession()
if embeddings_trainable:
self.saver = tf.train.Saver(var_list=to_restore,max_to_keep=100)
saved_path=tf.train.latest_checkpoint(model_path)
else:
self.saver= tf.train.Saver(var_list=self.untrainable_variables,max_to_keep=100)
mod_path=model_path
if use_ctc:
mod_path=mod_path[:-3]
saved_path=tf.train.latest_checkpoint(mod_path.replace('defdef','embtransfer'))
self.sess.run(init)
if ctrain:
self.saver.restore(self.sess, saved_path)
self.saver=tf.train.Saver(max_to_keep=100)
def _create_network(self):
# Initialize autoencode network weights and biases
network_weights = self._initialize_weights(**self.network_architecture)
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
self.network_weights=network_weights
seqlen=tf.cast(tf.reduce_sum(self.mask,reduction_indices=-1),tf.int32)
self.embedded_input_KLD_loss=tf.constant(0.0)
self.input_embedding_KLD_loss=tf.constant(0.0)
# def train_encoder():
embedded_input,self.embedded_input_KLD_loss=self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'],tf.reshape(self.caption_placeholder,[self.batch_size*self.network_architecture['maxlen']]),logit=True)
print 'eshape',embedded_input.shape
embedded_input=tf.reshape(embedded_input,[self.batch_size,self.network_architecture['maxlen'],self.network_architecture['n_lstm_input']])
print embedded_input.shape
if not vanilla:
self.embedded_input_KLD_loss=tf.reshape(embedded_input_KLD_loss,[-1,self.network_architecture['maxlen']])[:,1:]
encoder_input=embedded_input[:,1:,:]
cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
cell=tf.contrib.rnn.MultiRNNCell([cell]*lstm_stack)
if not use_bdlstm:
encoder_outs,encoder_states=rnn.dynamic_rnn(cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
else:
backward_cell=tf.contrib.rnn.BasicLSTMCell(self.network_architecture['n_lstm_input'])
if lstm_stack>1:
backward_cell=tf.contrib.rnn.MultiRNNCell([backward_cell]*lstm_stack)
encoder_outs,encoder_states=rnn.bidirectional_dynamic_rnn(cell,backward_cell,encoder_input,sequence_length=seqlen-1,dtype=tf.float32,time_major=False)
ix_range=tf.range(0,self.batch_size,1)
ixs=tf.expand_dims(ix_range,-1)
to_cat=tf.expand_dims(seqlen-2,-1)
gather_inds=tf.concat([ixs,to_cat],axis=-1)
print encoder_outs
outs=tf.gather_nd(encoder_outs,gather_inds)
# outs=tf.nn.dropout(outs,.75)
self.deb=tf.gather_nd(self.caption_placeholder[:,1:],gather_inds)
print outs.shape
input_embedding,self.input_embedding_KLD_loss=self._get_middle_embedding([network_weights['middle_encoding'],network_weights['biases_middle_encoding']],network_weights['middle_encoding'],outs,logit=True)
# return input_embedding
# input_embedding=tf.nn.l2_normalize(input_embedding,dim=-1)
self.other_loss=tf.constant(0,dtype=tf.float32)
KLD_penalty=(tf.cast(self.timestep,tf.float32)/1.0)*1e-3
cos_penalty=tf.maximum(-0.1,(tf.cast(self.timestep,tf.float32)/(5.0)))*1e-3
self.input_KLD_loss=tf.constant(0.0)
# def train_decoder():
if form3:
_x,self.input_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['variational_encoding'])
self.input_KLD_loss=tf.reduce_mean(self.input_KLD_loss)*KLD_penalty#\*tf.constant(0.0,dtype=tf.float32)
# normed_embedding= tf.nn.l2_normalize(self.mid_var, dim=-1)
# normed_target=tf.nn.l2_normalize(self.word_var,dim=-1)
# cos_sim=(tf.reduce_sum(tf.multiply(normed_embedding,normed_target),axis=-1))
# # # self.exp_loss=tf.reduce_mean((-cos_sim))
# # # self.exp_loss=tf.reduce_sum(xentropy)/float(self.batch_size)
# self.other_loss += tf.reduce_mean(1-(cos_sim))*cos_penalty
# # other_loss+=tf.reduce_mean(tf.reduce_sum(tf.square(_x-input_embedding),axis=-1))*cos_penalty
# _x=tf.concat([input_embedding,_x],axis=-1)
# tempe=tf.Variable(xavier_init(self.network_architecture['n_lstm_input']*2,self.network_architecture['n_lstm_input']),name='emb_cat')
# tempb=tf.Variable(tf.zeros([self.network_architecture['n_lstm_input']]),name='emb_cat_b')
# _x=tf.matmul(_x,tempe)+tempb
# input_embedding=_x
# input_embedding=tf.cond(tf.equal(self.timestep%5,0),train_decoder,train_encoder)
# Use recognition network to determine mean and
# (log) variance of Gaussian distribution in latent
# space
# if not same_embedding:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['input_meaning'])
# else:
# input_embedding,input_embedding_KLD_loss=self._get_input_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'])
# if not embeddings_trainable:
# input_embedding=tf.stop_gradient(input_embedding)
# embed2decoder=tf.Variable(xavier_init(self.network_architecture['n_z_m_2'],self.network_architecture['n_lstm_input']),name='decoder_embedding_weight')
# embed2decoder_bias=tf.Variable(tf.zeros(self.network_architecture['n_lstm_input']),name='decoder_embedding_bias')
state = self.lstm.zero_state(self.batch_size, dtype=tf.float32)
# input_embedding=tf.matmul(input_embedding,embed2decoder)+embed2decoder_bias
loss = 0
self.debug=0
probs=[]
with tf.variable_scope("RNN"):
for i in range(self.network_architecture['maxlen']):
if i > 0:
# current_embedding = tf.nn.embedding_lookup(self.word_embedding, caption_placeholder[:,i-1]) + self.embedding_bias
if form4:
current_embedding,KLD_loss=input_embedding,0
elif form2:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1],logit=True)
else:
current_embedding,KLD_loss = self._get_word_embedding([network_weights['variational_encoding'],network_weights['biases_variational_encoding']],network_weights['LSTM'], self.caption_placeholder[:,i-1])
loss+=tf.reduce_sum(KLD_loss*self.mask[:,i])*KLD_penalty
else:
current_embedding = input_embedding
if i > 0:
tf.get_variable_scope().reuse_variables()
out, state = self.lstm(current_embedding, state)
if i > 0:
if not form2:
labels = tf.expand_dims(self.caption_placeholder[:, i], 1)
ix_range=tf.range(0, self.batch_size, 1)
ixs = tf.expand_dims(ix_range, 1)
concat = tf.concat([ixs, labels],1)
onehot = tf.sparse_to_dense(
concat, tf.stack([self.batch_size, self.n_words]), 1.0, 0.0)
else:
onehot=self.caption_placeholder[:,i]
logit = tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not use_ctc:
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit, labels=onehot)
xentropy = xentropy * self.mask[:,i]
xentropy=tf.reduce_sum(xentropy)
self.debug+=xentropy
loss += xentropy
else:
probs.append(tf.expand_dims(tf.nn.sigmoid(logit),1))
self.debug=[self.input_KLD_loss,tf.reduce_mean(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty,self.other_loss,KLD_penalty]
if not use_ctc:
loss_ctc=0
# self.debug=other_loss
# self.debug=[input_KLD_loss,embedded_input_KLD_loss,input_embedding_KLD_loss]
else:
probs=tf.concat(probs,axis=1)
probs=ctc_loss.get_output_probabilities(probs,self.caption_placeholder[:,1:,:])
loss_ctc=ctc_loss.loss(probs,self.caption_placeholder[:,1:,:],self.network_architecture['maxlen']-2,self.batch_size,seqlen-1)
self.debug=loss_ctc
#
loss = (loss / tf.reduce_sum(self.mask[:,1:]))+tf.reduce_sum(self.input_embedding_KLD_loss)/self.batch_size*KLD_penalty+tf.reduce_sum(self.embedded_input_KLD_loss*self.mask[:,1:])/tf.reduce_sum(self.mask[:,1:])*KLD_penalty+loss_ctc+self.input_KLD_loss+self.other_loss
print 'makin loss'
self.loss=loss
def _initialize_weights(self, n_lstm_input, maxlen,
n_input, n_z, n_z_m,n_z_m_2):
all_weights = dict()
if form3:
n_in=n_input
else:
n_in=n_input
embeddings_trainable=True
if not same_embedding:
all_weights['input_meaning'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight',trainable=embeddings_trainable),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias',trainable=embeddings_trainable)}
# if not vanilla:
all_weights['biases_variational_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_log_sigmab',trainable=embeddings_trainable)}
with tf.device('/cpu:0'):
om=tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable)
ols=tf.Variable(xavier_init(n_in, n_z),name='out_log_sigma',trainable=embeddings_trainable)
all_weights['variational_encoding'] = {
'out_mean': om,
'out_log_sigma': ols,
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')
}
print all_weights['variational_encoding']['out_mean']
# else:
# all_weights['biases_variational_encoding'] = {
# 'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='out_meanb',trainable=embeddings_trainable)}
# all_weights['variational_encoding'] = {
# 'out_mean': tf.Variable(xavier_init(n_in, n_z),name='out_mean',trainable=embeddings_trainable),
# 'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='in_affine_weight'),
# 'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='in_affine_bias')}
self.untrainable_variables=all_weights['input_meaning'].values()+all_weights['biases_variational_encoding'].values()+all_weights['variational_encoding'].values()
if mid_vae:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb'),
'out_log_sigma': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_log_sigmab')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_log_sigma'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean'),
'out_log_sigma': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_log_sigma')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable),
'out_log_sigma': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_log_sigmab',trainable=embeddings_trainable)
}
else:
all_weights['biases_middle_encoding'] = {
'out_mean': tf.Variable(tf.zeros([n_z_m], dtype=tf.float32),name='mid_out_meanb')}
all_weights['middle_encoding'] = {
'out_mean': tf.Variable(xavier_init(n_lstm_input, n_z_m),name='mid_out_mean'),
'affine_weight': tf.Variable(xavier_init(n_z_m, n_lstm_input),name='mid_affine_weight'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='mid_affine_bias')}
all_weights['embmap']={
'out_mean': tf.Variable(xavier_init(n_in, n_z),name='embmap_out_mean')
}
all_weights['embmap_biases']={
'out_mean': tf.Variable(tf.zeros([n_z], dtype=tf.float32),name='embmap_out_meanb',trainable=embeddings_trainable)
}
self.lstm=tf.contrib.rnn.BasicLSTMCell(n_lstm_input)
if lstm_stack>1:
self.lstm=tf.contrib.rnn.MultiRNNCell([self.lstm]*lstm_stack)
all_weights['LSTM'] = {
'affine_weight': tf.Variable(xavier_init(n_z, n_lstm_input),name='affine_weight2'),
'affine_bias': tf.Variable(tf.zeros(n_lstm_input),name='affine_bias2'),
'encoding_weight': tf.Variable(xavier_init(n_lstm_input,n_input),name='encoding_weight'),
'encoding_bias': tf.Variable(tf.zeros(n_input),name='encoding_bias'),
'lstm': self.lstm}
return all_weights
def _get_input_embedding(self, ve_weights, aff_weights):
if not form3:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],self.x)
else:
x=self.x
# with tf.device('/cpu:0'):
# x=tf.nn.embedding_lookup(self.embw,self.x)
# x+=self.embb
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x,lookup=True)
self.word_var=z
embedding=tf.matmul(z,aff_weights['affine_weight'])+aff_weights['affine_bias']
return embedding,vae_loss
def _get_middle_embedding(self, ve_weights, lstm_weights, x,logit=False):
if logit:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x)
else:
if not form2:
z,vae_loss=self._vae_sample_mid(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
print z.shape
self.mid_var=z
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _get_word_embedding(self, ve_weights, lstm_weights, x,logit=False):
if form3:
# with tf.device('/cpu:0'):
# x=tf.nn.embedding_lookup(self.embw,x)
# x+=self.embb
pass
if logit:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x,lookup=True)
else:
if not form2:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],x, True)
else:
z,vae_loss=self._vae_sample(ve_weights[0],ve_weights[1],tf.one_hot(x,depth=self.network_architecture['n_input']))
all_the_f_one_h.append(tf.one_hot(x,depth=self.network_architecture['n_input']))
embedding=tf.matmul(z,lstm_weights['affine_weight'])+lstm_weights['affine_bias']
return embedding,vae_loss
def _vae_sample(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if not vanilla:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if not vanilla:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if not vanilla:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if not vanilla:
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _vae_sample_mid(self, weights, biases, x, lookup=False):
#TODO: consider adding a linear transform layer+relu or softplus here first
if not lookup:
mu=tf.matmul(x,weights['out_mean'])+biases['out_mean']
if mid_vae:
logvar=tf.matmul(x,weights['out_log_sigma'])+biases['out_log_sigma']
else:
with tf.device('/cpu:0'):
mu=tf.nn.embedding_lookup(weights['out_mean'],x)
mu+=biases['out_mean']
if mid_vae:
with tf.device('/cpu:0'):
logvar=tf.nn.embedding_lookup(weights['out_log_sigma'],x)
logvar+=biases['out_log_sigma']
if mid_vae:
epsilon=tf.random_normal(tf.shape(logvar),name='epsilon')
std=tf.exp(.5*logvar)
z=mu+tf.multiply(std,epsilon)
else:
z=mu
KLD=0.0
if mid_vae:
print 'stop fucking sampling',mid_vae
KLD = -0.5 * tf.reduce_sum(1 + logvar - tf.pow(mu, 2) - tf.exp(logvar),axis=-1)
print logvar.shape,epsilon.shape,std.shape,z.shape,KLD.shape
return z,KLD
def _create_loss_optimizer(self):
if clip_grad:
opt_func = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), .1)
self.optimizer = opt_func.apply_gradients(zip(grads, tvars))
else:
self.optimizer = \
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
def _create_loss_test(self):
self.test_op = \
tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[1],extra_feed_dict={})
def partial_fit(self, X,y,mask,testify=False,timestep=0):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
if self.test and testify:
print tf.test.compute_gradient_error(self.x,np.array([self.batch_size,self.n_words]),self.loss,[self.batch_size],extra_feed_dict={self.caption_placeholder: y, self.mask: mask})
exit()
else:
opt, cost,shit = self.sess.run((self.optimizer, self.loss,self.debug),
feed_dict={self.x: X, self.caption_placeholder: y, self.mask: mask,self.timestep:timestep})
# print shit
# print deb
# exit()
return cost,shit
def _build_gen(self):
#same setup as `_create_network` function
network_weights = self._initialize_weights(**self.network_architecture)
if form2:
start_token_tensor=tf.constant((np.zeros([self.batch_size,binary_dim])).astype(np.float32),dtype=tf.float32)
else:
start_token_tensor=tf.constant((np.zeros([self.batch_size])).astype(np.int32),dtype=tf.int32)
self.network_weights=network_weights
if not same_embedding:
input_embedding,_=self._get_input_embedding([network_weights['embmap'],network_weights['embmap_biases']],network_weights['embmap'])
else:
input_embedding,_=self._get_input_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'])
print input_embedding.shape
# image_embedding = tf.matmul(img, self.img_embedding) + self.img_embedding_bias
state = self.lstm.zero_state(self.batch_size,dtype=tf.float32)
#declare list to hold the words of our generated captions
all_words = []
with tf.variable_scope("RNN"):
# in the first iteration we have no previous word, so we directly pass in the image embedding
# and set the `previous_word` to the embedding of the start token ([0]) for the future iterations
output, state = self.lstm(input_embedding, state)
print state,output.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], start_token_tensor)
print previous_word.shape
# previous_word = tf.nn.embedding_lookup(self.word_embedding, [0]) + self.embedding_bias
for i in range(self.network_architecture['maxlen']):
tf.get_variable_scope().reuse_variables()
print i
out, state = self.lstm(previous_word, state)
# get a one-hot word encoding from the output of the LSTM
logit=tf.matmul(out, network_weights['LSTM']['encoding_weight']) + network_weights['LSTM']['encoding_bias']
if not form2:
best_word = tf.argmax(logit, 1)
else:
best_word = tf.argmax(logit, 1)
# with tf.device("/cpu:0"):
# # get the embedding of the best_word to use as input to the next iteration of our LSTM
# previous_word = tf.nn.embedding_lookup(self.word_embedding, best_word)
# previous_word += self.embedding_bias
print logit.shape
if form4:
previous_word,_=input_embedding,None
elif form2:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word,logit=True)
else:
previous_word,_ = self._get_word_embedding([self.network_weights['variational_encoding'],self.network_weights['biases_variational_encoding']],self.network_weights['LSTM'], best_word)
print previous_word.shape
all_words.append(best_word)
self.generated_words=all_words
def generate(self, _map, x):
""" Generate data by sampling from latent space.
If z_mu is not None, data for this point in latent space is
generated. Otherwise, z_mu is drawn from prior in latent
space.
# """
# if z_mu is None:
# z_mu = np.random.normal(size=self.network_architecture["n_z"])
# # Note: This maps to mean of distribution, we could alternatively
# # sample from Gaussian distribution
# return self.sess.run(self.x_reconstr_mean,
# feed_dict={self.z: z_mu})
# saver = tf.train.Saver()
# saver.restore(self.sess, tf.train.latest_checkpoint(model_path))
generated_word_index,f_it= self.sess.run([self.generated_words,all_the_f_one_h], feed_dict={self.x:x})
print f_it
print generated_word_index
if form2:
generated_word_index=np.array(bin_to_int(generated_word_index))
generated_word_index=np.rollaxis(generated_word_index,1)
else:
generated_word_index=np.array(generated_word_index)
return generated_word_index
# generated_sentence = ixtoword(_map,generated_word_index)
# return generated_sentence
def ixtoword(_map, ixs):
    """Translate each row of word indices in ixs into words via _map."""
    sentences = []
    for row in ixs:
        sentences.append([_map[ix] for ix in row])
    return sentences
def bin_to_int(a):
    """Collapse each binary-coded array in a (last axis = bits, LSB first)
    into integers; returns a list of uint32 arrays."""
    decoded = []
    for bits in a:
        weights = 2 ** np.arange(bits.shape[-1])
        decoded.append((bits * weights).sum(axis=-1).astype(np.uint32))
    return decoded
def train(network_architecture, learning_rate=0.001,
          batch_size=100, training_epochs=10, display_step=2, gen=False, ctrain=False, test=False):
    """Construct a VariationalAutoencoder and, unless gen=True, train it.

    Relies on module-level globals: n_samples, should_decay, should_save,
    all_samps, X, y, mask, model_path, loss_output_path.
    Returns the trained (or generative) VAE instance.
    """
    global_step = tf.Variable(0, trainable=False)
    total_batch = int(n_samples / batch_size)
    if should_decay and not gen:
        # Decay the learning rate by 5% per epoch's worth of batches.
        learning_rate = tf.train.exponential_decay(learning_rate, global_step,
                                                   total_batch, 0.95, staircase=True)
    vae = VariationalAutoencoder(network_architecture,
                                 learning_rate=learning_rate,
                                 batch_size=batch_size, generative=gen, ctrain=ctrain, test=test, global_step=global_step)
    # Training cycle
    if gen:
        # Generative mode: graph only, no training loop.
        return vae
    costs = []
    indlist = np.arange(all_samps).astype(int)
    for epoch in range(training_epochs):
        avg_cost = 0.
        # Loop over all batches
        np.random.shuffle(indlist)
        testify = False
        avg_loss = 0
        for i in range(total_batch):
            ts = i
            # NOTE(review): batches are sampled WITH replacement rather than
            # sliced from the shuffled index list — confirm intentional.
            inds = np.random.choice(indlist, batch_size)
            batch_xs = X[inds]
            # Fit training using batch data; timestep drives KLD annealing.
            cost, loss = vae.partial_fit(batch_xs, y[inds].astype(np.uint32), mask[inds], timestep=(epoch) + 1, testify=testify)
            # Compute average loss (running mean over the epoch)
            avg_cost = avg_cost * i / (i + 1) + cost / (i + 1)
            if i % display_step == 0:
                print avg_cost, loss, cost
                if epoch == 0 and ts == 0:
                    costs.append(avg_cost)
                costs.append(avg_cost)
        # Display logs / checkpoint per display_step epochs (and epoch 1)
        if epoch % (display_step) == 0 or epoch == 1:
            if should_save:
                print 'saving'
                vae.saver.save(vae.sess, os.path.join(model_path, 'model'))
                pkl.dump(costs, open(loss_output_path, 'wb'))
            print("Epoch:", '%04d' % (epoch + 1),
                  "cost=", avg_cost)
    return vae
if __name__ == "__main__":
    import sys
    # ---- Model-variant flags, mostly driven by positional CLI arguments ----
    form2 = True          # captions carried as integer ids / binary word codes
    vanilla = True
    if sys.argv[1] != 'vanilla':
        vanilla = False
    mid_vae = False
    form3 = True
    form4 = False
    # NOTE(review): this unconditionally re-enables vanilla mode, making the
    # sys.argv[1] check above a no-op — confirm whether intentional.
    vanilla = True
    if sys.argv[2] == 'mid_vae':
        mid_vae = True
        print 'mid_vae'
    same_embedding = False
    clip_grad = True
    if sys.argv[3] != 'clip':
        clip_grad = False
    should_save = True
    should_train = True
    # should_train=not should_train
    should_continue = False
    # should_continue=True
    should_decay = True
    zero_end_tok = True
    training_epochs = int(sys.argv[13])
    batch_size = int(sys.argv[4])
    onehot = False
    # 'transfer' in argv[5] freezes the embedding tables (restored from an
    # 'embtransfer' checkpoint); anything else trains them.
    embeddings_trainable = False
    if sys.argv[5] != 'transfer':
        print 'true embs'
        embeddings_trainable = True
    transfertype2 = True
    binary_dim = int(sys.argv[6])
    all_the_f_one_h = []
    # Load corpus; vocabulary capacity 50000 minus 2-3 reserved tokens.
    if not zero_end_tok:
        X, y, mask, _map = load_text(50000 - 3)
    else:
        X, y, mask, _map = load_text(50000 - 2)
    n_input = 50000
    n_samples = 30000
    lstm_dim = int(sys.argv[7])
    model_path = sys.argv[8]
    vartype = ''
    transfertype = ''
    maxlen = int(sys.argv[9]) + 2   # +2 for start/end tokens
    n_z = int(sys.argv[10])
    n_z_m = int(sys.argv[11])
    n_z_m_2 = int(sys.argv[12])
    if not vanilla:
        vartype = 'var'
    if not embeddings_trainable:
        transfertype = 'transfer'
    cliptype = ''
    if clip_grad:
        cliptype = 'clip'
    use_ctc = False
    losstype = ''
    if sys.argv[14] == 'ctc_loss':
        use_ctc = True
        losstype = 'ctc'
    lstm_stack = int(sys.argv[15])
    use_bdlstm = False
    bdlstmtype = ''
    if sys.argv[16] != 'forward':
        use_bdlstm = True
        bdlstmtype = 'bdlstm'
    # Loss-curve pickle path encodes the whole configuration.
    loss_output_path = 'losses/%s%ss_%sb_%sl_%sh_%sd_%sz_%szm_%s%s%sdefdef%sohex.pkl' % (bdlstmtype, str(lstm_stack), str(batch_size), str(maxlen - 2), str(lstm_dim), str(n_input), str(n_z), str(n_z_m), str(losstype), str(cliptype), str(vartype), str(transfertype))
    all_samps = len(X)
    n_samples = all_samps
    # X, y = X[:n_samples, :], y[:n_samples, :]
    network_architecture = \
        dict(maxlen=maxlen,          # caption length incl. start/end tokens
             n_input=n_input,        # vocabulary size
             n_lstm_input=lstm_dim,  # LSTM cell size
             n_z=n_z,                # dimensionality of word latent space
             n_z_m=n_z_m,            # dimensionality of sentence latent space
             n_z_m_2=n_z_m_2
             )
    # batch_size=1
    if should_train:
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=False, ctrain=should_continue, learning_rate=.005)
    else:
        # Generation/eval mode: rebuild as a generative model from checkpoint
        # and decode a sample batch.
        vae_2d = train(network_architecture, training_epochs=training_epochs, batch_size=batch_size, gen=True, ctrain=True)
        # # vae_2d._build_gen()
        ind_list = np.arange(len(X)).astype(int)
        # np.random.shuffle(ind_list)
        x_sample = X[ind_list[:batch_size]]
        print x_sample
        y_sample = y[ind_list[:batch_size]]
        print y_sample
        y_hat = vae_2d.generate(_map, x_sample)
        y_hat = y_hat[:10]
        # print y_hat
        y_hat_words = ixtoword(_map, y_hat)
        print y_hat_words
        if form2:
            y_words = ixtoword(_map, np.array(bin_to_int(y_sample[:10])))
        else:
            y_words = ixtoword(_map, y_sample)
        print(y_hat)
        print(y_hat_words)
        print(y_words)
        print(ixtoword(_map, bin_to_int(np.expand_dims(x_sample[:10], axis=0))))
        # # plt.figure(figsize=(8, 6))
        # plt.scatter(z_mu[:, 0], z_mu[:, 1], c=np.argmax(y_sample, 1))
        # plt.colorbar()
        # plt.grid()
        # plt.show()
|
dricciardelli/vae2vec
|
def_def_oh_ex.py
|
Python
|
mit
| 36,948
|
[
"Gaussian"
] |
8491a510e5b5a46758c71fb3d4d0e547d4a671fa466856f7fc63b92fd1287446
|
#TODO: Set dbkey to proper UCSC build, if known
import urllib
from galaxy import datatypes, config
import tempfile, shutil
def exec_before_job(app, inp_data, out_data, param_dict, tool=None):
    """Sets the name of the data.

    Renames the first output dataset from the tool parameters and coerces
    its datatype ('txt' is treated as 'interval' since all HbVar data is TSV).
    """
    new_name = param_dict.get('name', 'HbVar query')
    new_type = param_dict.get('type', 'txt')
    if new_type == 'txt':
        new_type = 'interval'  # All data is TSV, assume interval
    name, data = out_data.items()[0]
    converted = app.datatypes_registry.change_datatype(data, new_type)
    converted.name = new_name
    out_data[name] = converted
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Verifies the data after the run.

    Downloads the exported HbVar query results from the URL the datasource
    sent back, writes them (capped at MAX_SIZE) into the first output
    dataset, then fixes up interval metadata or falls back to tabular.
    Raises Exception on a missing URL, connection failure, or oversized
    download.
    """
    URL = param_dict.get('URL', None)
    # FIX: the missing-URL check used to run *after* string concatenation,
    # so a missing URL raised TypeError instead of the intended message.
    if not URL:
        raise Exception('Datasource has not sent back a URL parameter')
    URL = URL + '&_export=1&GALAXY_URL=0'
    CHUNK_SIZE = 2 ** 20  # 1Mb
    MAX_SIZE = CHUNK_SIZE * 100
    try:
        page = urllib.urlopen(URL)
    except Exception as exc:
        raise Exception('Problems connecting to %s (%s)' % (URL, exc))
    name, data = out_data.items()[0]
    fp = open(data.file_name, 'wb')
    size = 0
    while 1:
        chunk = page.read(CHUNK_SIZE)
        if not chunk:
            break
        if size > MAX_SIZE:
            # FIX: close the output file before aborting (was leaked).
            fp.close()
            raise Exception('----- maximum datasize exceeded ---')
        size += len(chunk)
        fp.write(chunk)
    fp.close()
    # Set meta data, format file to be valid interval type
    if isinstance(data.datatype, datatypes.interval.Interval):
        data.set_meta(first_line_is_header=True)
        # check for missing meta data; if all there, rewrite the file in place
        if not data.missing_meta():
            temp = tempfile.NamedTemporaryFile('w')
            temp_filename = temp.name
            temp.close()
            temp = open(temp_filename, 'w')
            # 0-based column indices from the dataset metadata
            chromCol = int(data.metadata.chromCol) - 1
            startCol = int(data.metadata.startCol) - 1
            strandCol = int(data.metadata.strandCol) - 1
            for line in open(data.file_name, 'r'):
                fields = line.strip().split('\t')
                temp.write("%s\n" % '\t'.join(fields))
            temp.close()
            shutil.move(temp_filename, data.file_name)
        else:
            # Metadata could not be determined: downgrade to plain tabular.
            data = app.datatypes_registry.change_datatype(data, 'tabular')
    data.set_size()
    data.set_peek()
    app.model.context.add(data)
    app.model.context.flush()
|
volpino/Yeps-EURAC
|
tools/data_source/hbvar_filter.py
|
Python
|
mit
| 2,632
|
[
"Galaxy"
] |
02bfce63002e97ecab7f59c51037fd423e9244fe157fe81f4561cbb432a48984
|
# -*- coding: utf-8 -*-
# These tests require a working internet connection.
import os, sys; sys.path.insert(0, os.path.join("..", ".."))
import unittest
import time
import warnings
from pattern import web
#---------------------------------------------------------------------------------------------------
class TestCache(unittest.TestCase):
def setUp(self):
pass
def test_cache(self):
# Assert cache unicode.
k, v = "test", u"ünîcødé"
web.cache[k] = v
self.assertTrue(isinstance(web.cache[k], unicode))
self.assertEqual(web.cache[k], v)
self.assertEqual(web.cache.age(k), 0)
del web.cache[k]
print "pattern.web.Cache"
#---------------------------------------------------------------------------------------------------
class TestUnicode(unittest.TestCase):
def setUp(self):
# Test data with different (or wrong) encodings.
self.strings = (
u"ünîcøde",
u"ünîcøde".encode("utf-16"),
u"ünîcøde".encode("latin-1"),
u"ünîcøde".encode("windows-1252"),
"ünîcøde",
u"אוניקאָד"
)
def test_decode_utf8(self):
# Assert unicode.
for s in self.strings:
self.assertTrue(isinstance(web.decode_utf8(s), unicode))
print "pattern.web.decode_utf8()"
def test_encode_utf8(self):
# Assert Python bytestring.
for s in self.strings:
self.assertTrue(isinstance(web.encode_utf8(s), str))
print "pattern.web.encode_utf8()"
#---------------------------------------------------------------------------------------------------
class TestURL(unittest.TestCase):
def setUp(self):
# Test a live URL that has fast response time
self.live = "http://www.google.com/"
# Test a fake URL with the URL parser.
self.url = "https://username:password@www.domain.com:8080/path/path/page.html?q=1#anchor"
self.parts = {
"protocol": "https",
"username": "username",
"password": "password",
"domain": "www.domain.com",
"port": 8080,
"path": ["path", "path"],
"page": "page.html",
"query": {"q": 1},
"anchor": "anchor"
}
def test_asynchrous(self):
# Assert asynchronous function call (returns 1).
v = web.asynchronous(lambda t: time.sleep(t) or 1, 0.2)
while not v.done:
time.sleep(0.1)
self.assertEqual(v.value, 1)
print "pattern.web.asynchronous()"
def test_extension(self):
# Assert filename extension.
v = web.extension(os.path.join("pattern", "test", "test-web.py.zip"))
self.assertEqual(v, ".zip")
print "pattern.web.extension()"
def test_urldecode(self):
# Assert URL decode (inverse of urllib.urlencode).
v = web.urldecode("?user=me&page=1&q=&")
self.assertEqual(v, {"user": "me", "page": 1, "q": None})
print "pattern.web.urldecode()"
def test_proxy(self):
# Assert URL proxy.
v = web.proxy("www.proxy.com", "https")
self.assertEqual(v, ("www.proxy.com", "https"))
print "pattern.web.proxy()"
def test_url_parts(self):
# Assert URL._parse and URL.parts{}.
v = web.URL(self.url)
for a, b in (
(web.PROTOCOL, self.parts["protocol"]),
(web.USERNAME, self.parts["username"]),
(web.PASSWORD, self.parts["password"]),
(web.DOMAIN, self.parts["domain"]),
(web.PORT, self.parts["port"]),
(web.PATH, self.parts["path"]),
(web.PAGE, self.parts["page"]),
(web.QUERY, self.parts["query"]),
(web.ANCHOR, self.parts["anchor"])):
self.assertEqual(v.parts[a], b)
print "pattern.web.URL.parts"
def test_url_query(self):
# Assert URL.query and URL.querystring.
v = web.URL(self.url)
v.query["page"] = 10
v.query["user"] = None
self.assertEqual(v.query, {"q": 1, "page": 10, "user": None})
self.assertEqual(v.querystring, "q=1&page=10&user=")
# Assert URL.querystring encodes unicode arguments.
q = ({u"ünîcødé": 1.5}, "%C3%BCn%C3%AEc%C3%B8d%C3%A9=1.5")
v.query = q[0]
self.assertEqual(v.querystring, q[1])
# Assert URL.query decodes unicode arguments.
v = web.URL("http://domain.com?" + q[1])
self.assertEqual(v.query, q[0])
print "pattern.web.URL.query"
print "pattern.web.URL.querystring"
def test_url_string(self):
# Assert URL._set_string().
v = web.URL("")
v.string = "https://domain.com"
self.assertEqual(v.parts[web.PROTOCOL], "https")
self.assertEqual(v.parts[web.DOMAIN], "domain.com")
self.assertEqual(v.parts[web.PATH], [])
print "pattern.web.URL.string"
def test_url(self):
# Assert URL.copy().
v = web.URL(self.url)
v = v.copy()
# Assert URL.__setattr__().
v.username = "new-username"
v.password = "new-password"
# Assert URL.__getattr__().
self.assertEqual(v.method, web.GET)
self.assertEqual(v.protocol, self.parts["protocol"])
self.assertEqual(v.username, "new-username")
self.assertEqual(v.password, "new-password")
self.assertEqual(v.domain, self.parts["domain"])
self.assertEqual(v.port, self.parts["port"])
self.assertEqual(v.path, self.parts["path"])
self.assertEqual(v.page, self.parts["page"])
self.assertEqual(v.query, self.parts["query"])
self.assertEqual(v.anchor, self.parts["anchor"])
print "pattern.web.URL"
def test_url_open(self):
# Assert URLError.
v = web.URL(self.live.replace("http://", "htp://"))
self.assertRaises(web.URLError, v.open)
self.assertEqual(v.exists, False)
# Assert HTTPError.
v = web.URL(self.live + "iphone/android.html")
self.assertRaises(web.HTTPError, v.open)
self.assertRaises(web.HTTP404NotFound, v.open)
self.assertEqual(v.exists, False)
# Assert socket connection.
v = web.URL(self.live)
self.assertTrue(v.open() != None)
self.assertEqual(v.exists, True)
# Assert user-agent and referer.
self.assertTrue(v.open(user_agent=web.MOZILLA, referrer=web.REFERRER) != None)
print "pattern.web.URL.exists"
print "pattern.web.URL.open()"
def test_url_download(self):
t = time.time()
v = web.URL(self.live).download(cached=False, throttle=0.25, unicode=True)
t = time.time() - t
# Assert unicode content.
self.assertTrue(isinstance(v, unicode))
# Assert download rate limiting.
self.assertTrue(t >= 0.25)
print "pattern.web.URL.download()"
def test_url_mimetype(self):
# Assert URL MIME-type.
v = web.URL(self.live).mimetype
self.assertTrue(v in web.MIMETYPE_WEBPAGE)
print "pattern.web.URL.mimetype"
def test_url_headers(self):
# Assert URL headers.
v = web.URL(self.live).headers["content-type"].split(";")[0]
self.assertEqual(v, "text/html")
print "pattern.web.URL.headers"
def test_url_redirect(self):
# Assert URL redirected URL (this depends on where you are).
# In Belgium, it yields "http://www.google.be/".
v = web.URL(self.live).redirect
print "pattern.web.URL.redirect: " + self.live + " => " + v
def test_abs(self):
# Assert absolute URL (special attention for anchors).
for a, b in (
("../page.html", "http://domain.com/path/"),
( "page.html", "http://domain.com/home.html")):
v = web.abs(a, base=b)
self.assertEqual(v, "http://domain.com/page.html")
for a, b, c in (
( "#anchor", "http://domain.com", "/"),
( "#anchor", "http://domain.com/", ""),
( "#anchor", "http://domain.com/page", "")):
v = web.abs(a, base=b)
self.assertEqual(v, b+c+a) # http://domain.com/#anchor
print "pattern.web.abs()"
def test_base(self):
# Assert base URL domain name.
self.assertEqual(web.base("http://domain.com/home.html"), "domain.com")
print "pattern.web.base()"
#---------------------------------------------------------------------------------------------------
class TestPlaintext(unittest.TestCase):
def setUp(self):
pass
def test_find_urls(self):
# Assert URL finder with common URL notations.
for url in (
"http://domain.co.uk",
"https://domain.co.uk",
"www.domain.cu.uk",
"domain.com",
"domain.org",
"domain.net"):
self.assertEqual(web.find_urls("("+url+".")[0], url)
# Assert case-insensitive and <a href="">.
# Assert several matches in string.
self.assertEqual(web.find_urls("<a href=\"HTTP://domain.net\">")[0], "HTTP://domain.net")
self.assertEqual(web.find_urls("domain.com, domain.net"), ["domain.com", "domain.net"])
print "pattern.web.find_urls()"
def test_find_email(self):
# Assert e-mail finder with common e-mail notations.
s = "firstname.last+name@domain.ac.co.uk"
v = web.find_email("("+s+".")
self.assertEqual(v[0], s)
# Assert several matches in string.
s = ["me@site1.com", "me@site2.com"]
v = web.find_email("("+",".join(s)+")")
self.assertEqual(v, s)
print "pattern.web.find_email()"
def test_find_between(self):
# Assert search between open tag and close tag.
s = "<script type='text/javascript'>alert(0);</script>"
v = web.find_between("<script","</script>", s)
self.assertEqual(v[0], " type='text/javascript'>alert(0);")
# Assert several matches in string.
s = "a0ba1b"
v = web.find_between("a", "b", s)
self.assertEqual(v, ["0", "1"])
print "pattern.web.find_between()"
def test_strip_tags(self):
# Assert HTML parser and tag stripper.
for html, plain in (
(u"<b>ünîcøde</b>", u"ünîcøde"),
( "<img src=""/>", ""),
( "<p>text</p>", "text\n\n"),
( "<li>text</li>", "* text\n"),
( "<td>text</td>", "text\t"),
( "<br /><br/><br>", "\n\n\n")):
self.assertEqual(web.strip_tags(html), plain)
# Assert exclude tags and attributes
v = web.strip_tags("<a href=\"\" onclick=\"\">text</a>", exclude={"a": ["href"]})
self.assertEqual(v, "<a href=\"\">text</a>")
print "pattern.web.strip_tags()"
def test_strip_element(self):
# Assert strip <p> elements.
v = web.strip_element(" <p><p></p>text</p> <b><P></P></b>", "p")
self.assertEqual(v, " <b></b>")
print "pattern.web.strip_element()"
def test_strip_between(self):
# Assert strip <p> elements.
v = web.strip_between("<p", "</p>", " <p><p></p>text</p> <b><P></P></b>")
self.assertEqual(v, " text</p> <b></b>")
print "pattern.web.strip_between()"
def test_strip_javascript(self):
# Assert strip <script> elements.
v = web.strip_javascript(" <script type=\"text/javascript\">text</script> ")
self.assertEqual(v, " ")
print "pattern.web.strip_javascript()"
def test_strip_inline_css(self):
# Assert strip <style> elements.
v = web.strip_inline_css(" <style type=\"text/css\">text</style> ")
self.assertEqual(v, " ")
print "pattern.web.strip_inline_css()"
def test_strip_comments(self):
# Assert strip <!-- --> elements.
v = web.strip_comments(" <!-- text --> ")
self.assertEqual(v, " ")
print "pattern.web.strip_comments()"
def test_strip_forms(self):
# Assert strip <form> elements.
v = web.strip_forms(" <form method=\"get\">text</form> ")
self.assertEqual(v, " ")
print "pattern.web.strip_forms()"
def test_encode_entities(self):
# Assert HTML entity encoder (e.g., "&" => "&&")
for a, b in (
("É", "É"),
("&", "&"),
("<", "<"),
(">", ">"),
('"', """),
("'", "'")):
self.assertEqual(web.encode_entities(a), b)
print "pattern.web.encode_entities()"
def test_decode_entities(self):
# Assert HMTL entity decoder (e.g., "&" => "&")
for a, b in (
("&", "&"),
("&", "&"),
("&", "&"),
(" ", u"\xa0"),
("&foo;", "&foo;")):
self.assertEqual(web.decode_entities(a), b)
print "pattern.web.decode_entities()"
def test_collapse_spaces(self):
# Assert collapse multiple spaces.
for a, b in (
(" ", ""),
(" .. ", ".."),
(". .", ". ."),
(". \n", "."),
("\xa0", "")):
self.assertEqual(web.collapse_spaces(a), b)
# Assert preserve indendation.
self.assertEqual(web.collapse_spaces(" . \n", indentation=True), " .")
print "pattern.web.collapse_spaces()"
def test_collapse_tabs(self):
# Assert collapse multiple tabs to 1 space.
for a, b in (
("\t\t\t", ""),
("\t..\t", ".."),
(".\t\t.", ". ."),
(".\t\n", ".")):
self.assertEqual(web.collapse_tabs(a), b)
# Assert preserve indendation.
self.assertEqual(web.collapse_tabs("\t\t .\t\n", indentation=True), "\t\t .")
print "pattern.web.collapse_tabs()"
def test_collapse_linebreaks(self):
# Assert collapse multiple linebreaks.
for a, b in (
("\n\n\n", "\n"),
(".\n\n.", ".\n."),
(".\r\n.", ".\n."),
(".\n .", ".\n ."),
(" \n .", "\n .")):
self.assertEqual(web.collapse_linebreaks(a), b)
print "pattern.web.collapse_linebreaks()"
def test_plaintext(self):
# Assert plaintext:
# - strip <script>, <style>, <form>, <!-- --> elements,
# - strip tags,
# - decode entities,
# - collapse whitespace,
html = """
<html>
<head>
<title>tags & things</title>
</head>
<body>
<div id="content"> \n\n\n\
<!-- main content -->
<script type="text/javascript>"alert(0);</script>
<h1>title1</h1>
<h2>title2</h2>
<p>paragraph1</p>
<p>paragraph2 <a href="http://www.domain.com" onclick="alert(0);">link</a></p>
<ul>
<li>item1 xxx</li>
<li>item2</li>
<ul>
</div>
<br />
<br />
</body>
</html>
"""
self.assertEqual(web.plaintext(html, keep={"a": "href"}),
u"tags & things\n\ntitle1\n\ntitle2\n\nparagraph1\n\nparagraph2 " + \
u"<a href=\"http://www.domain.com\">link</a>\n\n* item1 xxx\n* item2")
print "pattern.web.plaintext()"
#---------------------------------------------------------------------------------------------------
class TestSearchEngine(unittest.TestCase):
def setUp(self):
# Test data for all search engines:
# {api: (source, license, Engine)}.
self.api = {
"Google": (web.GOOGLE, web.GOOGLE_LICENSE, web.Google),
"Yahoo": (web.YAHOO, web.YAHOO_LICENSE, web.Yahoo),
"Bing": (web.BING, web.BING_LICENSE, web.Bing),
"Twitter": (web.TWITTER, web.TWITTER_LICENSE, web.Twitter),
"Wikipedia": (web.WIKIPEDIA, web.WIKIPEDIA_LICENSE, web.Wikipedia),
"Flickr": (web.FLICKR, web.FLICKR_LICENSE, web.Flickr),
"Facebook": (web.FACEBOOK, web.FACEBOOK_LICENSE, web.Facebook),
"Products": (web.PRODUCTWIKI, web.PRODUCTWIKI_LICENSE, web.Products)
}
def _test_search_engine(self, api, source, license, Engine, query="today", type=web.SEARCH):
# Assert SearchEngine standard interface for any api:
# Google, Yahoo, Bing, Twitter, Wikipedia, Flickr, Facebook, Products, Newsfeed.
# SearchEngine.search() returns a list of Result objects with unicode fields,
# except Wikipedia which returns a WikipediaArticle.
if api == web.YAHOO and license == ("",""):
return
t = time.time()
e = Engine(license, throttle=0.25, language="en")
v = e.search(query, type, start=1, count=1, cached=False)
t = time.time() - t
self.assertTrue(t >= 0.25)
self.assertEqual(e.license, license)
self.assertEqual(e.throttle, 0.25)
self.assertEqual(e.language, "en")
self.assertEqual(v.query, query)
if source != web.WIKIPEDIA:
self.assertEqual(v.source, source)
self.assertEqual(v.type, type)
self.assertEqual(len(v), 1)
self.assertTrue(isinstance(v[0], web.Result))
self.assertTrue(isinstance(v[0].url, unicode))
self.assertTrue(isinstance(v[0].title, unicode))
self.assertTrue(isinstance(v[0].description, unicode))
self.assertTrue(isinstance(v[0].language, unicode))
self.assertTrue(isinstance(v[0].author, unicode))
self.assertTrue(isinstance(v[0].date, unicode))
else:
self.assertTrue(isinstance(v, web.WikipediaArticle))
# Assert zero results for start < 1 and count < 1.
v1 = e.search(query, start=0)
v2 = e.search(query, count=0)
if source != web.WIKIPEDIA:
self.assertEqual(len(v1), 0)
self.assertEqual(len(v2), 0)
else:
self.assertTrue(isinstance(v1, web.WikipediaArticle))
self.assertEqual(v2, None)
# Assert SearchEngineTypeError for unknown type.
self.assertRaises(web.SearchEngineTypeError, e.search, query, type="crystall-ball")
print "pattern.web.%s.search()" % api
def test_search_google(self):
self._test_search_engine("Google", *self.api["Google"])
def test_search_yahoo(self):
self._test_search_engine("Yahoo", *self.api["Yahoo"])
def test_search_bing(self):
self._test_search_engine("Bing", *self.api["Bing"])
def test_search_twitter(self):
self._test_search_engine("Twitter", *self.api["Twitter"])
def test_search_wikipedia(self):
self._test_search_engine("Wikipedia", *self.api["Wikipedia"])
def test_search_flickr(self):
self._test_search_engine("Flickr", *self.api["Flickr"], **{"type": web.IMAGE})
def test_search_facebook(self):
self._test_search_engine("Facebook", *self.api["Facebook"])
def test_search_products(self):
self._test_search_engine("Products", *self.api["Products"])
def test_search_newsfeed(self):
for feed, url in web.feeds.items():
self._test_search_engine("Newsfeed", url, None, web.Newsfeed, query=url, type=web.NEWS)
def _test_results(self, api, source, license, Engine, type=web.SEARCH, query="today", baseline=[6,6,6,0]):
# Assert SearchEngine result content.
# We expect to find http:// URL's and descriptions containing the search query.
if api == web.YAHOO and license == ("",""):
return
i1 = 0
i2 = 0
i3 = 0
i4 = 0
e = Engine(license, language="en", throttle=0.25)
for result in e.search(query, type, count=10, cached=False):
i1 += int(result.url.startswith("http"))
i2 += int(query in result.url.lower())
i2 += int(query in result.title.lower())
i2 += int(query in result.description.lower())
i3 += int(result.language == "en")
i4 += int(result.url.endswith(("jpg","png","gif")))
#print result.url
#print result.title
#print result.description
#print i1, i2, i3, i4
self.assertTrue(i1 >= baseline[0]) # url's starting with "http"
self.assertTrue(i2 >= baseline[1]) # query in url + title + description
self.assertTrue(i3 >= baseline[2]) # language "en"
self.assertTrue(i4 >= baseline[3]) # url's ending with "jpg", "png" or "gif"
print "pattern.web.%s.Result(type=%s)" % (api, type.upper())
def test_results_google(self):
self._test_results("Google", *self.api["Google"])
def test_results_yahoo(self):
self._test_results("Yahoo", *self.api["Yahoo"])
def test_results_yahoo_images(self):
self._test_results("Yahoo", *self.api["Yahoo"], **{"type": web.IMAGE, "baseline": [6,6,0,6]})
def test_results_yahoo_news(self):
self._test_results("Yahoo", *self.api["Yahoo"], **{"type": web.NEWS})
def test_results_bing(self):
self._test_results("Bing", *self.api["Bing"])
def test_results_bing_images(self):
self._test_results("Bing", *self.api["Bing"], **{"type": web.IMAGE, "baseline": [6,6,0,6]})
def test_results_bing_news(self):
self._test_results("Bing", *self.api["Bing"], **{"type": web.NEWS})
def test_results_twitter(self):
self._test_results("Twitter", *self.api["Twitter"])
def test_results_flickr(self):
self._test_results("Flickr", *self.api["Flickr"], **{"baseline": [6,6,0,6]})
def test_results_facebook(self):
self._test_results("Facebook", *self.api["Facebook"], **{"baseline": [0,1,0,0]})
def test_google_translate(self):
try:
# Assert Google Translate API.
# Requires license with billing enabled.
source, license, Engine = self.api["Google"]
v = Engine(license, throttle=0.25).translate(u"thé", input="fr", output="en", cached=False)
self.assertEqual(v, "tea")
print "pattern.web.Google.translate()"
except web.HTTP401Authentication:
pass
def test_google_identify(self):
try:
# Assert Google Translate API (language detection).
# Requires license with billing enabled.
source, license, Engine = self.api["Google"]
v = Engine(license, throttle=0.25).identify(u"L'essence des mathématiques, c'est la liberté!", cached=False)
self.assertEqual(v[0], "fr")
print "pattern.web.Google.identify()"
except web.HTTP401Authentication:
pass
def test_twitter_author(self):
self.assertEqual(web.author("me"), "from:me")
print "pattern.web.author()"
def test_twitter_hashtags(self):
self.assertEqual(web.hashtags("#cat #dog"), ["#cat", "#dog"])
print "pattern.web.hashtags()"
def test_twitter_retweets(self):
self.assertEqual(web.retweets("RT @me: blah"), ["@me"])
print "pattern.web.retweets()"
def _test_search_image_size(self, api, source, license, Engine):
# Assert image URL's for different sizes actually exist.
if api == web.YAHOO and license == ("",""):
return
e = Engine(license, throttle=0.25)
for size in (web.TINY, web.SMALL, web.MEDIUM, web.LARGE):
v = e.search("cats", type=web.IMAGE, count=1, size=size, cached=False)
self.assertEqual(web.URL(v[0].url).exists, True)
print "pattern.web.%s.search(type=IMAGE, size=%s)" % (api, size.upper())
def test_yahoo_image_size(self):
self._test_search_image_size("Yahoo", *self.api["Yahoo"])
def test_bing_image_size(self):
self._test_search_image_size("Bing", *self.api["Bing"])
def test_flickr_image_size(self):
self._test_search_image_size("Flickr", *self.api["Flickr"])
def test_wikipedia_article(self):
source, license, Engine = self.api["Wikipedia"]
v = Engine(license).search("cat", cached=False)
# Assert WikipediaArticle properties.
self.assertTrue(isinstance(v.title, unicode))
self.assertTrue(isinstance(v.string, unicode))
self.assertTrue(isinstance(v.links, list))
self.assertTrue(isinstance(v.categories, list))
self.assertTrue(isinstance(v.external, list))
self.assertTrue(isinstance(v.media, list))
self.assertTrue(isinstance(v.languages, dict))
# Assert WikipediaArticle properties content.
self.assertTrue(v.string == v.plaintext())
self.assertTrue(v.html == v.source)
self.assertTrue("</div>" in v.source)
self.assertTrue("cat" in v.title.lower())
self.assertTrue("Felis" in v.links)
self.assertTrue("Felines" in v.categories)
self.assertTrue("en" == v.language)
self.assertTrue("fr" in v.languages)
self.assertTrue("chat" in v.languages["fr"].lower())
self.assertTrue(v.external[0].startswith("http"))
self.assertTrue(v.media[0].endswith(("jpg","png","gif","svg")))
print "pattern.web.WikipediaArticle"
def test_wikipedia_article_sections(self):
# Assert WikipediaArticle.sections structure.
# The test may need to be modified if the Wikipedia "Cat" article changes.
source, license, Engine = self.api["Wikipedia"]
v = Engine(license).search("cat", cached=False)
s1 = s2 = s3 = None
for section in v.sections:
if section.title == "Behavior":
s1 = section
if section.title == "Grooming":
s2 = section
if section.title == "Play":
s3 = section
self.assertTrue(section.article == v)
self.assertTrue(section.level == 0 or section.string.startswith(section.title))
# Test section depth.
self.assertTrue(s1.level == 1)
self.assertTrue(s2.level == 2)
self.assertTrue(s2.level == 2)
# Test section parent-child structure.
self.assertTrue(s2 in s1.children) # Behavior => Grooming
self.assertTrue(s3 in s1.children) # Behavior => Play
self.assertTrue(s2.parent == s1)
self.assertTrue(s3.parent == s1)
# Test section content.
self.assertTrue("hairballs" in s2.content)
self.assertTrue("laser pointer" in s3.content)
# Test section tables.
# XXX should test <td colspan="x"> more thoroughly.
self.assertTrue(len(v.sections[1].tables) > 0)
print "pattern.web.WikipediaSection"
def test_products(self):
# Assert product reviews and score.
source, license, Engine = self.api["Products"]
v = Engine(license).search("computer", cached=False)
self.assertTrue(isinstance(v[0].reviews, list))
self.assertTrue(isinstance(v[0].score, int))
print "pattern.web.Products.Result.reviews"
print "pattern.web.Products.Result.score"
#---------------------------------------------------------------------------------------------------
class TestDOM(unittest.TestCase):
    """ Tests the HTML DOM parser (Document, Node, Element) on a fixed
        HTML fixture. The assertions below are tied to the exact node
        order of this fixture; edit it with care.
    """
    def setUp(self):
        # Test HTML document.
        self.html = """
            <!doctype html>
            <html lang="en">
            <head>
                <title>title</title>
                <meta charset="utf-8" />
            </head>
            <body id="front" class="comments">
                <script type="text/javascript">alert(0);</script>
                <div id="navigation">
                    <a href="nav1.html">nav1</a> |
                    <a href="nav2.html">nav2</a> |
                    <a href="nav3.html">nav3</a>
                </div>
                <div id="content">
                    <P class="comment">
                        <span class="date">today</span>
                        <span class="author">me</span>
                        Blah blah
                    </P>
                    <p>Read more</p>
                </div>
            </body>
            </html>
        """
    def test_node_document(self):
        # Assert Node properties.
        v1 = web.Document(self.html)
        self.assertEqual(v1.type, web.DOCUMENT)
        self.assertEqual(v1.source[:10], "<!doctype ") # Note: BeautifulSoup strips whitespace.
        self.assertEqual(v1.parent, None)
        # Assert Node traversal (next/previous siblings).
        v2 = v1.children[0].next
        self.assertEqual(v2.type, web.TEXT)
        self.assertEqual(v2.previous, v1.children[0])
        # Assert Document properties: the doctype declaration is the
        # document's first child node.
        v3 = v1.declaration
        self.assertEqual(v3, v1.children[0])
        self.assertEqual(v3.parent, v1)
        self.assertEqual(v3.source, "<!doctype html>")
        self.assertEqual(v1.head.type, web.ELEMENT)
        self.assertEqual(v1.body.type, web.ELEMENT)
        self.assertTrue(v1.head.source.startswith("<head"))
        self.assertTrue(v1.body.source.startswith("<body"))
        print "pattern.web.Node"
        print "pattern.web.Document"
    def test_node_traverse(self):
        # Assert Node.traverse() (must visit all child nodes recursively).
        # The visitor closure flips self.b once a <span> element is seen;
        # <span> only occurs deep inside <div id="content">.
        self.b = False
        def visit(node):
            if node.type == web.ELEMENT and node.tag == "span":
                self.b = True
        v = web.Document(self.html)
        v.traverse(visit)
        self.assertEqual(self.b, True)
        print "pattern.web.Node.traverse()"
    def test_element(self):
        # Assert Element properties (test <body>).
        v = web.Document(self.html).body
        self.assertEqual(v.tag, "body")
        self.assertEqual(v.attributes["id"], "front")
        self.assertEqual(v.attributes["class"], "comments")
        self.assertTrue(v.content.startswith("\n<script"))
        # Assert Element.getElementsByTagname() (test navigation links).
        a = v.by_tag("a")
        self.assertEqual(len(a), 3)
        self.assertEqual(a[0].content, "nav1")
        self.assertEqual(a[1].content, "nav2")
        self.assertEqual(a[2].content, "nav3")
        # Assert Element.getElementsByClassname() (test <p class="comment">).
        # Note: tag matching is case-insensitive (<P> is found as "p").
        a = v.by_class("comment")
        self.assertEqual(a[0].tag, "p")
        self.assertEqual(a[0].by_tag("span")[0].attributes["class"], "date")
        self.assertEqual(a[0].by_tag("span")[1].attributes["class"], "author")
        # by_tag() also accepts CSS-style class selectors.
        for selector in (".comment", "p.comment", "*.comment"):
            self.assertEqual(v.by_tag(selector)[0], a[0])
        # Assert Element.getElementById() (test <div id="content">).
        e = v.by_id("content")
        self.assertEqual(e.tag, "div")
        self.assertEqual(e, a[0].parent)
        # by_tag() also accepts CSS-style id selectors.
        for selector in ("#content", "div#content", "*#content"):
            self.assertEqual(v.by_tag(selector)[0], e)
        # Assert Element.getElementByAttribute() (test on <a href="">).
        a = v.by_attribute(href="nav1.html")
        self.assertEqual(a[0].content, "nav1")
        print "pattern.web.Node.Element"
        print "pattern.web.Node.Element.by_tag()"
        print "pattern.web.Node.Element.by_class()"
        print "pattern.web.Node.Element.by_id()"
        print "pattern.web.Node.Element.by_attribute()"
#---------------------------------------------------------------------------------------------------
class TestPDF(unittest.TestCase):
def setUp(self):
pass
def test_pdf(self):
# Assert PDF to string parser.
v = web.PDF(open("corpora/carroll-alice.pdf").read())
self.assertTrue("Curiouser and curiouser!" in v.string)
self.assertTrue(isinstance(v.string, unicode))
print "pattern.web.PDF.string"
#---------------------------------------------------------------------------------------------------
class TestLocale(unittest.TestCase):
def setUp(self):
pass
def test_encode_language(self):
# Assert "Dutch" => "nl".
self.assertEqual(web.locale.encode_language("dutch"), "nl")
self.assertEqual(web.locale.encode_language("?????"), None)
print "pattern.web.locale.encode_language()"
def test_decode_language(self):
# Assert "nl" => "Dutch".
self.assertEqual(web.locale.decode_language("nl"), "Dutch")
self.assertEqual(web.locale.decode_language("NL"), "Dutch")
self.assertEqual(web.locale.decode_language("??"), None)
print "pattern.web.locale.decode_language()"
def test_encode_region(self):
# Assert "Belgium" => "BE".
self.assertEqual(web.locale.encode_region("belgium"), "BE")
self.assertEqual(web.locale.encode_region("???????"), None)
print "pattern.web.locale.encode_region()"
def test_decode_region(self):
# Assert "BE" => "Belgium".
self.assertEqual(web.locale.decode_region("be"), "Belgium")
self.assertEqual(web.locale.decode_region("BE"), "Belgium")
self.assertEqual(web.locale.decode_region("??"), None)
print "pattern.web.locale.decode_region()"
def test_languages(self):
# Assert "BE" => "fr" + "nl".
self.assertEqual(web.locale.languages("be"), ["fr", "nl"])
print "pattern.web.locale.languages()"
def test_regions(self):
# Assert "nl" => "NL" + "BE".
self.assertEqual(web.locale.regions("nl"), ["NL", "BE"])
print "pattern.web.locale.regions()"
def test_regionalize(self):
# Assert "nl" => "nl-NL" + "nl-BE".
self.assertEqual(web.locale.regionalize("nl"), ["nl-NL", "nl-BE"])
print "pattern.web.locale.regionalize()"
def test_geocode(self):
# Assert region geocode.
v = web.locale.geocode("brussels")
self.assertAlmostEqual(v[0], 50.83, places=2)
self.assertAlmostEqual(v[1], 4.33, places=2)
self.assertEqual(v[2], "nl")
self.assertEqual(v[3], "Belgium")
print "pattern.web.locale.geocode()"
def test_correlation(self):
# Test the correlation between locale.LANGUAGE_REGION and locale.GEOCODE.
# It should increase as new languages and locations are added.
i = 0
n = len(web.locale.GEOCODE)
for city, (latitude, longitude, language, region) in web.locale.GEOCODE.items():
if web.locale.encode_region(region) is not None:
i += 1
self.assertTrue(float(i) / n > 0.60)
#---------------------------------------------------------------------------------------------------
# You need to define a username, password and mailbox to test on.
class TestMail(unittest.TestCase):
    """ Tests the IMAP mail client (pattern.web.imap).
        All tests silently skip unless setUp() is given real credentials.
    """
    def setUp(self):
        # Fill in a real username and password to enable these tests.
        self.username = ""
        self.password = ""
        self.service = web.GMAIL
        self.port = 993
        self.SSL = True
        self.query1 = "google" # FROM-field query in Inbox.
        self.query2 = "viagra" # SUBJECT-field query in Spam.
    def test_mail(self):
        # Skip when no credentials are configured.
        if not self.username or not self.password:
            return
        # Assert web.imap.Mail.
        m = web.Mail(self.username, self.password, service=self.service, port=self.port, secure=self.SSL)
        # Assert web.imap.MailFolder (assuming GMail folders).
        print m.folders
        self.assertTrue(len(m.folders) > 0)
        self.assertTrue(len(m.inbox) > 0)
        print "pattern.web.Mail"
    def test_mail_message1(self):
        # Skip when no credentials or no FROM-query are configured.
        if not self.username or not self.password or not self.query1:
            return
        # Assert web.imap.Mailfolder.search().
        m = web.Mail(self.username, self.password, service=self.service, port=self.port, secure=self.SSL)
        a = m.inbox.search(self.query1, field=web.FROM)
        # search() returns a list of message id's (int).
        self.assertTrue(isinstance(a[0], int))
        # Assert web.imap.Mailfolder.read().
        e = m.inbox.read(a[0], attachments=False, cached=False)
        # Assert web.imap.Message: all text fields must be unicode.
        self.assertTrue(isinstance(e, web.imap.Message))
        self.assertTrue(isinstance(e.author, unicode))
        self.assertTrue(isinstance(e.email_address, unicode))
        self.assertTrue(isinstance(e.date, unicode))
        self.assertTrue(isinstance(e.subject, unicode))
        self.assertTrue(isinstance(e.body, unicode))
        self.assertTrue(self.query1 in e.author.lower())
        self.assertTrue("@" in e.email_address)
        print "pattern.web.Mail.search(field=FROM)"
        print "pattern.web.Mail.read()"
    def test_mail_message2(self):
        # Skip when no credentials or no SUBJECT-query are configured.
        if not self.username or not self.password or not self.query2:
            return
        # Test if we can download some mail attachments.
        # Set query2 to a mail subject of a spam e-mail you know contains an attachment.
        m = web.Mail(self.username, self.password, service=self.service, port=self.port, secure=self.SSL)
        if "spam" in m.folders:
            for id in m.spam.search(self.query2, field=web.SUBJECT):
                e = m.spam.read(id, attachments=True, cached=False)
                if len(e.attachments) > 0:
                    # Attachments are (MIME-type, data) pairs (see the
                    # print statement below); data is a byte string.
                    self.assertTrue(isinstance(e.attachments[0][1], str))
                    self.assertTrue(len(e.attachments[0][1]) > 0)
                    print "pattern.web.Message.attachments (MIME-type: %s)" % e.attachments[0][0]
        print "pattern.web.Mail.search(field=SUBJECT)"
        print "pattern.web.Mail.read()"
#---------------------------------------------------------------------------------------------------
class TestSpider(unittest.TestCase):
def setUp(self):
pass
def test_link(self):
# Assert web.Link parser and properties.
v = web.HTMLLinkParser().parse("""
<html>
<head>
<title>title</title>
</head>
<body>
<div id="navigation">
<a href="http://www.domain1.com/?p=1" title="1" rel="a">nav1</a>
<a href="http://www.domain2.com/?p=2" title="2" rel="b">nav1</a>
</div>
</body>
</html>
""", "http://www.domain.com/")
self.assertTrue(v[0].url, "http://www.domain1.com/?p=1")
self.assertTrue(v[1].url, "http://www.domain1.com/?p=2")
self.assertTrue(v[0].description, "1")
self.assertTrue(v[1].description, "2")
self.assertTrue(v[0].relation, "a")
self.assertTrue(v[1].relation, "b")
self.assertTrue(v[0].referrer, "http://www.domain.com/")
self.assertTrue(v[1].referrer, "http://www.domain.com/")
self.assertTrue(v[0] < v[1])
print "pattern.web.HTMLLinkParser"
def test_spider_crawl(self):
# Assert domain filter.
v = web.Spider(links=["http://www.clips.ua.ac.be/"], domains=["clips.ua.ac.be"], delay=0.5)
while len(v.visited) < 4:
v.crawl(throttle=0.1, cached=False)
for url in v.visited:
self.assertTrue("clips.ua.ac.be" in url)
self.assertTrue(len(v.history) == 1)
print "pattern.web.Spider.crawl()"
def test_spider_delay(self):
# Assert delay for several crawls to a single domain.
v = web.Spider(links=["http://www.clips.ua.ac.be/"], domains=["clips.ua.ac.be"], delay=1.0)
v.crawl()
t = time.time()
while not v.crawl(throttle=0.1, cached=False):
pass
t = time.time() - t
self.assertTrue(t > 1.0)
print "pattern.web.Spider.delay"
def test_spider_breadth(self):
# Assert BREADTH cross-domain preference.
v = web.Spider(links=["http://www.clips.ua.ac.be/"], delay=10)
while len(v.visited) < 4:
v.crawl(throttle=0.1, cached=False, method=web.BREADTH)
self.assertTrue(v.history.keys()[0] != v.history.keys()[1])
self.assertTrue(v.history.keys()[0] != v.history.keys()[2])
self.assertTrue(v.history.keys()[1] != v.history.keys()[2])
print "pattern.web.Spider.crawl(method=BREADTH)"
#---------------------------------------------------------------------------------------------------
def suite():
    # Bundle all test cases of this module into a single suite.
    loader = unittest.TestLoader()
    tests = unittest.TestSuite()
    for case in (
      TestCache,
      TestUnicode,
      TestURL,
      TestPlaintext,
      TestSearchEngine,
      TestDOM,
      TestPDF,
      TestLocale,
      TestMail,
      TestSpider):
        tests.addTest(loader.loadTestsFromTestCase(case))
    return tests

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=1).run(suite())
|
piskvorky/pattern
|
test/test_web.py
|
Python
|
bsd-3-clause
| 41,751
|
[
"VisIt"
] |
a3ed799878945fb1c613f0f1df0f9f72d282afedf1edf76df900046bf632f850
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pyscf import scf
from pyscf import gto
from pyscf import mcscf
# Ethylene (C2H4) in a cc-pVDZ basis; all test cases below share this
# molecule and the converged RHF reference wavefunction.
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'  # discard the verbose SCF/integral log
mol.atom = [
    ["C", (-0.65830719, 0.61123287, -0.00800148)],
    ["C", ( 0.73685281, 0.61123287, -0.00800148)],
    ["H", ( 1.43439081, 1.81898387, -0.00800148)],
    ["H", (-1.35568919, 1.81920887, -0.00868348)],
    ["H", (-1.20806619, -0.34108413, -0.00755148)],
    ["H", ( 1.28636081, -0.34128013, -0.00668648)],]
mol.basis = {'H': 'cc-pvdz',
             'C': 'cc-pvdz',}
mol.build()

# Tightly converged RHF reference (conv_tol=1e-12) for the CAS calculations.
mf = scf.RHF(mol)
mf.conv_tol = 1e-12
mf.scf()
def tearDownModule():
    """Close the SCF output stream and release module-level objects."""
    global mol, mf
    mol.stdout.close()
    del mol, mf
class KnownValues(unittest.TestCase):
    """Regression tests: CASCI/CASSCF energies for ethylene (cc-pVDZ/RHF).

    Reference energies (Hartree) are checked to 6-7 decimal places.
    """

    def test_casci_4o4e(self):
        # CASCI with a (4 orbitals, 4 electrons) active space.
        mc = mcscf.CASCI(mf, 4, 4)
        emc = mc.casci()[0]
        self.assertAlmostEqual(emc, -77.9734951776, 7)

    def test_casci_6o4e(self):
        # CASCI with a (6 orbitals, 4 electrons) active space.
        mc = mcscf.CASCI(mf, 6, 4)
        emc = mc.casci()[0]
        self.assertAlmostEqual(emc, -77.9746683275, 7)

    def test_casci_6o6e(self):
        # CASCI with a (6 orbitals, 6 electrons) active space.
        mc = mcscf.CASCI(mf, 6, 6)
        emc = mc.casci()[0]
        self.assertAlmostEqual(emc, -77.9804561351, 7)

    def test_mc2step_6o6e_high_cost(self):
        # CASSCF (6o,6e) converged with the two-step algorithm.
        mc = mcscf.CASSCF(mf, 6, 6)
        mc.conv_tol = 1e-8
        emc = mc.mc2step()[0]
        self.assertAlmostEqual(emc, -78.0390051207, 7)

    def test_mc1step_6o6e_high_cost(self):
        # CASSCF (6o,6e) with the one-step algorithm; same reference energy.
        mc = mcscf.CASSCF(mf, 6, 6)
        mc.conv_tol = 1e-8
        emc = mc.mc1step()[0]
        self.assertAlmostEqual(emc, -78.0390051207, 7)

    def test_mc2step_4o4e_high_cost(self):
        mc = mcscf.CASSCF(mf, 4, 4)
        mc.conv_tol = 1e-8
        emc = mc.mc2step()[0]
        # The commented line preserves an alternative reference value kept
        # from the original source.
        #?self.assertAlmostEqual(emc, -78.0103838390, 6)
        self.assertAlmostEqual(emc, -77.9916207871, 6)

    def test_mc1step_4o4e_high_cost(self):
        mc = mcscf.CASSCF(mf, 4, 4)
        # Temporarily enable the micro-iteration scheduler, remembering the
        # previous module-level flag value in `bak`.
        mcscf.mc1step.WITH_MICRO_SCHEDULER, bak = True, mcscf.mc1step.WITH_MICRO_SCHEDULER
        mc.conv_tol = 1e-8
        emc = mc.mc1step()[0]
        # NOTE(review): restoration is not in a try/finally, so a failing
        # run leaves the scheduler enabled for subsequent tests.
        mcscf.mc1step.WITH_MICRO_SCHEDULER = bak
        self.assertAlmostEqual(emc, -78.0103838390, 6)
# Allow running this test module directly.
if __name__ == "__main__":
    print("Full Tests for C2H4")
    unittest.main()
|
gkc1000/pyscf
|
pyscf/mcscf/test/test_c2h4.py
|
Python
|
apache-2.0
| 2,841
|
[
"PySCF"
] |
6276f1327c741807e869b11138efe5e9c1795ab44e572c4ea0c12f4b6c6ef648
|
# Copyright (c) 2012-2016 The ANTLR Project. All rights reserved.
# Use of this file is governed by the BSD 3-clause license that
# can be found in the LICENSE.txt file in the project root.
#/
# The basic notion of a tree has a parent, a payload, and a list of children.
# It is the most abstract interface for all the trees used by ANTLR.
#/
from antlr4.Token import Token
# Source interval returned when no token index information is available.
INVALID_INTERVAL = (-1, -2)

class Tree(object):
    """Most abstract tree interface (marker class; no behavior here)."""
    pass

class SyntaxTree(Tree):
    """Marker: a tree whose nodes relate to spans of the input (see
    getSourceInterval on node implementations)."""
    pass

class ParseTree(SyntaxTree):
    """Marker: a syntax tree produced by parsing."""
    pass

class RuleNode(ParseTree):
    """Marker: an interior node corresponding to a grammar rule."""
    pass

class TerminalNode(ParseTree):
    """Marker: a leaf node wrapping a single token."""
    pass

class ErrorNode(TerminalNode):
    """Marker: a terminal node created during error recovery."""
    pass
class ParseTreeVisitor(object):
    """Base visitor: dispatches on node type and folds child results.

    Subclasses typically override defaultResult(), aggregateResult(),
    shouldVisitNextChild() and the per-node visit methods.
    """

    def visit(self, tree):
        """Double-dispatch entry point: the node selects the callback."""
        return tree.accept(self)

    def visitChildren(self, node):
        """Visit each child in order, folding the results together.

        Stops early (returning None) as soon as shouldVisitNextChild()
        vetoes the next child.
        """
        aggregate = self.defaultResult()
        for index in range(node.getChildCount()):
            if not self.shouldVisitNextChild(node, aggregate):
                return
            child_outcome = node.getChild(index).accept(self)
            aggregate = self.aggregateResult(aggregate, child_outcome)
        return aggregate

    def visitTerminal(self, node):
        """Terminal nodes contribute the default result."""
        return self.defaultResult()

    def visitErrorNode(self, node):
        """Error nodes contribute the default result."""
        return self.defaultResult()

    def defaultResult(self):
        """Seed value for aggregation; None unless overridden."""
        return None

    def aggregateResult(self, aggregate, nextResult):
        """Default folding rule: keep only the most recent child's result."""
        return nextResult

    def shouldVisitNextChild(self, node, currentResult):
        """Default: always continue visiting children."""
        return True
# Placeholder so the annotations below resolve at class-creation time --
# presumably ParserRuleContext cannot be imported here without a circular
# import (TODO confirm). The placeholder is deleted right after the class.
ParserRuleContext = None

class ParseTreeListener(object):
    """No-op base listener; subclasses override only the callbacks they need."""

    def visitTerminal(self, node:TerminalNode):
        pass

    def visitErrorNode(self, node:ErrorNode):
        pass

    def enterEveryRule(self, ctx:ParserRuleContext):
        pass

    def exitEveryRule(self, ctx:ParserRuleContext):
        pass

del ParserRuleContext
class TerminalNodeImpl(TerminalNode):
    """Concrete leaf node wrapping a single Token."""

    def __init__(self, symbol:Token):
        self.parentCtx = None  # set externally when attached to a parent
        self.symbol = symbol

    def __setattr__(self, key, value):
        # NOTE(review): delegates straight to the default __setattr__ and
        # adds nothing; appears redundant -- confirm before removing.
        super().__setattr__(key, value)

    def getChild(self, i:int):
        """Leaves have no children; always returns None."""
        return None

    def getSymbol(self):
        return self.symbol

    def getParent(self):
        return self.parentCtx

    def getPayload(self):
        """The payload of a terminal node is its token."""
        return self.symbol

    def getSourceInterval(self):
        """Return (tokenIndex, tokenIndex), or INVALID_INTERVAL if there is
        no wrapped token."""
        if self.symbol is None:
            return INVALID_INTERVAL
        tokenIndex = self.symbol.tokenIndex
        return (tokenIndex, tokenIndex)

    def getChildCount(self):
        return 0

    def accept(self, visitor:ParseTreeVisitor):
        # Double-dispatch to the terminal-node callback.
        return visitor.visitTerminal(self)

    def getText(self):
        return self.symbol.text

    def __str__(self):
        # EOF tokens render as a readable marker instead of their text.
        if self.symbol.type == Token.EOF:
            return "<EOF>"
        else:
            return self.symbol.text
# Represents a token that was consumed during resynchronization
# rather than during a valid match operation. For example,
# we will create this kind of a node during single token insertion
# and deletion as well as during "consume until error recovery set"
# upon no viable alternative exceptions.
class ErrorNodeImpl(TerminalNodeImpl,ErrorNode):
    """Terminal node for tokens consumed during error recovery."""

    def __init__(self, token:Token):
        super().__init__(token)

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to the error-node callback instead of the terminal one.
        return visitor.visitErrorNode(self)
class ParseTreeWalker(object):
    """Depth-first walker that fires ParseTreeListener callbacks."""

    # Shared default instance; bound right after the class definition below.
    DEFAULT = None

    def walk(self, listener:ParseTreeListener, t:ParseTree):
        """Recursively walk `t`, notifying `listener` at each node.

        Error nodes and terminals get a single visit callback; rule nodes
        get enter/exit callbacks wrapped around their children.
        """
        if isinstance(t, ErrorNode):
            listener.visitErrorNode(t)
            return
        elif isinstance(t, TerminalNode):
            listener.visitTerminal(t)
            return
        self.enterRule(listener, t)
        for child in t.getChildren():
            self.walk(listener, child)
        self.exitRule(listener, t)

    #
    # The discovery of a rule node involves sending two events: the generic
    # {@link ParseTreeListener#enterEveryRule} and a
    # {@link RuleContext}-specific event. First we trigger the generic and then
    # the rule-specific one. We do them in reverse order upon finishing the node.
    #
    def enterRule(self, listener:ParseTreeListener, r:RuleNode):
        ctx = r.getRuleContext()
        listener.enterEveryRule(ctx)
        ctx.enterRule(listener)

    def exitRule(self, listener:ParseTreeListener, r:RuleNode):
        # Mirror image of enterRule: rule-specific callback first.
        ctx = r.getRuleContext()
        ctx.exitRule(listener)
        listener.exitEveryRule(ctx)

ParseTreeWalker.DEFAULT = ParseTreeWalker()
|
wjkohnen/antlr4
|
runtime/Python3/src/antlr4/tree/Tree.py
|
Python
|
bsd-3-clause
| 4,397
|
[
"VisIt"
] |
b1848f396d3d40861a0b4034b8077040620f80ce61fcec454f9a4a7f4c38b9be
|
"""Sum of real parts of Gaussian-integer divisors (Project Euler 153).

For every n <= limit, sum the real parts of all Gaussian-integer divisors
a+bi (a > 0) of n, and add these sums together.
"""
# fractions.gcd was removed in Python 3.9 and math.isqrt (3.8+) replaces the
# project-local helper, so the standard library now covers both needs.
from itertools import combinations_with_replacement
from math import gcd, isqrt


def solve(limit):
    """Return sum_{n=1..limit} s(n), where s(n) is the sum of the real
    parts of the Gaussian-integer divisors of n with positive real part.

    :param int limit: upper bound N (e.g. 10**8 for the original problem).
    :return: the total as an int (0 for limit <= 0).
    """
    # Real (integer) divisors: each i divides limit//i multiples <= limit.
    total = sum((limit // i) * i for i in range(1, limit + 1))
    # Non-real Gaussian divisors: enumerate primitive a+bi (gcd(a,b)=1),
    # then account for all multiples k*(a+bi) whose norm divides some n.
    for a, b in combinations_with_replacement(range(1, isqrt(limit) + 1), 2):
        if gcd(a, b) != 1:
            continue
        norm = a * a + b * b
        contribution = a + b
        if a != b:
            # a+bi and b+ai are distinct reflections; count both.
            contribution *= 2
        for k in range(1, limit // norm + 1):
            total += k * contribution * (limit // (norm * k))
    return total


if __name__ == "__main__":
    print(solve(100000000))
|
peterstace/project-euler
|
OLD_PY_CODE/project_euler_old_old/153/rev3.py
|
Python
|
unlicense
| 639
|
[
"Gaussian"
] |
5a3670b10cfae26ef4664255eba49bd9f163520663957f91046a7c46be055a33
|
"""
Calculate ejection velocity distribution
Use Wiegert (2014) Formalism
Citation: https://arxiv.org/pdf/1404.2159.pdf
"""
import numpy as np
from scipy.stats import norm as gaussian
from matplotlib import use
use('Agg')
import matplotlib.pyplot as plt
#############################################################
#MACROS AND BEHAVIOR
#############################################################
# Uncomment to make the Monte-Carlo run reproducible.
#np.random.seed(7)
# Shorthands: rand(mean, std) draws from a normal distribution;
# norm() is the Euclidean vector norm.
rand=np.random.normal
norm=np.linalg.norm
# Set to 1 to print step-by-step diagnostics for every test particle.
verbose=0
def uniform(a, b):
    """Draw one uniformly distributed random number in [a, b)."""
    width = b - a
    return a + width * np.random.rand()
def cart2sph(x, y, z):
    """Convert Cartesian coordinates to spherical angles in degrees.

    Returns (psi, phi): psi is the elevation above the x-y plane
    (90 deg minus the polar angle) and phi the azimuth from +x,
    resolved into the correct quadrant from the signs of x and y.
    NOTE: x == 0 triggers a division error in arctan(y/x).
    """
    rho = np.sqrt(x**2 + y**2)
    radius = np.sqrt(rho**2 + z**2)
    psi = np.pi/2 - np.arccos(z/radius)
    raw_phi = np.arctan(y/x)
    if x > 0:
        phi = raw_phi
    else:
        phi = raw_phi + np.pi if y > 0 else raw_phi - np.pi
    return psi*180/np.pi, phi*180/np.pi
#############################################################
#UNITS & CONSTANTS
#############################################################
AU=1.496e11 #m
MSUN=1.98e30 #kg
GCONST=6.67e-11 #m^3/(kg s^2)
# Degree <-> radian conversion factors.
RAD=180/np.pi
DEG=1/RAD
# Canonical units: G=1, length = 1 AU, mass = 1 Msun; the time unit UT
# follows from dimensional analysis, and UV is the derived velocity unit
# used to convert results back to SI (m/s).
G=1.0
UL=1*AU
UM=1*MSUN
UT=np.sqrt(G*UL**3/(GCONST*UM))
UV=UL/UT
if verbose:print("Time unit:",UT)
#############################################################
#INITIAL CONDITIONS
#############################################################
#Stellar mass
Ms=1.0
# Planet: semi-major axis (canonical AU), mass (Msun) and radius (length units).
ap=3.0
Mp=1e-3
Rp=7e7/UL
#Derived
mu=G*Ms  # stellar gravitational parameter
RH=ap*(Mp/(3*Ms))**(1./3)  # Hill radius
vp=np.sqrt(mu/ap)  # circular orbital speed at ap
vesc=np.sqrt(2*mu/ap)  # escape speed from the system at ap
mup=G*Mp  # planetary gravitational parameter
#Components of velocity
vpx=-vp
vpy=0
vpz=0
vp=np.array([vpx,vpy,vpz])  # NOTE: rebinds vp from scalar speed to vector
if verbose:
    # NOTE(review): the inner "if verbose:" tests are redundant inside this
    # outer guard -- kept as in the original.
    if verbose:print("Planetary radius:",Rp)
    if verbose:print("mu:",mu)
    if verbose:print("Hill radius:",RH)
    if verbose:print("Planetary orbital velocity: ",vp)
    if verbose:print("Planetary system escape velocity:",vesc)
    if verbose:print("mup:",mup)
#############################################################
#GENERATE RANDOM ELEMENTS
#############################################################
# Monte-Carlo loop: n counts accepted (ejected) particles, k counts trials.
Npart=5000
n=0
k=0
vinfs=[]
while n<Npart:
    k+=1
    if verbose:
        print("Test particle:",n)
        #input()
    #GENERATE RANDOM ASTROCENTRIC VELOCITIES FOR TEST PARTICLES
    # Rejection-sample orbital elements until the orbit crosses the
    # planet's orbit (|cos w| <= 1).
    coswb=2
    while np.abs(coswb)>1:
        ab=rand(ap,ap/2)
        if ab<0:continue
        eb=np.random.rand()
        pb=ab*(1-eb**2)
        ib=uniform(0,90)
        hop=np.sqrt(mu/pb)
        Ob=0.0
        if np.random.rand()>0.5:Ob=180.0
        if Ob==0:
            coswb=(pb-ap)/(ap*eb)
            wpf=0.0
        else:
            coswb=(ap-pb)/(ap*eb)
            wpf=180.0
    wb=np.arccos(coswb)*RAD
    vi=np.sqrt(2*mu/ap-mu/ab)  # vis-viva speed at the planet's orbital radius
    if verbose:print("Random elements: ab=%f,eb=%f,ib=%f,Ob=%f,wb=%f,wb+f=%f"%\
        (ab,eb,ib,Ob,wb,wpf))
    if verbose:print("Incoming astrocentric velocity:",vi)
    # Astrocentric velocity components from the sampled orbital elements.
    xdot=-hop*(np.cos(Ob*DEG)*(eb*np.sin(wb*DEG)+np.sin(wpf*DEG))+\
        np.sin(Ob*DEG)*np.cos(ib*DEG)*(eb*np.cos(wb*DEG)+np.cos(wpf*DEG)))
    ydot=-hop*(np.sin(Ob*DEG)*(eb*np.sin(wb*DEG)+np.cos(wpf*DEG))-\
        np.cos(Ob*DEG)*np.cos(ib*DEG)*(eb*np.cos(wb*DEG)+np.cos(wpf*DEG)))
    zdot=+hop*np.sin(ib*DEG)*(eb*np.cos(wb*DEG)+\
        np.cos(wpf*DEG))
    if verbose:print("\tAstrocentric velocity (Zuluaga RF) :",xdot,ydot,zdot,
        " (%lf)"%np.sqrt(xdot**2+ydot**2+zdot**2))
    if verbose:print("\tAstrocentric velocity (Wiegert RF) :",-ydot,+xdot,zdot,
        " (%lf)"%np.sqrt(xdot**2+ydot**2+zdot**2))
    #PLANETOCENTRIC VELOCITY (ROTATED FOR COMPLAIN WEIGERT 2014)
    xdotrel=(-ydot)-vpx
    ydotrel=(+xdot)-vpy
    zdotrel=(+zdot)-vpz
    # NOTE(review): vrho and vr are computed from the same expression; vrho
    # appears unused below -- confirm before removing.
    vrho=np.sqrt(xdotrel**2+ydotrel**2+zdotrel**2)
    vr=np.sqrt(xdotrel**2+ydotrel**2+zdotrel**2)
    if verbose:print("\tPlanetocentric velocity :",xdotrel,ydotrel,zdotrel,
        " (%lf)"%vr)
    Vi=np.array([xdotrel,ydotrel,zdotrel])
    if verbose:print("\tIncoming velocity: ",Vi)
    #PLANETOCENTRIC DIRECTION
    psi,phi=cart2sph(xdotrel,ydotrel,zdotrel)
    if verbose:print("\tIncoming direction phi=%f,psi=%f"%(phi,psi))
    #GENERATE RANDOM INCOMING IMPACT PARAMETER
    # Entry point sampled on a fraction fh of the Hill radius.
    fh=0.1
    xtp=uniform(-fh*RH,fh*RH)
    ytp=uniform(-fh*RH,fh*RH)
    beta=np.arcsin(xtp/RH)*RAD;csi=np.arcsin(ytp/RH)*RAD
    if verbose:print("\tIncoming impact parameter beta=%f,csi=%f"%(beta,csi))
    #INCOMING POSITION
    Ri=fh*RH*np.array([-np.sin((phi+beta)*DEG)*np.cos((psi+csi)*DEG),
        +np.cos((phi+beta)*DEG)*np.cos((psi+csi)*DEG),
        +np.sin((psi+csi)*DEG)])
    if verbose:print("\tIncoming position: ",Ri)
    #COMPUTE U
    U=np.cross(Ri,Vi)  # specific angular momentum of the encounter
    if verbose:print("\tIncoming pole: ",U)
    u=U/norm(U)
    #IS THE PLANET APPROACHING?
    qap=np.dot(Ri,Vi)
    if qap>0:
        if verbose:print("\t\tParticle is receeding")
        continue
    #IMPACT PARAMETER
    B=np.linalg.norm(U)/np.linalg.norm(Vi)
    if verbose:print("\tImpact parameter: ",B)
    if B<1.1*Rp:
        # NOTE(review): unlike the pericenter check below, this collision
        # branch does NOT "continue", so the particle is still processed --
        # confirm whether that is intended.
        if verbose:print("\t\tObject collided")
    #GAMMA
    # Two-body deflection angle of the hyperbolic flyby (degrees).
    Vin=norm(Vi)
    gamma=2*np.arctan(mup/(B*Vin*Vin))*RAD
    if verbose:print("\tGamma: ",gamma)
    #PLANETOCENTRIC ECCENTRICITY
    e=1/np.sin(gamma*DEG/2)
    if verbose:print("\te (planetocentric): ",e)
    #PERICENTER
    q=mup*(e-1)/(Vin*Vin)
    if verbose:print("\tPericenter: ",q)
    if q<1.1*Rp:
        if verbose:print("\t\tObject collided")
        continue
    #ROTATION OF INCOMING VELOCITY VECTOR
    # Axis-angle rotation of Vi by gamma about the unit pole u.
    # NOTE(review): entry M[1][2] reads (1-c)*ux*uy-s*ux; the standard
    # axis-angle matrix has (1-c)*uy*uz-s*ux there -- possible typo, verify.
    c=np.cos(gamma*DEG);s=np.sin(gamma*DEG)
    ux=u[0];uy=u[1];uz=u[2]
    M=np.array([[c+(1-c)*ux**2,(1-c)*uy*ux-s*uz,(1-c)*uz*ux+s*uy],
        [(1-c)*ux*uy+s*uz,c+(1-c)*uy**2,(1-c)*ux*uy-s*ux],
        [(1-c)*ux*uz-s*uy,(1-c)*uy*uz+s*ux,c+(1-c)*uz**2]])
    Vf=np.dot(M,Vi)
    if verbose:print("\tOutbound velocity: ",Vf)
    #ASTROCENTRIC OUTBOUND VELOCITY
    vf=Vf+vp
    vfn=norm(vf)
    if verbose:print("\tOutbound astrocentric velocity: ",vf)
    #CHECK IF OBJECT IS BOUND
    if vfn<vesc:
        if verbose:print("\t\tObject is still bound (vfn = %e, vesc = %e)\n"%(vfn,vesc))
        continue
    #INFINITE VELOCITY
    # Hyperbolic excess speed, converted to km/s via the velocity unit UV.
    vinf=np.sqrt(vfn**2-vesc**2)
    if verbose:print("\tVelocity at infinite (km/s): ",vinf*UV/1e3)
    n+=1
    vinfs+=[vinf*UV/1e3]
    #if verbose:input()
    print(n,vinf*UV/1e3)
    #break
# Fraction of trials that produced an ejected particle.
print("Efficiency:",n/(1.*k))
vinfs=np.array(vinfs)
vmean=vinfs.mean()
vstd=vinfs.std()
print("Ratio vstd/vmean :",vstd/vmean)
print("Average velocity (km/s):",vmean)
print("Velocity dispersion:",vstd)
#HISTOGRAM
# Bin the ejection speeds; pad both ends with zeros so the histogram
# outline starts and ends on the axis.
nbins=10
hs,vs=np.histogram(vinfs,nbins)
vm=(vs[1:]+vs[:-1])/2
hs=np.concatenate(([0],hs,[0]))
vm=np.concatenate(([0],vm,[vs[-1]]))
#PLOT
fig=plt.figure()
ax=fig.gca()
ax.hist(vinfs,nbins)
ax.plot(vm,hs)
# Overlay a scaled Gaussian with the sample mean and dispersion.
ax.plot(vm,20000*gaussian.pdf(vm,vmean,vstd))
fig.savefig("vinfs.png")
|
seap-udea/interstellar
|
ejection.py
|
Python
|
gpl-3.0
| 6,685
|
[
"Gaussian"
] |
561de7a1bd013fbc0ff0bc4f052a57449e5cbdc280d0daaf35e17d562e3d0bba
|
################################################################################################################
# music.py Version 4.10 16-Jan-2017 Bill Manaris, Marge Marshall, Chris Benson, and Kenneth Hanson
###########################################################################
#
# This file is part of Jython Music.
#
# Copyright (C) 2011-2016 Bill Manaris, Marge Marshall, Chris Benson, and Kenneth Hanson
#
# Jython Music is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Jython Music is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Jython Music. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
#
# Imports jMusic and jSyn packages into jython. Also provides additional functionality.
#
#
# REVISIONS:
#
# 4.10 16-Jan-2017 (bm) Fixed Note.getPitch() to return REST for rest notes, as it should.
#
# 4.9 27-Dec-2016 (bm) Fixed jMusic Note bug, where, if int pitch is given, both frequency and pitch attributes are populated, but
# if float pitch is given (i.e., frequency in Hertz), only the frequency attribute is populated - no pitch).
# Consequently, in the second case, calling getPitch() crashes the system. We fix it by also calling setFrequency()
# or setPitch() in our wrapper of the Note constructor. Also added getPitch() and getPitchBend() to fully convert
# a frequency to MIDI pitch information.
#
# 4.8 26-Dec-2016 (mm) Added Envelope class for using with Play.audio(). An envelope contains a list of attack times (in milliseconds,
# relative from the previous time) and values (to reach at those times), how long to wait (delay time, in milliseconds,
# relative from the previous time) to get to a sustain value, and then how long to wait to reach a value of zero
# (in milliseconds, relative from the end time). Also modified Play.audio() to accept an Envelope as an optional parameter.
#
# 4.7 11-Nov-2016 (bm) Small bug fix in Play.midi - now we pay attention to global instrument settings, i.e., Play.setInstrument(),
# unless instrument has been set explicitely locally (e.g., at Phrase level).
#
# 4.6 07-Nov-2016 (bm) Fixed inefficiency problem in Play.midi (took forever to play long scores, e.g., > 3000 notes). Now, things work
# in real time again.
#
# 4.5 05-Nov-2016 (bm) Fixed small but important bug in Play.midi (a missing variable in the part scheduling all notes in the chord list).
#
# 4.4 21-Oct-2016 (bm, mm) Fixed clicking in Play.audio() by adding a timer to lower volume right before the ending of an audio note.
#
# 4.3 28-Aug-2016 (bm, mm) Added Play.audio(), Play.audioNote(), Play.audioOn(), Play.audioOff(), Play.allAudioNotesOff() to
# play musical material via AudioSamples. These functions parallel the corresponding Play.note(), etc. functions.
# The only difference is that audio samples are provided to be used in place of MIDI channels to render the music.
# Also fixed bug with rendering panning information in notes in Play.midi() and Play.audio().
# Also, if panning information is not provided in Play.noteOn(), Play.audioOn(), etc., the global Play.setPanning()
# info is now used.
#
# 4.2 12-Aug-2016 (bm) Using Timer2() class (based on java.util.Timer) for Play.note(), etc. More reliable.
#
# 4.1 28-Jul-2016 (bm, mm) Resolved following issues with Play.midi():
# (a) Playback now uses length of a note (vs. its duration) to determine how long it sounds (as it should).
# (b) Chord durations were calculated improperly for some chord notes, due to a note sorting error. This has been fixed by
# sorting noteList in Play.midi() by start time, then duration, then pitch then etc.
# (c) Fixed race condition which caused some notes from not turning off. The dictionary used to hold instances of overlapping
# notes was changed to a list. Now, for every note to play, a tuple of pitch and channel is added to by frequencyOn() and
# removed from the list by frequencyOff(), respectively. (Race condition is still present, but due to rewritten logic, it
# does not cause any trouble anymore. All concurrent notes are turned off when they should.)
#
# 4.0 14-May-2016 (bm, mm) Added microtonal capabilities. Now, we can create and play notes using frequencies (float, in Hz),
# instead of pitch (0-127). Updated new Play.midi() function to handle microtones / frequencies (by using pitchbend).
# Only one frequency per channel can be played accurately (since there is only one pitchbend per channel). Regular notes
# can play concurrently as usual (i.e., for chords). However, for concurrent microtones on a given channel,
# unless they have same pitchbend, only the last microtone will be rendered accurately (all others will be affected by the
# latest pichbend - the one used to render the last microtone - again, only one pitchbend is available per channel).
# To render concurrent microtones, they have to be spread across different channels. That's the only way to render
# microtonal chords using MIDI (i.e., we are pushing MIDI as far as it goes here).
# Also, updated Play.noteOn/Off(), and Play.frequencyOn/Off() accordingly, and added a few more error checks/warnings.
# Additionally, now only the last note-off event for a given note-channel is executed, thus allowing overlapping notes with
# same pitch (e.g., overlapping A4 notes) to render more accurately.
# Finally, Play.setPitchBend() changes the global pitch bend, so if a certain frequency is played, it will be
# pitchbended if pitch bend is NOT zero. This is similar to playing any note with pitch bend set to anything other
# than zero.
#
# 3.9 30-Mar-2016 (bm) Changed to new Play.midi() function. It issues Play.note() calls, instead of using jMusic's Play.midi() -
# the latter usually hesitates at the beginning of playing. New way is more robust / reliable.
# Old function is still available under Play.midi2().
#
# 3.8 04-Mar-2016 (bm) Reverted back to __Active__ lists removing objects. Not a good idea, especially when loading large audio files.
# We need to remove old objects, when we stop, otherwise we quickly run out of memory...
#
# 3.7 28-Feb-2016 (bm) Updated Metronome class to improve API and implementation - updated method add() to use absolute beats (with
# the exception of 0, which means at the next beat), also soundOn() now takes a MIDI pitch to use and volume, as parameters.
# Updated __Active__ lists to not remove objects. This way, when Stop button is pressed, all objects are stopped,
# even if they are not referenced in the program anymore (during Live Coding, variables may be redefined and leave
# orphan objects still playing - which before could only be stopped by quiting JEM!).
#
# 3.6 05-Feb-2016 (bm) Added Metronome class - it provides for synchronization of musical tasks, especially for live coding.
# Methods include add(), remove(), start(), stop(), show(), hide(), soundOn(), soundOff().
#
# 3.5 17-Jan-2016 (bm) Fixed Mod.invert() bug, which modified RESTs - now, we only invert non-REST notes.
#
# 3.4 01-Dec-2015 (bm) Moved LiveSample to audio.py, where we can do more extensive testing for audio card formats
# (e.g., Little Endian), which appeared in some Windows boxes. Also, fixed problem of Java imports
# overridding Python's enumerate() function.
#
# 3.3 06-May-2015 (cb) Added LiveSample(), which implements live recording of audio, and offers
# an API similar to AudioSample. Nice!
#
# 3.2 22-Feb-2015 (bm) Added Mod.elongate() to fix a problem with jMusic's Mod.elongate (it messes up the
# the length of elongated notes). Added Mod.shift() to shift the start time of material
# as a whole; and Mod.merge() to merge two Parts (or two Scores) into one. Also, updated
# Mod.retrograde() to work with Parts and Scores, in addition to Phrases.
#
# 3.1 07-Dec-2014 (bm) Added Note() wrapping to allow specifying length in the Note constructor (in addition
# to pitch, duration, dynamic, and panning. Updated Phrase.addNoteList() and addChord() to
# include a length parameter. This allows for easier specification of legato and staccato notes.
# Also updated Note.setDuration() to adjust the note's length proportionally.
#
# 3.0 06-Nov-2014 (bm) Added functionality to stop AudioSample and MidiSequence objects via JEM's Stop button
# - see registerStopFunction().
#
# 2.9 07-Oct-2014 (bm) Resolved the various Play.midi() issues. Andrew (Brown) fixed jMusic's MidiSynth, so
# we now can use it as documented. We initialize a total of 12 MidiSynth's (which allows up to
# 12 concurrent Play.midi()'s). This should be sufficient for all practical purposes.
#
# 2.8 06-Sep-2014 (bm) Fixed a couple of bugs in Mod.invert() and Mod.mutate(). Also added a more meaningful
# error message in Phrase.addNoteList() for the common error of providing lists with different lengths.
#
# 2.7 19-Aug-2014 (bm) INDIAN_SCALE and TURKISH_SCALE were taken out because they were incorrect/misleading,
# as per Andrew Brown's recommendation.
#
# 2.6 29-May-2014 (bm) Added JEM's registerStopFunction() to register a callback function to be called,
# inside JEM, when the Stop button is pressed. This is needed to stop Play.midi from
# playing music. For now, we register Play.stop(), which stops any music started through
# the Play class from sounding. Also, changed stopMidiSynths() to __stopMidiSynths__()
# to hide it, since Play.stop() is now the right way to stop Play generated music from
# sounding.
#
# 2.5 27-May-2014 (bm) Added stopMidiSynths() - a function to stop all Play.midi music right away - this
# was needed for JEM. Also,Play.midi() returns the MIDI synthesizer used, so
# m = Play.midi(), followed by, m.stop(), will stop that synthesizer.
#
# 2.4 02-May-2014 (bm) Updated fixWorkingDirForJEM() solution to work with new JEM editor by Tobias Kohn.
#
# 2.3 17-Dec-2013 (bm, ng) Added AudioSample panning ranging from 0 (left) to 127 (right). Also
# added Envelope class and updated AudioSample to work with it.
#
# 2.2 21-Nov-2013 Added a Play.note(pitch, start, duration, velocity=100, channel=0) function,
# which plays a note with given 'start' time (in milliseconds from now),
# 'duration' (in milliseconds from 'start' time), with given 'velocity' on 'channel'.
# This allows scheduling of future note events, and thus should facilitate
# playing score-based or event-based musical material.
#
# 2.1 14-Mar-2013 Two classes - AudioSample and MidiSequence.
#
# AudioSample is instantiated with a string - the filename of an audio file (.wav or .aiff).
# It supports the following functions: play(), loop(), loop(numOfTimes), stop(), pause(), resume(),
# setPitch( e.g., A4 ), getPitch(), getDefaultPitch(),
# setFrequency( e.g., 440.0 ), getFrequency(),
# setVolume( 0-127 ), getVolume().
#
# MidiSequence is instantiated with either a string - the filename of a MIDI file (.mid), or
# music library material (Score, Part, Phrase, or Note).
# It supports the following functions: play(), loop(), stop(), pause(), resume(),
# setPitch( e.g., A4 ), getPitch(), getDefaultPitch(),
# setTempo( e.g., 80.1 ), getTempo(), getDefaultTempo(),
# setVolume( 0-127 ), getVolume().
#
# For more information on function parameters, see the class definition.
#
# 2.0 17-Feb-2012 Added jSyn synthesizer functionality. We now have an AudioSample class for loading audio
# files (WAV or AIF), which can be played, looped, paused, resumed, and stopped.
# Also, each sound has a MIDI pitch associated with it (default is A4), so we
# can play different pitches with it (through pitch shifting).
# Finally, we improved code organization overall.
#
# 1.91 13-Feb-2013 Modified mapScale() to add an argument for the key of the scale (default is C).
#
# 1.9 10-Feb-2013 Removed Read.image() and Write.image() - no content coupling with
# image library anymore.
# 1.81 03-Feb-2013 Now mapScale() returns an int (since it intended to be used as
# a pitch value). If we return a float, it may be confused as
# a note frequency (by the Note() constructor) - that would not be good.
#
# 1.8 01-Jan-2013 Redefine Jython input() function to fix problem with jython 2.5.3
# (see
# 1.7 30-Dec-2012 Added missing MIDI instrument constants
# 1.6 26-Nov-2012 Added Play.frequencyOn/Off(), and Play.set/getPitchBend() functions.
# 1.52 04-Nov-2012 Divided complicated mapValue() to simpler mapValue() and mapScale() functions.
# 1.51 20-Oct-2012 Restablished access to jMusic Phrase's toString() via __str__() and __repr__().
# Added missing jMusic constants.
# Added pitchSet parameter to mapValue()
# 1.5 16-Sep-2012 Added MIDI_INSTRUMENTS to be used in instrument selection menus, etc.
# 1.4 05-Sep-2012 Renamed package to 'music'.
# 1.3 17-Nov-2011 Extended jMusic Phrase, Read, Write by wrapping them in jython classes.
#
# Preserve the Python builtin enumerate() before the wildcard Java imports
# below shadow it - a hack! (per the header note, it is restored at the very
# bottom of this file)
enumerate_preserve = enumerate
# import jMusic constants and utilities
from jm.JMC import *
from jm.util import *
from jm.music.tools import *
from jm.gui.cpn import *
from jm.gui.helper import *
from jm.gui.histogram import *
from jm.gui.show import *
from jm.gui.wave import *
from jm.audio.io import *
from jm.audio.synth import *
from jm.audio.Instrument import *
from jm.constants.Alignments import *
from jm.constants.Articulations import *
from jm.constants.DrumMap import *
from jm.constants.Durations import *
from jm.constants.Dynamics import *
from jm.constants.Frequencies import *
from jm.constants.Instruments import *
from jm.constants.Noises import *
from jm.constants.Panning import *
from jm.constants.Pitches import *
from jm.constants.ProgramChanges import *
from jm.constants.Durations import *
from jm.constants.Scales import *
from jm.constants.Tunings import *
from jm.constants.Volumes import *
from jm.constants.Waveforms import *
######################################################################################
# Jython 2.5.3 fix for input()
# see http://python.6.n6.nabble.com/input-not-working-on-Windows-td4987455.html
# also see fix at http://pydev.org/faq.html#PyDevFAQ-Whyrawinput%28%29%2Finput%28%29doesnotworkcorrectlyinPyDev%3F
def input(prompt):
    """Read one line of user input and evaluate it as a Python expression
    (workaround for the broken builtin input() under Jython 2.5.3)."""
    # NOTE: eval() of user-typed text is unsafe for untrusted input, but it
    # mirrors Python 2's builtin input() semantics, which this replaces.
    text = raw_input(prompt)
    return eval(text)
######################################################################################
# redefine scales as Jython lists (as opposed to Java arrays - for cosmetic purposes)
# convert each jMusic scale (a Java array) to a Jython list, for cosmetic purposes
# (lists print and slice naturally); each scale holds semitone offsets from the root
AEOLIAN_SCALE = list(AEOLIAN_SCALE)
BLUES_SCALE = list(BLUES_SCALE)
CHROMATIC_SCALE = list(CHROMATIC_SCALE)
DIATONIC_MINOR_SCALE = list(DIATONIC_MINOR_SCALE)
DORIAN_SCALE = list(DORIAN_SCALE)
HARMONIC_MINOR_SCALE = list(HARMONIC_MINOR_SCALE)
LYDIAN_SCALE = list(LYDIAN_SCALE)
MAJOR_SCALE = list(MAJOR_SCALE)
MELODIC_MINOR_SCALE = list(MELODIC_MINOR_SCALE)
MINOR_SCALE = list(MINOR_SCALE)
MIXOLYDIAN_SCALE = list(MIXOLYDIAN_SCALE)
NATURAL_MINOR_SCALE = list(NATURAL_MINOR_SCALE)
PENTATONIC_SCALE = list(PENTATONIC_SCALE)
######################################################################################
# define text labels for MIDI instruments (index in list is same as MIDI instrument number)
# text labels for MIDI instruments - the list index is the MIDI instrument number,
# so MIDI_INSTRUMENTS[n] names instrument n (useful for menus, display, etc.)
MIDI_INSTRUMENTS = [ # Piano Family
    "Acoustic Grand Piano", "Bright Acoustic Piano", "Electric Grand Piano",
    "Honky-tonk Piano", "Electric Piano 1 (Rhodes)", "Electric Piano 2 (DX)",
    "Harpsichord", "Clavinet",
    # Chromatic Percussion Family
    "Celesta", "Glockenspiel", "Music Box", "Vibraphone", "Marimba",
    "Xylophone", "Tubular Bells", "Dulcimer",
    # Organ Family
    "Drawbar Organ", "Percussive Organ", "Rock Organ", "Church Organ",
    "Reed Organ", "Accordion", "Harmonica", "Tango Accordion",
    # Guitar Family
    "Acoustic Guitar (nylon)", "Acoustic Guitar (steel)", "Electric Guitar (jazz)",
    "Electric Guitar (clean)", "Electric Guitar (muted)", "Overdriven Guitar",
    "Distortion Guitar", "Guitar harmonics",
    # Bass Family
    "Acoustic Bass", "Electric Bass (finger)", "Electric Bass (pick)", "Fretless Bass",
    "Slap Bass 1", "Slap Bass 2", "Synth Bass 1", "Synth Bass 2",
    # Strings and Timpani Family
    "Violin", "Viola", "Cello", "Contrabass", "Tremolo Strings", "Pizzicato Strings",
    "Orchestral Harp", "Timpani",
    # Ensemble Family
    "String Ensemble 1", "String Ensemble 2", "Synth Strings 1", "Synth Strings 2",
    "Choir Aahs", "Voice Oohs", "Synth Voice", "Orchestra Hit",
    # Brass Family
    "Trumpet", "Trombone", "Tuba", "Muted Trumpet", "French Horn",
    "Brass Section", "SynthBrass 1", "SynthBrass 2",
    # Reed Family
    "Soprano Sax", "Alto Sax", "Tenor Sax", "Baritone Sax", "Oboe", "English Horn",
    "Bassoon", "Clarinet",
    # Pipe Family
    "Piccolo", "Flute", "Recorder", "Pan Flute", "Blown Bottle", "Shakuhachi",
    "Whistle", "Ocarina",
    # Synth Lead Family
    "Lead 1 (square)", "Lead 2 (sawtooth)", "Lead 3 (calliope)", "Lead 4 (chiff)",
    "Lead 5 (charang)", "Lead 6 (voice)", "Lead 7 (fifths)", "Lead 8 (bass + lead)",
    # Synth Pad Family
    "Pad 1 (new age)", "Pad 2 (warm)", "Pad 3 (polysynth)", "Pad 4 (choir)",
    "Pad 5 (bowed)", "Pad 6 (metallic)", "Pad 7 (halo)", "Pad 8 (sweep)",
    # Synth Effects Family
    "FX 1 (rain)", "FX 2 (soundtrack)", "FX 3 (crystal)", "FX 4 (atmosphere)",
    "FX 5 (brightness)", "FX 6 (goblins)", "FX 7 (echoes)", "FX 8 (sci-fi)",
    # Ethnic Family
    "Sitar", "Banjo", "Shamisen", "Koto", "Kalimba", "Bag pipe", "Fiddle", "Shanai",
    # Percussive Family
    "Tinkle Bell", "Agogo", "Steel Drums", "Woodblock", "Taiko Drum", "Melodic Tom",
    "Synth Drum", "Reverse Cymbal",
    # Sound Effects Family
    "Guitar Fret Noise", "Breath Noise", "Seashore", "Bird Tweet", "Telephone Ring",
    "Helicopter", "Applause", "Gunshot" ]
# text labels for inverse-lookup of MIDI pitches - the list index is the MIDI pitch
# number (for enharmonic notes, e.g., FS4 and GF4, the sharp spelling is used, e.g. FS4)
MIDI_PITCHES = ["C_1", "CS_1", "D_1", "DS_1", "E_1", "F_1", "FS_1", "G_1", "GS_1", "A_1", "AS_1", "B_1",
    "C0", "CS0", "D0", "DS0", "E0", "F0", "FS0", "G0", "GS0", "A0", "AS0", "B0",
    "C1", "CS1", "D1", "DS1", "E1", "F1", "FS1", "G1", "GS1", "A1", "AS1", "B1",
    "C2", "CS2", "D2", "DS2", "E2", "F2", "FS2", "G2", "GS2", "A2", "AS2", "B2",
    "C3", "CS3", "D3", "DS3", "E3", "F3", "FS3", "G3", "GS3", "A3", "AS3", "B3",
    "C4", "CS4", "D4", "DS4", "E4", "F4", "FS4", "G4", "GS4", "A4", "AS4", "B4",
    "C5", "CS5", "D5", "DS5", "E5", "F5", "FS5", "G5", "GS5", "A5", "AS5", "B5",
    "C6", "CS6", "D6", "DS6", "E6", "F6", "FS6", "G6", "GS6", "A6", "AS6", "B6",
    "C7", "CS7", "D7", "DS7", "E7", "F7", "FS7", "G7", "GS7", "A7", "AS7", "B7",
    "C8", "CS8", "D8", "DS8", "E8", "F8", "FS8", "G8", "GS8", "A8", "AS8", "B8",
    "C9", "CS9", "D9", "DS9", "E9", "F9", "FS9", "G9"]
######################################################################################
# provide additional MIDI rhythm constant
# additional MIDI rhythm constant (4.5 quarter notes)
DOTTED_WHOLE_NOTE = 4.5
DWN = 4.5
######################################################################################
# additional MIDI pitch constants for the first octave (i.e., the "minus 1" octave);
# upper- and lower-case spellings are both provided (S = sharp, F = flat)
BS_1 = 12
bs_1 = 12
B_1 = 11
b_1 = 11
BF_1 = 10
bf_1 = 10
AS_1 = 10
as_1 = 10
A_1 = 9
a_1 = 9
AF_1 = 8
af_1 = 8
GS_1 = 8
gs_1 = 8
G_1 = 7
g_1 = 7
GF_1 = 6
gf_1 = 6
FS_1 = 6
fs_1 = 6
F_1 = 5
f_1 = 5
FF_1 = 4
ff_1 = 4
ES_1 = 5
es_1 = 5
E_1 = 4
e_1 = 4
EF_1 = 3
ef_1 = 3
DS_1 = 3
ds_1 = 3
D_1 = 2
d_1 = 2
DF_1 = 1
df_1 = 1
CS_1 = 1
cs_1 = 1
C_1 = 0
c_1 = 0
######################################################################################
# provide additional MIDI instrument constants (missing from jMusic specification)
# additional MIDI instrument constants (missing from the jMusic specification);
# values are MIDI program numbers, matching the MIDI_INSTRUMENTS list above
EPIANO1 = 4
RHODES_PIANO = 4
DX_PIANO = 5
DX = 5
DULCIMER = 15
DRAWBAR_ORGAN = 16
PERCUSSIVE_ORGAN = 17
ROCK_ORGAN = 18
TANGO_ACCORDION = 23
BANDONEON = 23
OVERDRIVEN_GUITAR = 29
DISTORTION_GUITAR = 30
SLAP_BASS1 = 36
SLAP_BASS2 = 37
SYNTH_BASS1 = 38
SYNTH_BASS2 = 39
ORCHESTRAL_HARP = 46
STRING_ENSEMBLE1 = 48
STRING_ENSEMBLE2 = 49
SYNTH = 50
SYNTH_STRINGS1 = 50
SYNTH_STRINGS2 = 51
CHOIR_AHHS = 52
VOICE_OOHS = 53
SYNTH_VOICE = 54
BRASS_SECTION = 61
SYNTH_BRASS1 = 62
SYNTH_BRASS2 = 63
BLOWN_BOTTLE = 76
LEAD_1_SQUARE = 80
LEAD_2_SAWTOOTH = 81
LEAD_3_CALLIOPE = 82
CALLIOPE = 82
LEAD_4_CHIFF = 83
CHIFF = 83
LEAD_5_CHARANG = 84
LEAD_6_VOICE = 85
LEAD_7_FIFTHS = 86
FIFTHS = 86
LEAD_8_BASS_LEAD = 87
BASS_LEAD = 87
PAD_1_NEW_AGE = 88
NEW_AGE = 88
PAD_2_WARM = 89
PAD_3_POLYSYNTH = 90
POLYSYNTH = 90
PAD_4_CHOIR = 91
SPACE_VOICE = 91
PAD_5_GLASS = 92
PAD_6_METTALIC = 93
METALLIC = 93
PAD_7_HALO = 94
HALO = 94
PAD_8_SWEEP = 95
FX_1_RAIN = 96
FX_2_SOUNDTRACK = 97
FX_3_CRYSTAL = 98
FX_4_ATMOSPHERE = 99
FX_5_BRIGHTNESS = 100
FX_6_GOBLINS = 101
GOBLINS = 101
FX_7_ECHOES = 102
ECHO_DROPS = 102
FX_8_SCI_FI = 103
SCI_FI = 103
TAIKO_DRUM = 116
MELODIC_TOM = 117
TOM_TOM = 117 # this is a fix (jMusic defines this as 119!)
GUITAR_FRET_NOISE = 120
FRET_NOISE = 120
BREATH_NOISE = 121
BIRD_TWEET = 123
TELEPHONE_RING = 124
GUNSHOT = 127
# MIDI drum and percussion abbreviations - values are percussion key numbers
# (presumably following the General MIDI percussion key map - verify before extending)
ABD = 35
BASS_DRUM = 36
BDR = 36
STK = 37
SNARE = 38
SNR = 38
CLP = 39
ESN = 40
LFT = 41
CHH = 42
HFT = 43
PHH = 44
LTM = 45
OHH = 46
LMT = 47
HMT = 48
CC1 = 49
HGT = 50
RC1 = 51
CCM = 52
RBL = 53
TMB = 54
SCM = 55
CBL = 56
CC2 = 57
VSP = 58
RC2 = 59
HBG = 60
LBG = 61
MHC = 62
OHC = 63
LCG = 64
HTI = 65
LTI = 66
HAG = 67
LAG = 68
CBS = 69
MRC = 70
SWH = 71
LWH = 72
SGU = 73
LGU = 74
CLA = 75
HWB = 76
LWB = 77
MCU = 78
OCU = 79
MTR = 80
OTR = 81
######################################################################################
#### Free music library functions ####################################################
######################################################################################
def mapValue(value, minValue, maxValue, minResultValue, maxResultValue):
    """
    Linearly map 'value' from the source range (minValue, maxValue) onto the
    destination range (minResultValue, maxResultValue).

    The result is cast to the type of minResultValue (int or float), so an
    integer destination range yields a truncated integer result.
    Raises ValueError if 'value' lies outside the source range.
    """
    if value < minValue or value > maxValue:   # reject out-of-range input
        raise ValueError("value, " + str(value) + ", is outside the specified range, " \
                         + str(minValue) + " to " + str(maxValue) + ".")
    # normalize into [0.0, 1.0] (use float arithmetic for accuracy)
    proportion = (float(value) - minValue) / (maxValue - minValue)
    # scale and offset into the destination range
    mapped = proportion * (maxResultValue - minResultValue) + minResultValue
    # cast to the destination data type (the type of minResultValue)
    return type(minResultValue)(mapped)
def mapScale(value, minValue, maxValue, minResultValue, maxResultValue, scale=CHROMATIC_SCALE, key=None):
    """
    Linearly map 'value' from the source range (minValue, maxValue) onto the
    destination range (minResultValue, maxResultValue), then "snap" the result
    onto the given scale (a pitch row - a list of semitone offsets, 0 to 11,
    from the root).  'key' shifts the scale pattern to a particular root; when
    omitted, the root is taken from minResultValue (e.g., C4 and C5 both imply
    the key of C).

    Always returns an int, since the result is intended as a MIDI pitch.
    NOTE: assumes the 12-step (MIDI) tonal system - an octave is 12 semitones,
    so the scale may contain offsets between 0 and 11 only.
    Raises ValueError for out-of-range 'value', TypeError for a bad scale.
    """
    if value < minValue or value > maxValue:   # reject out-of-range input
        raise ValueError("value, " + str(value) + ", is outside the specified range, " \
                         + str(minValue) + " to " + str(maxValue) + ".")
    # a pitch row may contain semitone offsets from 0 through 11 only
    if any(offset < 0 or offset > 11 for offset in scale):
        raise TypeError("scale, " + str(scale) + ", should contain values only from 0 to 11.")
    # resolve the key to 0-11; default to the root implied by minResultValue
    if key is None:
        key = minResultValue % 12
    else:
        key = key % 12
    # normalize source value into [0.0, 1.0] (float arithmetic for accuracy)
    proportion = (float(value) - minValue) / (maxValue - minValue)
    # map onto the chromatic destination range, shifted down by 'key' so that
    # positions line up with indices in the provided scale ('key' is added back below)
    chromaticStep = proportion * (maxResultValue - minResultValue) + minResultValue - key
    pitchRowStep = chromaticStep * len(scale) / 12    # position within the pitch row
    scaleDegree = int(pitchRowStep % len(scale))      # index into the scale list
    register = int(pitchRowStep / len(scale))         # octave (register), e.g., 4th, 5th
    # octave offset plus the scale's semitone offset, re-adjusted for the key
    return int(register * 12 + scale[scaleDegree] + key)
def frange(start, stop, step):
    """
    A range() counterpart for floats, e.g., frange(0, 1, 0.25) -> [0, 0.25, 0.5, 0.75].

    Accuracy is controlled by the number of digits in the decimal part of 'step':
    every generated value is rounded to that many decimals, to hide binary
    floating-point representation error.  A negative 'step' counts down, like range().
    Returns the list of generated values.  Raises ValueError if 'step' is zero.
    """
    if step == 0:   # a zero step would never terminate
        raise ValueError("frange() step argument must not be zero")
    # number of decimals in 'step' (e.g., 0.25 -> 2), used to round every value
    accuracy = len(str(step - int(step))[1:]) - 1
    result = []   # holds resultant list
    # pick the termination test based on direction of travel
    if step > 0:
        done = start >= stop
    else:
        done = start <= stop
    # generate sequence
    while not done:
        start = round(start, accuracy)   # use same number of decimals as 'step'
        result.append(start)
        start += step
        # re-evaluate the direction-appropriate termination condition
        if step > 0:
            done = start >= stop
        else:
            done = start <= stop
    return result
def xfrange(start, stop, step):
    """
    Generator version of frange(): lazily yields floats from 'start' toward 'stop'
    in increments of 'step'.

    Accuracy is controlled by the number of digits in the decimal part of 'step':
    every yielded value is rounded to that many decimals, to hide binary
    floating-point representation error.  Raises ValueError if 'step' is zero
    (raised on first use, as with any generator).
    """
    if step == 0:   # a zero step would never terminate
        raise ValueError("xfrange() step argument must not be zero")
    # number of decimals in 'step' (e.g., 0.25 -> 2), used to round every value
    accuracy = len(str(step - int(step))[1:]) - 1
    # pick the termination test based on direction of travel
    if step > 0:
        done = start >= stop
    else:
        done = start <= stop
    # generate sequence
    while not done:
        start = round(start, accuracy)   # use same number of decimals as 'step'
        yield start
        start += step
        # re-evaluate the direction-appropriate termination condition
        if step > 0:
            done = start >= stop
        else:
            done = start <= stop
######################################################################################
#### jMusic library extensions #########################################################
######################################################################################
# A wrapper to turn class functions into "static" functions (e.g., for Mod functions).
#
# See http://code.activestate.com/recipes/52304-static-methods-aka-class-methods-in-python/
#
class Callable:
    """Wrapper that exposes a function as an instance's __call__ attribute,
    turning class functions into "static" functions (e.g., the Mod functions).

    See http://code.activestate.com/recipes/52304-static-methods-aka-class-methods-in-python/
    """
    def __init__(self, functionName):
        # store the wrapped function directly as this instance's __call__
        self.__call__ = functionName
######################################################################################
#### jMusic Mod extensions #########################################################
######################################################################################
from jm.music.tools import Mod as jMod # needed to wrap more functionality below
# Create various Mod functions, in addition to Mod's default functionality.
# This class is not meant to be instantiated, hence no "self" in function definitions.
# Functions are made callable through class Callable, above.
class Mod(jMod):
    """Extensions to jMusic's Mod: additional "static" transformation functions
    for musical material (Note, Phrase, Part, Score), on top of Mod's defaults.

    This class is not meant to be instantiated, hence no "self" in the function
    definitions; the functions are made callable through class Callable (above).
    """
    def normalize(material):
        """Same as jMod.normalise()."""
        # American-English alias for jMusic's British-English spelling
        jMod.normalise(material)

    def invert(phrase, pitchAxis):
        """Invert phrase using pitch as the mirror (pivot) axis."""
        # traverse list of notes, and adjust pitches accordingly
        for note in phrase.getNoteList():
            if not note.isRest():   # modify regular notes only (do not modify rests)
                invertedPitch = pitchAxis + (pitchAxis - note.getPitch())   # mirror pitch around axis (by adding the difference)
                note.setPitch( invertedPitch )   # and update it
        # now, all notes have been updated

    def mutate(phrase):
        """Same as jMod.mutate()."""
        # adjust jMod.mutate() to use random durations from phrase notes
        durations = [note.getDuration() for note in phrase.getNoteList()]
        jMod.mutate(phrase, 1, 1, CHROMATIC_SCALE, phrase.getLowestPitch(),
                    phrase.getHighestPitch(), durations)

    def elongate(material, scaleFactor):
        """Same as jMod.elongate(). Fixing a bug."""
        # define helper functions (one per material type)
        def elongateNote(note, scaleFactor):
            """Helper function to elongate a single note."""
            note.setDuration( note.getDuration() * scaleFactor)
        def elongatePhrase(phrase, scaleFactor):
            """Helper function to elongate a single phrase."""
            for note in phrase.getNoteList():
                elongateNote(note, scaleFactor)
        def elongatePart(part, scaleFactor):
            """Helper function to elongate a single part."""
            for phrase in part.getPhraseList():
                elongatePhrase(phrase, scaleFactor)
        def elongateScore(score, scaleFactor):
            """Helper function to elongate a score."""
            for part in score.getPartList():
                elongatePart(part, scaleFactor)
        # check type of material and call the appropriate function
        if type(material) == Score:
            elongateScore(material, scaleFactor)
        elif type(material) == Part:
            elongatePart(material, scaleFactor)
        elif type(material) == Phrase or type(material) == jPhrase:
            elongatePhrase(material, scaleFactor)
        elif type(material) == Note:
            elongateNote(material, scaleFactor)
        else:   # unsupported material type
            raise TypeError( "Unrecognized time type " + str(type(material)) + " - expected Note, Phrase, Part, or Score." )

    def shift(material, time):
        """It shifts all phrases' start time by 'time' (measured in QN's, i.e., 1.0 equals QN).
        If 'time' is positive, phrases are moved later.
        If 'time' is negative, phrases are moved earlier (at most, at the piece's start time, i.e., 0.0),
        as negative start times make no sense.
        'Material' can be Phrase, Part, or Score (since Notes do not have a start time).
        """
        # define helper functions (one per material type)
        def shiftPhrase(phrase, time):
            """Helper function to shift a single phrase."""
            newStartTime = phrase.getStartTime() + time
            newStartTime = max(0, newStartTime)   # clamp at 0 (negative start times make no sense)
            phrase.setStartTime( newStartTime )
        def shiftPart(part, time):
            """Helper function to shift a single part."""
            for phrase in part.getPhraseList():
                shiftPhrase(phrase, time)
        def shiftScore(score, time):
            """Helper function to shift a score."""
            for part in score.getPartList():
                shiftPart(part, time)
        # check type of time
        if not (type(time) == float or type(time) == int):
            raise TypeError( "Unrecognized time type " + str(type(time)) + " - expected int or float." )
        # check type of material and call the appropriate function
        if type(material) == Score:
            shiftScore(material, time)
        elif type(material) == Part:
            shiftPart(material, time)
        elif type(material) == Phrase or type(material) == jPhrase:
            shiftPhrase(material, time)
        else:   # unsupported material type
            raise TypeError( "Unrecognized material type " + str(type(material)) + " - expected Phrase, Part, or Score." )

    def merge(material1, material2):
        """Merges 'material2' into 'material1'. 'Material1' is changed, 'material2' is unmodified.
        Both 'materials' must be of the same type, either Part or Score.
        It does not worry itself about instrument and channel assignments - it is left to the caller
        to ensure that the two 'materials' are compatible this way.
        """
        # define helper functions
        def mergeParts(part1, part2):
            """Helper function to merge two parts into one."""
            for phrase in part2.getPhraseList():
                part1.addPhrase(phrase)
        def mergeScores(score1, score2):
            """Helper function to merge two scores into one."""
            for part in score2.getPartList():
                score1.addPart(part)
        # check type of material and call the appropriate function
        if type(material1) == Score and type(material2) == Score:
            mergeScores(material1, material2)
        elif type(material1) == Part and type(material2) == Part:
            mergeParts(material1, material2)
        elif (type(material1) == Part and type(material2) == Score) or \
             (type(material1) == Score and type(material2) == Part):
            raise TypeError( "Cannot merge Score and Part - arguments must be of the same type (both Score or both Part)." )
        else:
            raise TypeError( "Arguments must be both either Score or Part." )

    def retrograde(material):
        """It reverses the start times of notes in 'material'.
        'Material' can be Phrase, Part, or Score.
        """
        # define helper functions
        def getPartStartTime(part):
            """Helper function to return the start time of a part."""
            minStartTime = 10000000000.0   # earliest start time among all phrases (initialized to a very large value)
            for phrase in part.getPhraseList():
                minStartTime = min(minStartTime, phrase.getStartTime())   # accumulate the earliest start time, so far
            # now, minStartTime holds the earliest start time of a phrase in this part
            return minStartTime
        def getPartEndTime(part):
            """Helper function to return the end time of a part."""
            maxEndTime = 0.0   # latest end time among all phrases
            for phrase in part.getPhraseList():
                maxEndTime = max(maxEndTime, phrase.getEndTime())   # accumulate the latest end time, so far
            # now, maxEndTime holds the latest end time of a phrase in this part
            return maxEndTime
        def retrogradePart(part):
            """Helper function to retrograde a single part."""
            startTime = getPartStartTime(part)   # the earliest start time among all phrases
            endTime = getPartEndTime(part)       # the latest end time among all phrases
            # retrograde each phrase and adjust its start time accordingly
            for phrase in part.getPhraseList():
                distanceFromEnd = endTime - phrase.getEndTime()   # this phrase's distance from the part end
                jMod.retrograde(phrase)                           # retrograde it
                # the retrograded phrase must start as far from the beginning of the part
                # as its original end used to be from the end of the part
                phrase.setStartTime( distanceFromEnd + startTime )
            # now, all phrases in this part have been retrograded and their start times
            # have been arranged to mirror their original end times
        def retrogradeScore(score):
            """Helper function to retrograde a score."""
            # calculate the score's start and end times
            startTime = 10000000000.0   # earliest start time among all parts (initialized to a very large value)
            endTime = 0.0               # latest end time among all parts
            for part in score.getPartList():
                startTime = min(startTime, getPartStartTime(part))   # accumulate the earliest start time, so far
                endTime = max(endTime, getPartEndTime(part))         # accumulate the latest end time, so far
            # now, startTime and endTime hold the score's start and end time, respectively
            # NOTE(review): debug output left in place to preserve behavior - consider removing
            print "score startTime =", startTime, "endTime =", endTime
            # retrograde each part and adjust its start time accordingly
            for part in score.getPartList():
                # get this part's distance from the score end
                distanceFromEnd = endTime - (getPartEndTime(part) + getPartStartTime(part))
                # retrograde this part
                retrogradePart(part)
                # the retrograded part must start as far from the score start
                # as the original part's distance from the score end
                Mod.shift(part, distanceFromEnd)
            # now, all parts have been retrograded and their start times have been
            # arranged to mirror their original end times
        # check type of material and call the appropriate function
        if type(material) == Score:
            retrogradeScore(material)
        elif type(material) == Part:
            retrogradePart(material)
        elif type(material) == Phrase or type(material) == jPhrase:
            jMod.retrograde(material)
        else:   # unsupported material type
            raise TypeError( "Unrecognized material type " + str(type(material)) + " - expected Phrase, Part, or Score." )

    # make these functions callable without having to instantiate this class
    normalize = Callable(normalize)
    invert = Callable(invert)
    mutate = Callable(mutate)
    elongate = Callable(elongate)
    shift = Callable(shift)
    merge = Callable(merge)
    retrograde = Callable(retrograde)
######################################################################################
# JEM working directory fix
#
# JEM (written partially in Java) does not allow changing current directory.
# So, when we have the user's desired working directory we CANNOT use it to read/write
# jMusic media files, unless we add it as a prefix here to every Read/Write operation.
# We do so only if the filepath passed to Read/Write is just a filename (as opposed
# to a path).
#
# Let's define some useful stuff here, for this fix
import os.path
def fixWorkingDirForJEM( filename ):
    """Prefix 'filename' with JEM's working directory, when running inside JEM
    and when 'filename' is a relative path.  Absolute paths - where the user
    truly knows where the file should live - and any environment where JEM is
    unavailable leave 'filename' untouched.
    """
    try:
        JEM_getMainFilePath                  # defined only inside JEM - NameError otherwise (caught below)
        workDir = JEM_getMainFilePath()      # JEM's working directory
        # honor absolute paths as-is; anchor relative ones at workDir
        return filename if os.path.isabs( filename ) else workDir + filename
    except:
        # not running inside JEM (e.g., music.py run standalone) - use the filename as provided
        return filename
######################################################################################
#### jMusic Read extensions ##########################################################
######################################################################################
from jm.util import Read as jRead # needed to wrap more functionality below
from image import * # import Image class and related Java libraries
# Create Read.image("test.jpg") to return an image, in addition to Read's default functionality.
# This class is not meant to be instantiated, hence no "self" in function definitions.
# Functions are made callable through class Callable, above.
class Read(jRead):
    """Thin wrapper around jMusic's Read that makes relative filenames work with
    JEM's working directory (via fixWorkingDirForJEM).

    This class is not meant to be instantiated, hence no "self" in the function
    definitions; functions are made callable through class Callable.
    """
    def midi(score, filename):
        """Import a standard MIDI file into the given jMusic score."""
        # anchor relative filenames at JEM's working directory (no-op outside JEM)
        jRead.midi(score, fixWorkingDirForJEM( filename ))
    # make this function callable without having to instantiate this class
    midi = Callable(midi)
######################################################################################
#### jMusic Write extensions #########################################################
######################################################################################
from jm.util import Write as jWrite # needed to wrap more functionality below
# Create Write.image(image, "test.jpg") to write an image to file, in addition
# to Write's default functionality.
# This class is not meant to be instantiated, hence no "self" in function definitions.
# Functions are made callable through class Callable, above.
class Write(jWrite):
    """Thin wrapper around jMusic's Write that makes relative filenames work with
    JEM's working directory (via fixWorkingDirForJEM).

    This class is not meant to be instantiated, hence no "self" in the function
    definitions; functions are made callable through class Callable.
    """
    def midi(score, filename):
        """Save the given jMusic score as a standard MIDI file."""
        # anchor relative filenames at JEM's working directory (no-op outside JEM)
        jWrite.midi(score, fixWorkingDirForJEM( filename ))
    # make this function callable without having to instantiate this class
    midi = Callable(midi)
######################################################################################
#### jMusic Note extensions ########################################################
######################################################################################
###############################################################################
# freqToNote Convert frequency to MIDI note number
# freqToNote(f) converts frequency to the closest MIDI note
# number with pitch bend value for finer control. A4 corresponds to
# the note number 69 (concert pitch is set to 440Hz by default).
# The default pitch bend range is 2 half tones above and below.
#
# 2005-10-13 by MARUI Atsushi
# See http://www.geidai.ac.jp/~marui/octave/node3.html
#
# For example, "sliding" from A4 (MIDI pitch 69, frequency 440 Hz)
# to a bit over AS4 (MIDI pitch 70, frequency 466.1637615181 Hz).
#
#>>>for f in range(440, 468):
#... print freqToNote(f)
#...
#(69, 0)
#(69, 322)
#(69, 643)
#(69, 964)
#(69, 1283)
#(69, 1603)
#(69, 1921)
#(69, 2239)
#(69, 2555)
#(69, 2872)
#(69, 3187)
#(69, 3502)
#(69, 3816)
#(70, -4062)
#(70, -3750)
#(70, -3438)
#(70, -3126)
#(70, -2816)
#(70, -2506)
#(70, -2196)
#(70, -1888)
#(70, -1580)
#(70, -1272)
#(70, -966)
#(70, -660)
#(70, -354)
#(70, -50)
#(70, 254)
#
# The above overshoots AS4 (MIDI pitch 70, frequency 466.1637615181 Hz).
# So, here is converting the exact frequency:
#
#>>> freqToNote(466.1637615181)
#(70, 0)
###############################################################################
def freqToNote(frequency):
    """Convert 'frequency' (Hz) to the closest MIDI note number, plus a
    pitch-bend value for finer control.  A4 maps to note number 69 (concert
    pitch is set to 440Hz by default); the pitch-bend value is scaled for a
    bend range of 4 half tones (2 below, 2 above).
    Returns the tuple (noteNumber, pitchBend).
    """
    from math import log
    concertPitch = 440.0   # A4, in Hz
    bendRange = 4          # in half tones (2 below, 2 above)
    # fractional MIDI note number: 69 plus the semitone distance from A4
    fractionalNote = log(frequency / concertPitch, 2) * 12 + 69
    nearestNote = round(fractionalNote)
    # leftover fraction of a semitone, expressed in pitch-bend units
    # (8192 / bendRange * 2 units per semitone)
    bend = round((fractionalNote - nearestNote) * 8192 / bendRange * 2)
    return int(nearestNote), int(bend)
def noteToFreq(note):
    """Convert a MIDI note number to its frequency in Hz.  A4 corresponds to
    note number 69 (concert pitch is set to 440Hz by default)."""
    concertPitch = 440.0   # A4, in Hz
    # each octave (12 semitones) doubles the frequency
    return concertPitch * 2 ** ((note - 69) / 12.0)
from jm.music.data import *
from jm.music.data import Note as jNote # needed to wrap more functionality below
# update Note to accept length which specifies the actual length (performance) of the note,
# (whereas duration specifies the score (or denoted) length of the note).
class Note(jNote):
    """jMusic Note extension: adds a 'length' parameter - the actual performed
    length of the note - whereas 'duration' remains the notated (score) length.
    The first constructor argument may be an int (MIDI pitch) or a float
    (frequency in Hz).
    """
    def __str__(self):
        # we disrupted access to jMusic's (Java's) Note.toString() method,
        # so, let's fix it
        return self.toString()
    def __repr__(self):
        # we disrupted access to jMusic's (Java's) Note.toString() method,
        # so, let's fix it
        return self.toString()
    def __init__(self, value, duration, dynamic=85, pan=0.5, length=None):
        # NOTE: if value is an int, it signifies pitch; if it is a float,
        # it signifies a frequency (Hz)
        # set note length (if not provided, use jMusic's default proportion of duration)
        if length == None:   # not provided?
            length = duration * jNote.DEFAULT_LENGTH_MULTIPLIER   # normally, duration * 0.9
        # do some basic error checking on the pitch/frequency argument
        if type(value) == int and value != REST and (value < 0 or value > 127):
            raise TypeError( "Note pitch should be an integer between 0 and 127 (it was " + str(value) + ")." )
        elif type(value) == float and not value > 0.0:
            raise TypeError( "Note frequency should be a float greater than 0.0 (it was " + str(value) + ")." )
        elif (type(value) != int) and (type(value) != float):
            raise TypeError( "Note first parameter should be a pitch (int) or a frequency (float) - it was " + str(type(value)) + "." )
        # now, construct a jMusic Note with the proper attributes
        jNote.__init__(self, value, duration, dynamic, pan)   # construct note
        self.setLength( length )   # and set its length
        # NOTE: given an int pitch, jMusic Notes populate both frequency and pitch;
        # given a float, they treat it as frequency and populate only frequency (no pitch).
        # This is a bug.  Calling setPitch()/setFrequency() below may look redundant,
        # but it fixes the problem (they do the proper cross-updating of pitch and frequency).
        if type(value) == int:
            self.setPitch(value)
        elif type(value) == float:
            self.setFrequency(value)
    # fix setDuration to also adjust length proportionally
    def setDuration(self, duration):
        """Set the note's (score) duration, scaling its performed length proportionally."""
        # calculate length factor from original values
        lengthFactor = self.getLength() / self.getDuration()
        # and set new duration and length appropriately
        jNote.setDuration(self, duration )
        self.setLength(duration * lengthFactor )
    # fix error message returned from getPitch() if frequency and pitch are not equivalent
    def getPitch(self):
        """Return the MIDI pitch closest to this note's frequency (REST for rests)."""
        # get frequency
        frequency = self.getFrequency()
        # convert to corresponding pitch
        if frequency == float(REST):   # is it a rest?
            pitch = REST               # yes, so update accordingly
        else:                          # it's a regular note, so...
            # calculate corresponding pitch and pitch bend
            pitch, bend = freqToNote(frequency)
        # return only pitch
        return pitch
    # also, a way to get the difference between frequency and pitch, in pitch-bend units (see Play class)
    def getPitchBend(self):
        """Return this note's deviation from its nominal MIDI pitch, in pitch-bend
        units, offset by PITCHBEND_NORMAL."""
        # get frequency
        frequency = self.getFrequency()
        # and calculate corresponding pitch and pitch bend
        pitch, bend = freqToNote(frequency)
        # return only pitch bend, offset by the "no bend" center value
        return bend + PITCHBEND_NORMAL
######################################################################################
#### jMusic Phrase extensions ########################################################
######################################################################################
from jm.music.data import Phrase as jPhrase # needed to wrap more functionality below
# update Phrase's addNoteList to handle chords, i.e., lists of pitches,
# in addition to single pitches (the default functionality).
class Phrase(jPhrase):
    """jMusic Phrase extension whose addNoteList() understands chords (lists of
    pitches) in addition to single pitches, and which supports the 'length'
    (performed length) attribute of the extended Note class.
    """
    def __str__(self):
        # we disrupted access to jMusic's (Java's) Phrase.toString() method,
        # so, let's fix it
        return self.toString()
    def __repr__(self):
        # we disrupted access to jMusic's (Java's) Phrase.toString() method,
        # so, let's fix it
        return self.toString()
    def addChord(self, pitches, duration, dynamic=85, panoramic=0.5, length=None):
        """Add a chord (a list of pitches sounding together) to this phrase."""
        # set chord length (if not provided, use jMusic's default proportion of duration)
        if length is None:   # not provided?
            length = duration * jNote.DEFAULT_LENGTH_MULTIPLIER   # normally, duration * 0.9
        # all notes but the last get zero duration (so they sound together), yet
        # normal length - this exploits how Play.midi() and Write.midi() work
        for pitch in pitches[:-1]:
            self.addNote( Note(pitch, 0.0, dynamic, panoramic, length) )
        # the last note carries the full chord duration (and length)
        self.addNote( Note(pitches[-1], duration, dynamic, panoramic, length) )
    def addNoteList(self, pitches, durations, dynamics=None, panoramics=None, lengths=None):
        """Add notes to the phrase using provided lists of pitches, durations, etc.
        An entry in 'pitches' may itself be a list of pitches - a chord.
        Omitted lists default to dynamic 85, panning 0.5 (center), and length
        90% of duration.  Raises ValueError if the list lengths disagree.
        """
        # use None sentinels instead of mutable [] defaults (shared across calls);
        # normalizing to [] here preserves the original behavior exactly
        if dynamics is None:
            dynamics = []
        if panoramics is None:
            panoramics = []
        if lengths is None:
            lengths = []
        # all provided lists must agree in length with 'pitches'
        if len(pitches) != len(durations) or \
           (len(dynamics) != 0) and (len(pitches) != len(dynamics)) or \
           (len(panoramics) != 0) and (len(pitches) != len(panoramics)) or \
           (len(lengths) != 0) and (len(pitches) != len(lengths)):
            raise ValueError("The provided lists should have the same length.")
        # fill in defaults for any list not provided
        if dynamics == []:
            dynamics = [85] * len(pitches)          # default dynamic
        if panoramics == []:
            panoramics = [0.5] * len(pitches)       # default panning: center
        if lengths == []:
            lengths = [duration * jNote.DEFAULT_LENGTH_MULTIPLIER for duration in durations]
        # traverse the pitch list and handle every item appropriately
        for i in range( len(pitches) ):
            if type(pitches[i]) == list:   # is it a chord?
                self.addChord(pitches[i], durations[i], dynamics[i], panoramics[i], lengths[i])   # yes, so add it
            else:                          # else, it's a single note
                self.addNote( Note(pitches[i], durations[i], dynamics[i], panoramics[i], lengths[i]) )
# Do NOT make these functions callable - Phrase class is meant to be instantiated,
# i.e., we will always call these from a Phrase object - not the class, e.g., as in Mod.
######################################################################################
#### jMusic Play extensions ##########################################################
######################################################################################
from jm.util import Play as jPlay # needed to wrap more functionality below
# Create Play.noteOn(pitch, velocity, channel) to start a MIDI note sounding,
# Play.noteOff(pitch, channel) to stop the corresponding note from sounding, and
# Play.setInstrument(instrument, channel) to change instrument for this channel.
#
# This adds to existing Play functionality.
# This class is not meant to be instantiated, hence no "self" in function definitions.
# Functions are made callable through class Callable, above.
from javax.sound.midi import *
# NOTE: Opening the Java synthesizer below generates some low-level noise in the audio output.
# But we need it to be open, in case the end-user wishes to use functions like Play.noteOn(), below.
# ( *** Is there a way to open it just-in-time, and/or close it when not used? I cannot think of one.)
Java_synthesizer = MidiSystem.getSynthesizer()   # get a Java synthesizer
Java_synthesizer.open()                          # and activate it (should we worry about close()???)
# make all instruments of the default soundbank available for program changes
Java_synthesizer.loadAllInstruments(Java_synthesizer.getDefaultSoundbank())
# The MIDI specification stipulates that pitch bend be a 14-bit value, where zero is
# maximum downward bend, 16383 is maximum upward bend, and 8192 is the center (no pitch bend).
PITCHBEND_MIN = 0         # maximum downward bend
PITCHBEND_MAX = 16383     # maximum upward bend
PITCHBEND_NORMAL = 8192   # center - no pitch bend

# per-channel pitchbend applied when playing a note / frequency (see Play class below);
# every one of the 16 MIDI channels starts out with no bend
CURRENT_PITCHBEND = dict.fromkeys(range(16), 0)
#########
# NOTE: The following code addresses Play.midi() functionality. In order to be able to stop music
# that is currently playing, we wrap the jMusic Play class inside a Python Play class and rebuild
# play music functionality from basic elements.
from jm.midi import MidiSynth # needed to play and loop MIDI
from time import sleep # needed to implement efficient busy-wait loops (see below)
from timer import * # needed to schedule future tasks
# allocate enough MidiSynths and reuse them (when available)
__midiSynths__ = []    # holds all available jMusic MidiSynths (pool is filled lazily on first use)
MAX_MIDI_SYNTHS = 12   # max number of concurrent MidiSynths allowed
                       # NOTE: This is an empirical value - not documented - may change.
def __getMidiSynth__():
    """Return the next available MidiSynth (if any), or None.

    On the first call, the full pool of MAX_MIDI_SYNTHS synthesizers is allocated;
    afterwards, the same pool is scanned for one that is not currently playing.
    """
    # lazily fill the pool of MidiSynths (this happens only once)
    if not __midiSynths__:
        for _ in range(MAX_MIDI_SYNTHS):
            __midiSynths__.append( MidiSynth() )   # create a new MIDI synthesizer
    # scan the pool for the first synthesizer that is not busy playing
    # (it's possible that all are busy, since this function may be called
    # repeatedly, while other music is still sounding)
    for midiSynth in __midiSynths__:
        if not midiSynth.isPlaying():
            return midiSynth   # found one - let them have it (hopefully, they will use it right away)
    return None   # every synthesizer in the pool is busy
# Provide a way to stop all MidiSynths from playing.
def __stopMidiSynths__():
    """Stop every allocated MidiSynth that is currently playing."""
    for synth in [s for s in __midiSynths__ if s.isPlaying()]:
        synth.stop()   # it was playing, so silence it
#########
# An envelope contains a list of attack times (in milliseconds, relative from the previous time)
# and values (to reach at those times), how long to wait (delay time, in milliseconds, relative
# from the previous time) to get to a sustain value, and then how long to wait to reach a value
# of zero (in milliseconds, relative from the end time).
class Envelope():
    """Amplitude envelope for audio notes.

    Holds parallel lists of attack times and values, a delay time to reach the
    sustain value, and a release time to fade to zero.  All times are in
    milliseconds; attack times and the delay are relative to the previous time,
    while the release is relative to the end of the note.
    """

    def __init__(self, attackTimes=None, attackValues=None, delayTime=1, sustainValue=1.0, releaseTime=2):
        # BUG FIX: use None sentinels instead of mutable default arguments, so
        # separate Envelope instances never share the same default attack lists
        if attackTimes is None:
            attackTimes = [2]
        if attackValues is None:
            attackValues = [1.0]
        # make sure attack times and values are parallel
        if len(attackValues) != len(attackTimes):
            raise IndexError("Envelope: attack times and values need to have the same length")
        self.attackTimes = attackTimes     # in milliseconds, relative from the previous time...
        self.attackValues = attackValues   # and the corresponding values
        self.delayTime = delayTime         # in milliseconds, relative from the previous time...
        self.sustainValue = sustainValue   # to reach this value
        self.releaseTime = releaseTime     # in milliseconds, relative from the end time

    def getAttackTimes(self):
        """Return the list of attack times (ms, each relative to the previous one)."""
        return self.attackTimes

    def getAttackValues(self):
        """Return the list of attack values (parallel to the attack times)."""
        return self.attackValues

    def getAttackTimesAndValues(self):
        """Return [attackTimes, attackValues] as a two-element list."""
        return [self.attackTimes, self.attackValues]

    def setAttackTimes(self, attackTimes):
        """Update the attack times (must stay parallel to the current attack values)."""
        if len(self.attackValues) != len(attackTimes):
            raise IndexError("Envelope.setAttackTimes(): attack times and values need to have the same length")
        self.attackTimes = attackTimes

    def setAttackValues(self, attackValues):
        """Update the attack values (must stay parallel to the current attack times)."""
        if len(attackValues) != len(self.attackTimes):
            raise IndexError("Envelope.setAttackValues(): attack times and values need to have the same length")
        self.attackValues = attackValues

    def setAttackTimesAndValues(self, attackTimes, attackValues):
        """Update attack times and values together (the two new lists must be parallel).

        BUG FIX: the original compared the new times against the *old* values, so
        valid parallel lists of a different length were wrongly rejected (and
        mismatched new lists could slip through); we now compare the two new
        lists against each other.
        """
        if len(attackValues) != len(attackTimes):
            raise IndexError("Envelope.setAttackTimesAndValues(): attack times and values need to have the same length")
        self.attackTimes = attackTimes
        self.attackValues = attackValues

    def getSustain(self):
        """Return the sustain value."""
        return self.sustainValue

    def setSustain(self, sustainValue):
        """Set the sustain value."""
        self.sustainValue = sustainValue

    def getDelay(self):
        """Return the delay time (ms, relative to the last attack time)."""
        return self.delayTime

    def setDelay(self, delayTime):
        """Set the delay time (ms, relative to the last attack time)."""
        self.delayTime = delayTime

    def getRelease(self):
        """Return the release time (ms, relative to the end of the note)."""
        return self.releaseTime

    def setRelease(self, releaseTime):
        """Set the release time (ms, relative to the end of the note)."""
        self.releaseTime = releaseTime

    def getLength(self):
        """Return the total envelope length in milliseconds (attacks + delay + release)."""
        return self.__getAbsoluteDelay__() + self.releaseTime

    def __getAbsoluteAttackTimes__(self):
        """Return the attack times as absolute offsets (ms) from the envelope's start."""
        absoluteAttackTimes = [ self.attackTimes[0] ]     # the first time is already absolute
        for i in range(1, len(self.attackTimes)):         # accumulate the remaining times
            absoluteAttackTimes.append(self.attackTimes[i] + absoluteAttackTimes[i-1])
        return absoluteAttackTimes

    def __getAbsoluteDelay__(self):
        """Return the absolute offset (ms) at which the sustain value is reached."""
        absoluteAttackTimes = self.__getAbsoluteAttackTimes__()
        return absoluteAttackTimes[-1] + self.delayTime
# Holds notes currently sounding, in order to prevent premature NOTE-OFF for overlapping notes on the same channel.
# For every frequencyOn() we add the tuple (pitch, channel), and for every frequencyOff() we remove the tuple.
# If it is the last one, we execute a NOTE-OFF (otherwise, we don't).
notesCurrentlyPlaying = []   # list of (pitch, channel) tuples - one entry per sounding note instance
class Play(jPlay):
    # redefine Play.midi to fix jMusic bug (see above) - now, we can play as many times as we wish.
    def midi(material):
        """Play jMusic material (Score, Part, Phrase, Note) using our own Play.note() function.

        The material is successively wrapped up to a Score; all notes are then flattened
        into one list of (start, duration, frequency, velocity, channel, instrument,
        panning) tuples (times in milliseconds), sorted by start time, and scheduled via
        Play.note() timers.  A chord is encoded as a run of zero-duration notes followed
        by one note carrying the chord's real duration (see Phrase.addChord()).
        """
        # do necessary datatype wrapping (MidiSynth() expects a Score)
        if type(material) == Note:
            material = Phrase(material)
        if type(material) == jNote:     # (also wrap jMusic default Notes, in addition to our own)
            material = Phrase(material)
        if type(material) == Phrase:    # no elif - we need to successively wrap from Note to Score
            material = Part(material)
            material.setInstrument(-1)  # indicate no default instrument (needed to access global instrument)
        if type(material) == jPhrase:   # (also wrap jMusic default Phrases, in addition to our own)
            material = Part(material)
            material.setInstrument(-1)  # indicate no default instrument (needed to access global instrument)
        if type(material) == Part:      # no elif - we need to successively wrap from Note to Score
            material = Score(material)
        if type(material) == Score:
            # we are good - let's play it then!
            score = material           # by now, material is a score, so create an alias (for readability)
            # loop through all parts and phrases to get all notes
            noteList = []              # holds all notes
            tempo = score.getTempo()   # get global tempo (can be overridden by part and phrase tempos)
            for part in score.getPartArray():                # traverse all parts
                channel = part.getChannel()                  # get part channel
                instrument = Play.getInstrument(channel)     # get global instrument for this channel
                if part.getInstrument() > -1:                # has the part instrument been set?
                    instrument = part.getInstrument()        # yes, so it takes precedence
                if part.getTempo() > -1:                     # has the part tempo been set?
                    tempo = part.getTempo()                  # yes, so update tempo
                for phrase in part.getPhraseArray():         # traverse all phrases in part
                    if phrase.getInstrument() > -1:          # is this phrase's instrument set?
                        instrument = phrase.getInstrument()  # yes, so it takes precedence
                    if phrase.getTempo() > -1:               # has the phrase tempo been set?
                        tempo = phrase.getTempo()            # yes, so update tempo
                    # time factor to convert time from jMusic Score units to milliseconds
                    # (this needs to happen here every time, as we may be using the tempo from score, part, or phrase)
                    FACTOR = 1000 * 60.0 / tempo
                    # process notes in this phrase
                    startTime = phrase.getStartTime() * FACTOR   # in milliseconds
                    for note in phrase.getNoteArray():
                        frequency = note.getFrequency()
                        panning = note.getPan()
                        panning = mapValue(panning, 0.0, 1.0, 0, 127)   # map from range 0.0..1.0 (Note panning) to range 0..127 (as expected by Java synthesizer)
                        start = int(startTime)                          # remember this note's start time (in milliseconds)
                        # NOTE: Below we use note length as opposed to duration (getLength() vs. getDuration())
                        # since note length gives us a more natural sounding note (with proper decay), whereas
                        # note duration captures the more formal (printed score) duration (which sounds unnatural).
                        duration = int(note.getLength() * FACTOR)             # get note length (as opposed to duration!) and convert to milliseconds
                        startTime = startTime + note.getDuration() * FACTOR   # update start time (in milliseconds)
                        velocity = note.getDynamic()
                        # accumulate non-REST notes
                        if (frequency != REST):
                            noteList.append((start, duration, frequency, velocity, channel, instrument, panning))   # put start time first and duration second, so we can sort easily by start time (below),
                            # and so that notes that are members of a chord as denoted by having a duration of 0 come before the note that gives the specified chord duration
            # sort notes by start time
            noteList.sort()
            # Schedule playing all notes in noteList
            chordNotes = []   # used to process notes belonging in a chord
            for start, duration, pitch, velocity, channel, instrument, panning in noteList:
                # set appropriate instrument for this channel
                Play.setInstrument(instrument, channel)
                # handle chord (if any)
                # Chords are denoted by a sequence of notes having the same start time and 0 duration (except the last note
                # of the chord).
                if duration == 0:   # does this note belong in a chord?
                    chordNotes.append([start, duration, pitch, velocity, channel, panning])   # add it to the list of chord notes
                elif chordNotes == []:   # is this a regular, solo note (not part of a chord)?
                    # yes, so schedule it to play via a Play.note event
                    Play.note(pitch, start, duration, velocity, channel, panning)
                    #print "Play.note(" + str(pitch) + ", " + str(int(start * FACTOR)) + ", " + str(int(duration * FACTOR)) + ", " + str(velocity) + ", " + str(channel) + ")"
                else:   # note has a normal duration and it is part of a chord
                    # first, add this note together with the other chord notes
                    chordNotes.append([start, duration, pitch, velocity, channel, panning])
                    # now, schedule all notes in the chord list using last note's duration
                    for start, ignoreThisDuration, pitch, velocity, channel, panning in chordNotes:
                        # schedule this note using chord's duration (provided by the last note in the chord)
                        Play.note(pitch, start, duration, velocity, channel, panning)
                        #print "Chord: Play.note(" + str(pitch) + ", " + str(int(start * FACTOR)) + ", " + str(int(duration * FACTOR)) + ", " + str(velocity) + ", " + str(channel) + ")"
                    # now, all chord notes have been scheduled
                    # so, clear chord notes to continue handling new notes (if any)
                    chordNotes = []
            # now, all notes have been scheduled for future playing - scheduled notes can always be stopped using
            # JEM's stop button - this will stop all running timers (used by Play.note() to schedule playing of notes)
            #print "Play.note(" + str(pitch) + ", " + str(int(start * FACTOR)) + ", " + str(int(duration * FACTOR)) + ", " + str(velocity) + ", " + str(channel) + ")"
        else:   # error check
            print "Play.midi(): Unrecognized type " + str(type(material)) + ", expected Note, Phrase, Part, or Score."
    # old way - should be removed in a future release (together with *all* references to __midiSynths__)
    def midi2(material):
        """This is the original Play.midi() - retained for backup and testing purposes.
        Play jMusic material (Score, Part, Phrase, Note) using next available MidiSynth (if any).

        Returns the MidiSynth used for playback (or None if all were busy)."""
        from jm.music.data import Phrase as jPhrase   # since we extend Phrase later
        midiSynth = __getMidiSynth__()   # get next available MidiSynth (or None if all busy)
        #midiSynth = MidiSynth()    # create a new MIDI synthesizer
        # did we find an available midiSynth?
        if midiSynth:
            # play the music
            # do necessary datatype wrapping (MidiSynth() expects a Score)
            if type(material) == Note:
                material = Phrase(material)
            if type(material) == jNote:     # (also wrap jMusic default Notes, in addition to our own)
                material = Phrase(material)
            if type(material) == Phrase:    # no elif - we need to successively wrap from Note to Score
                material = Part(material)
            if type(material) == jPhrase:   # (also wrap jMusic default Phrases, in addition to our own)
                material = Part(material)
            if type(material) == Part:      # no elif - we need to successively wrap from Note to Score
                material = Score(material)
            if type(material) == Score:
                midiSynth.play( material )   # play it!
            else:   # error check
                # NOTE(review): this message lacks a space after "type" - left unchanged here
                print "Play.midi(): Unrecognized type" + str(type(material)) + ", expected Note, Phrase, Part, or Score."
        else:   # error check
            print "Play.midi(): All", MAX_MIDI_SYNTHS, "MIDI synthesizers are busy - (try again later?)"
        return midiSynth   # return midiSynth playing
# NOTE: Here we connect noteOn() and frequencyOn() with noteOnPitchBend() to allow for
# playing microtonal music. Although this may seem as cyclical (i.e., that in noteOn() we
# convert pitch to frequency, and then call frequencyOn() which convert the frequency back to pitch,
# so that it can call noteOnPitchBend() ), this is the only way we can make everything work.
# We are constrained by the fact that jMusic Note objects are limited in how they handle pitch and
# frequency (i.e., that creating a Note with pitch will set the Note's corresponding frequency,
# but not the other way around), and the fact that we can call Note.getFrequency() above in Play.midi()
# without problem, but NOT Note.getPitch(), which will crash if the Note was instantiated with a frequency
# (i.e., pitch is not available / set).
# Therefore, we need to make the run about here, so that we keep everything else easier to code / maintain,
# and also keep the API (creating and play notes) simpler. So, do NOT try to simplify the following code,
# as it is the only way (I think) that can make everything else work simply - also see Play.midi().
def noteOn(pitch, velocity=100, channel=0, panning = -1):
"""Send a NOTE_ON message for this pitch to the Java synthesizer object. Default panning of -1 means to
use the default (global) panning setting of the Java synthesizer."""
if (type(pitch) == int) and (0 <= pitch <= 127): # a MIDI pitch?
# yes, so convert pitch from MIDI number (int) to Hertz (float)
pitch = noteToFreq(pitch)
if type(pitch) == float: # a pitch in Hertz?
Play.frequencyOn(pitch, velocity, channel, panning) # start it
else:
print "Play.noteOn(): Unrecognized pitch " + str(pitch) + ", expected MIDI pitch from 0 to 127 (int), or frequency in Hz from 8.17 to 12600.0 (float)."
    def frequencyOn(frequency, velocity=100, channel=0, panning = -1):
        """Send a NOTE_ON message for this frequency (in Hz) to the Java synthesizer object.
        Default panning of -1 means to use the default (global) panning setting of the
        Java synthesizer.  The note is registered in notesCurrentlyPlaying, so overlapping
        identical notes are not turned off prematurely (see frequencyOff())."""
        if (type(frequency) == float) and (8.17 <= frequency <= 12600.0):   # a pitch in Hertz (within MIDI pitch range 0 to 127)?
            pitch, bend = freqToNote( frequency )   # convert to MIDI note and pitch bend
            # also, keep track of how many overlapping instances of this pitch are currently sounding on this channel
            # so that we turn off only the last one - also see frequencyOff()
            noteID = (pitch, channel)              # create an ID using pitch-channel pair
            notesCurrentlyPlaying.append(noteID)   # add this note instance to list
            Play.noteOnPitchBend(pitch, bend, velocity, channel, panning)   # and start it
        else:
            print "Play.frequencyOn(): Invalid frequency " + str(frequency) + ", expected frequency in Hz from 8.17 to 12600.0 (float)."
def noteOff(pitch, channel=0):
"""Send a NOTE_OFF message for this pitch to the Java synthesizer object."""
if (type(pitch) == int) and (0 <= pitch <= 127): # a MIDI pitch?
# yes, so convert pitch from MIDI number (int) to Hertz (float)
pitch = noteToFreq(pitch)
if type(pitch) == float: # a pitch in Hertz?
Play.frequencyOff(pitch, channel) # stop it
else:
print "Play.noteOff(): Unrecognized pitch " + str(pitch) + ", expected MIDI pitch from 0 to 127 (int), or frequency in Hz from 8.17 to 12600.0 (float)."
    def frequencyOff(frequency, channel=0):
        """Send a NOTE_OFF message for this frequency (in Hz) to the Java synthesizer object.

        The NOTE_OFF is only actually sent when this is the last sounding instance of the
        corresponding pitch on this channel (tracked via notesCurrentlyPlaying) - this
        prevents premature NOTE_OFFs when identical notes overlap."""
        global Java_synthesizer
        if (type(frequency) == float) and (8.17 <= frequency <= 12600.0):   # a frequency in Hertz (within MIDI pitch range 0 to 127)?
            pitch, bend = freqToNote( frequency )   # convert to MIDI note and pitch bend
            # also, keep track of how many overlapping instances of this frequency are currently playing on this channel
            # so that we turn off only the last one - also see frequencyOn()
            noteID = (pitch, channel)   # create an ID using pitch-channel pair
            # next, remove this noteID from the list, so that we may check for remaining instances
            # NOTE(review): an unmatched frequencyOff() (no prior frequencyOn()) would raise
            # ValueError here - presumably callers always pair the two; confirm.
            notesCurrentlyPlaying.remove(noteID)      # remove noteID
            if noteID not in notesCurrentlyPlaying:   # is this last instance of note?
                # yes, so turn it off!
                channelHandle = Java_synthesizer.getChannels()[channel]   # get a handle to channel
                channelHandle.noteOff(pitch)                              # and turn it off
        else:   # frequency was outside expected range
            print "Play.frequencyOff(): Invalid frequency " + str(frequency) + ", expected frequency in Hz from 8.17 to 12600.0 (float)."
# NOTE: Just to be good citizens, also turn pitch bend to normal (i.e., no bend).
# Play.setPitchBend(0, channel)
# Commented out below, because it might give the impression that different pitch bends
# signify different notes to be turned off - not so. NOTE_OFF messages are based solely on pitch.
#
# def noteOffPitchBend(pitch, bend = 0, channel=0):
# """Send a NOTE_OFF message for this pitch to the Java synthesizer object."""
# # NOTE_OFF messages are based on pitch (i.e., pitch bend is irrelevant / ignored)
# Play.noteOff(pitch, channel)
def note(pitch, start, duration, velocity=100, channel=0, panning = -1):
"""Plays a note with given 'start' time (in milliseconds from now), 'duration' (in milliseconds
from 'start' time), with given 'velocity' on 'channel'. Default panning of -1 means to
use the default (global) panning setting of the Java synthesizer. """
# TODO: We should probably test for negative start times and durations.
# create a timer for the note-on event
noteOn = Timer2(start, Play.noteOn, [pitch, velocity, channel, panning], False)
# create a timer for the note-off event
noteOff = Timer2(start+duration, Play.noteOff, [pitch, channel], False)
# and activate timers (set things in motion)
noteOn.start()
noteOff.start()
# NOTE: Upon completion of this function, the two Timer objects become unreferenced.
# When the timers elapse, then the two objects (in theory) should be garbage-collectable,
# and should be eventually cleaned up. So, here, no effort is made in reusing timer objects, etc.
    def frequency(frequency, start, duration, velocity=100, channel=0, panning = -1):
        """Plays a frequency with given 'start' time (in milliseconds from now), 'duration' (in milliseconds
        from 'start' time), with given 'velocity' on 'channel'.  Default panning of -1 means to
        use the default (global) panning setting of the Java synthesizer."""
        # NOTE: We assume that the end-user will ensure that concurrent microtones end up on
        # different channels.  This is needed since MIDI has only one pitch bend per channel,
        # and most microtones require their unique pitch bending.
        # TODO: We should probably test for negative start times and durations.
        # create a timer for the frequency-on event
        frequencyOn = Timer2(start, Play.frequencyOn, [frequency, velocity, channel, panning], False)
        # create a timer for the frequency-off event
        frequencyOff = Timer2(start+duration, Play.frequencyOff, [frequency, channel], False)
        # call pitchBendNormal to turn off the timer, if it is on
        #setPitchBendNormal(channel)
        # and activate timers (set things in motion)
        frequencyOn.start()
        frequencyOff.start()
        #setPitchBendNormal(channel, start+duration, True)
# No (normal) pitch bend is 0, max downward bend is -8192, and max upward bend is 8191.
# (Result is undefined if you exceed these values - it may wrap around or it may cap.)
def setPitchBend(bend = 0, channel=0):
"""Set global pitchbend variable to be used when a note / frequency is played."""
if (bend <= 8191) and (bend >= -8192): # is pitchbend within appropriate range?
CURRENT_PITCHBEND[channel] = bend
else: # frequency was outside expected range
print "Play.setPitchBend(): Invalid pitchbend " + str(bend) + ", expected pitchbend in range -8192 to 8191."
    def getPitchBend(channel=0):
        """Return the current (global) pitchbend for this channel, as set via setPitchBend()."""
        return CURRENT_PITCHBEND[channel]
    # No (normal) pitch bend is 0, max downward bend is -8192, and max upward bend is 8191.
    # (Result is undefined if you exceed these values - it may wrap around or it may cap.)
    def noteOnPitchBend(pitch, bend = 0, velocity=100, channel=0, panning = -1):
        """Send a NOTE_ON message for this pitch and pitch bend to the Java synthesizer object.
        Default panning of -1 means to use the default (global) panning setting of the Java synthesizer."""
        global Java_synthesizer
        #Play.setPitchBend(bend, channel)   # remember current pitchbend for this channel
        # now, really set the pitchbend on the Java synthesizer (this is the only place this is done!)
        channelHandle = Java_synthesizer.getChannels()[channel]   # get a handle to channel
        # NOTE: Normal (or no) pitch bend is 0, max downward bend is -8192, and max upward bend is 8191.
        # However, internally, the MIDI specification wants normal pitch bend to be 8192, max downward
        # bend to be 0, and max upward bend to be 16383.
        # Here we translate the external pitch bend specification (-8192 to 0 to 8191) to the internal
        # MIDI pitch bend specification (0 to 8192 to 16383).
        # Also, we add the current (global) pitchbend for this channel, as set via Play.setPitchBend().
        pitchbend = bend + PITCHBEND_NORMAL + CURRENT_PITCHBEND[channel]    # calculate pitchbend to set
        if (pitchbend <= PITCHBEND_MAX) and (pitchbend >= PITCHBEND_MIN):   # is pitchbend within appropriate range?
            channelHandle.setPitchBend( pitchbend )   # send message
            # and send message to start the note on this channel
            if panning != -1:                              # if we have a specific panning...
                channelHandle.controlChange(10, panning)   # ...then use it (otherwise let default / global panning stand)
            channelHandle.noteOn(pitch, velocity)          # and start the note on Java synthesizer
        else:   # pitchbend was outside expected range
            print "Play.noteOnPitchBend(): Invalid pitchbend " + str(pitchbend - PITCHBEND_NORMAL) + \
                  ", expected pitchbend in range " + str(PITCHBEND_MIN-PITCHBEND_NORMAL) + " to " + str(PITCHBEND_MAX-PITCHBEND_NORMAL) + "."
    def allNotesOff():
        """Turn off all notes on all channels (simply delegates to allFrequenciesOff())."""
        Play.allFrequenciesOff()
def allFrequenciesOff():
"""It turns off all notes on all channels."""
global Java_synthesizer
for channel in range(16): # cycle through all channels
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
channelHandle.allNotesOff() # send the message
# also reset pitch bend
Play.setPitchBend(0, channel)
    def stop():
        """It stops all Play music from sounding."""
        # NOTE: This could also handle Play.note() notes, which may have been
        # scheduled to start sometime in the future.  For now, we assume that timer.py
        # (which provides Timer objects) handles stopping of timers on its own.  If so,
        # this takes care of our problem, for all practical purposes.  It is possible
        # to have a race condition (i.e., a note that starts playing right when stop()
        # is called), but a second call of stop() (e.g., double pressing of a stop button)
        # will handle this, so we do not concern ourselves with it.
        # first, stop the internal __getMidiSynth__ synthesizers
        __stopMidiSynths__()
        # then, stop all sounding notes (both MIDI and audio-sample based)
        Play.allNotesOff()
        Play.allAudioNotesOff()
        # NOTE: In the future, we may also want to handle scheduled notes through Play.note().  This could be done
        # by creating a list of Timers created via note() and looping through them to stop them here.
    def setInstrument(instrument, channel=0):
        """Send a patch change message for this channel to the Java synthesizer object."""
        global Java_synthesizer
        channelHandle = Java_synthesizer.getChannels()[channel]   # get a handle to channel
        # NOTE(review): MidiChannel.programChange(int, int) takes (bank, program) - passing the
        # channel number as the bank looks suspicious (programChange(instrument) may have been
        # intended); confirm against the javax.sound.midi docs before changing.
        channelHandle.programChange(channel, instrument)   # send the message
def getInstrument(channel=0):
"""Gets the current instrument for this channel of the Java synthesizer object."""
global Java_synthesizer
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
instrument = channelHandle.getProgram() # get the instrument
return instrument
def setVolume(volume, channel=0):
"""Sets the current coarse volume for this channel to the Java synthesizer object."""
global Java_synthesizer
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
channelHandle.controlChange(7, volume) # send the message
def getVolume(channel=0):
"""Gets the current coarse volume for this channel of the Java synthesizer object."""
global Java_synthesizer
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
return channelHandle.getController(7) # obtain the current value for volume controller
def setPanning(panning, channel=0):
"""Sets the current panning setting for this channel to the Java synthesizer object."""
global Java_synthesizer
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
channelHandle.controlChange(10, panning) # send the message
def getPanning(channel=0):
"""Gets the current panning setting for this channel of the Java synthesizer object."""
global Java_synthesizer
channelHandle = Java_synthesizer.getChannels()[channel] # get a handle to channel
return channelHandle.getController(10) # obtain the current value for panning controller
    def audio(material, listOfAudioSamples, listOfEnvelopes = []):
        """Play jMusic material using a list of audio samples as voices.

        The part channel is used as an index into listOfAudioSamples (and, if provided,
        listOfEnvelopes).  Unlike Play.midi(), chords are NOT supported here - every
        note is scheduled individually via Play.audioNote().

        NOTE(review): listOfEnvelopes is a mutable default argument; it is never mutated
        here, so this is harmless in practice, but a None sentinel would be safer.
        """
        # do necessary datatype wrapping (MidiSynth() expects a Score)
        if type(material) == Note:
            material = Phrase(material)
        if type(material) == jNote:     # (also wrap jMusic default Notes, in addition to our own)
            material = Phrase(material)
        if type(material) == Phrase:    # no elif - we need to successively wrap from Note to Score
            material = Part(material)
        if type(material) == jPhrase:   # (also wrap jMusic default Phrases, in addition to our own)
            material = Part(material)
        if type(material) == Part:      # no elif - we need to successively wrap from Note to Score
            material = Score(material)
        if type(material) == Score:
            # we are good - let's play it then!
            score = material           # by now, material is a score, so create an alias (for readability)
            # loop through all parts and phrases to get all notes
            noteList = []              # holds all notes
            tempo = score.getTempo()   # get global tempo (can be overridden by part and phrase tempos)
            for part in score.getPartArray():   # traverse all parts
                # NOTE: channel is used as an index for the audio voice
                channel = part.getChannel()         # get part channel
                instrument = part.getInstrument()   # get part instrument
                if part.getTempo() > -1:            # has the part tempo been set?
                    tempo = part.getTempo()         # yes, so update tempo
                for phrase in part.getPhraseArray():         # traverse all phrases in part
                    if phrase.getInstrument() > -1:          # is this phrase's instrument set?
                        instrument = phrase.getInstrument()  # yes, so it takes precedence
                    if phrase.getTempo() > -1:               # has the phrase tempo been set?
                        tempo = phrase.getTempo()            # yes, so update tempo
                    # time factor to convert time from jMusic Score units to milliseconds
                    # (this needs to happen here every time, as we may be using the tempo from score, part, or phrase)
                    FACTOR = 1000 * 60.0 / tempo
                    for index in range(phrase.length()):   # traverse all notes in this phrase
                        note = phrase.getNote(index)       # and extract needed note data
                        frequency = note.getFrequency()
                        panning = note.getPan()
                        panning = mapValue(panning, 0.0, 1.0, 0, 127)          # map from range 0.0..1.0 (Note panning) to range 0..127 (as expected by Java synthesizer)
                        start = int(phrase.getNoteStartTime(index) * FACTOR)   # get time and convert to milliseconds
                        # NOTE: Below we use note length as opposed to duration (getLength() vs. getDuration())
                        # since note length gives us a more natural sounding note (with proper decay), whereas
                        # note duration captures the more formal (printed score) duration (which sounds unnatural).
                        duration = int(note.getLength() * FACTOR)   # get note length (as opposed to duration!) and convert to milliseconds
                        velocity = note.getDynamic()
                        # accumulate non-REST notes
                        if (frequency != REST):
                            noteList.append((start, duration, frequency, velocity, channel, instrument, panning))   # put start time first and duration second, so we can sort easily by start time (below),
                            # and so that notes that are members of a chord as denoted by having a duration of 0 come before the note that gives the specified chord duration
            # sort notes by start time
            noteList.sort()
            for start, duration, pitch, velocity, channel, instrument, panning in noteList:
                # this function only supports a regular, solo note (not part of a chord)
                if len(listOfEnvelopes) != 0:   # envelopes provided?
                    Play.audioNote(pitch, start, duration, listOfAudioSamples[channel], velocity, panning, listOfEnvelopes[channel])
                else:                           # no, so use audioNote's default envelope
                    Play.audioNote(pitch, start, duration, listOfAudioSamples[channel], velocity, panning)
            # now, all notes have been scheduled for future playing - scheduled notes can always be stopped using
            # JEM's stop button - this will stop all running timers (used by Play.note() to schedule playing of notes)
            #print "Play.note(" + str(pitch) + ", " + str(int(start * FACTOR)) + ", " + str(int(duration * FACTOR)) + ", " + str(velocity) + ", " + str(channel) + ")"
        else:   # error check
            print "Play.audio(): Unrecognized type " + str(type(material)) + ", expected Note, Phrase, Part, or Score."
def audioNote(pitch, start, duration, audioSample, velocity = 127, panning = -1, envelope = Envelope()):
    """
    Schedule a single note to be played through an AudioSample.

    pitch       - MIDI pitch (int, 0-127) or frequency in Hertz (float)
    start       - when to begin the note, in milliseconds from now
    duration    - how long the note lasts, in milliseconds
    audioSample - the AudioSample used to generate the sound
    velocity    - note loudness (0-127)
    panning     - note panning (0-127), or -1 to use the global / default panning
    envelope    - an Envelope shaping the note's amplitude over time

    Nothing sounds immediately - Timer2 objects are created to fire the envelope's
    volume changes and the note-on / note-off events at the proper future times.

    NOTE(review): the default "envelope = Envelope()" is a mutable default argument,
    shared across every call that omits it - confirm Envelope is never mutated in place.
    """
    if (type(pitch) == int) and (0 <= pitch <= 127):   # a MIDI pitch?
        # yes, so convert pitch from MIDI number (int) to Hertz (float)
        pitch = noteToFreq(pitch)

    # apply envelope to note - but only if the envelope actually fits within the note
    if envelope.getLength() > duration:
        # envelope does not fit, so report the problem (note that, in this case, nothing is played at all)
        print("Play.audioNote(): Envelope is too large for this note,\n midi: " + str(pitch) + "\nnote length: " + str(duration) + "\nenvelope length: " + str(envelope.getLength()))
    else:
        envelopeLength = envelope.getLength()   # NOTE(review): unused below - kept in place, byte-for-byte

        # now create the list of delays that will be passed to the setVolume method,
        # converting each attack time from milliseconds to seconds (as the amplitude smoother expects)
        attackDelays = []
        attackTimes = envelope.getAttackTimes()
        for i in range(len(attackTimes)):
            attackDelays.append(float(attackTimes[i]) / 1000.0)
        # now we have a list of how long each attack lasts in seconds

        delayDelay = float(envelope.getDelay() / 1000.0)       # how long the envelope's delay segment lasts, in seconds
        releaseDelay = float(envelope.getRelease() / 1000.0)   # how long the release segment lasts, in seconds

        # adjust attackValues relative to note velocity
        relativeAttackValues = []
        for i in range( len(envelope.getAttackValues() ) ):
            relativeValue = mapValue( envelope.getAttackValues()[i], 0.0, 1.0, 0, velocity )   # adjust
            relativeAttackValues.append( relativeValue )                                       # and remember

        # adjust sustainValue relative to note velocity
        relativeSustainValue = mapValue( envelope.sustainValue, 0.0, 1.0, 0, velocity )

        # get absolute release time (measured from the start of the note)
        absoluteReleaseTime = duration - envelope.getRelease()

        # create list of timers to perform volume changes
        attackTimers = []

        # first, create timers for envelope attack
        for i in range( len(relativeAttackValues) ):
            timerOffset = envelope.__getAbsoluteAttackTimes__()[i]   # when (relative to note start) this attack step fires
            volume = relativeAttackValues[i]
            delay = attackDelays[i]
            timer = Timer2(start+timerOffset, audioSample.setVolume, [volume, delay], False)
            attackTimers.append( timer )

        # now, create timers for envelope sustain and release
        sustainTimer = Timer2(start + envelope.__getAbsoluteDelay__(), audioSample.setVolume, [relativeSustainValue, delayDelay], False)
        releaseTimer = Timer2(start + absoluteReleaseTime, audioSample.setVolume, [0, releaseDelay], False)

        # finally, create timers for note-on and note-off events
        audioOn = Timer2(start, Play.audioOn, [pitch, audioSample, velocity, panning], False)
        audioOff = Timer2(start+duration, Play.audioOff, [pitch, audioSample], False)

        # everything is ready, so start timers to schedule playing of note
        audioOn.start()
        for i in range(len(attackTimers)):
            attackTimers[i].start()
        sustainTimer.start()
        releaseTimer.start()
        audioOff.start()
def audioOn(pitch, audioSample, velocity = 127, panning = -1):
    """Begin sounding the given pitch on the provided audio sample, at the specified volume and panning."""
    # decide which panning to apply - the caller's explicit value, or the global default
    if panning == -1:
        audioSample.setPanning( Play.getPanning() )   # no explicit panning given, so fall back to the global setting
    else:
        audioSample.setPanning( panning )             # honor the panning requested by the caller
    audioSample.setFrequency( pitch )                 # tune the sample to the requested pitch
    audioSample.setVolume( velocity )                 # apply the requested loudness
    audioSample.play()                                # and sound it!
def audioOff(pitch, audioSample):
    """Silence the provided audio sample (pitch is accepted for symmetry with audioOn, but currently unused)."""
    # NOTE: pitch is ignored for now - an AudioSample sounds only one pitch at a time,
    # so stopping the sample stops the note
    audioSample.stop()
def allAudioNotesOff():
    """Turn off every note currently sounding on any audio sample."""
    # NOTE: This is a blunt instrument - it silences *every* active AudioSample,
    # which is a quick way to stop music started via Play.audio().
    __stopActiveAudioSamples__()
    # NOTE: Notes merely *scheduled* (but not yet sounding) via Play.audio() are not cancelled
    # here.  Handling those would mean keeping a list of the AudioSamples and Timers created
    # by audioNote(), and looping through it to stop them as well.
########################################################################
# Make these functions callable without having to instantiate this class.
# (Callable is presumably a static-method-style wrapper - see its definition
# earlier in this file to confirm.)
midi = Callable(midi)
midi2 = Callable(midi2)
noteOn = Callable(noteOn)
noteOnPitchBend = Callable(noteOnPitchBend)
noteOff = Callable(noteOff)
note = Callable(note)
frequency = Callable(frequency)
#microtonal = Callable(microtonal)                 # (disabled)
#noteOffPitchBend = Callable(noteOffPitchBend)     # (disabled)
allNotesOff = Callable(allNotesOff)
frequencyOn = Callable(frequencyOn)
frequencyOff = Callable(frequencyOff)
allFrequenciesOff = Callable(allFrequenciesOff)
stop = Callable(stop)
setInstrument = Callable(setInstrument)
getInstrument = Callable(getInstrument)
setVolume = Callable(setVolume)
getVolume = Callable(getVolume)
setPanning = Callable(setPanning)
getPanning = Callable(getPanning)
setPitchBend = Callable(setPitchBend)
getPitchBend = Callable(getPitchBend)
#setPitchBendNormal = Callable(setPitchBendNormal) # (disabled)
audioNote = Callable(audioNote)
audio = Callable(audio)
audioOn = Callable(audioOn)
audioOff = Callable(audioOff)
allAudioNotesOff = Callable(allAudioNotesOff)

######################################################################################
# If running inside JEM, register function that stops music from playing, when the
# Stop button is pressed inside JEM.
######################################################################################
try:
    # if we are inside JEM, registerStopFunction() will be available
    registerStopFunction(Play.stop)   # tell JEM which function to call when the Stop button is pressed
except:   # otherwise (if we get an error), we are NOT inside JEM
    pass   # so, do nothing.
######################################################################################
#### jSyn extensions #################################################################
######################################################################################
##### jSyn synthesizer ######################################
# create the jSyn synthesizer (one synthesizer for everything)
from java.io import * # for File
from math import *
class jSyn_AudioEngine():
    """
    Encapsulates a jSyn synthesizer.  Only one instance may exist (a singleton) - there is
    no need for more, since a single synthesizer can service every audio sample.
    We modularize the synth and its operations in a class for convenience.
    """
    instance = None   # the single allowable instance (None until first construction)

    def __init__(self):
        # import jSyn stuff here, so as to not polute the global namespace
        from com.jsyn import JSyn
        from com.jsyn.data import FloatSample
        from com.jsyn.unitgen import LineOut, Pan, VariableRateMonoReader, VariableRateStereoReader, LinearRamp, FixedRateMonoWriter, FixedRateStereoWriter
        from com.jsyn.util import SampleLoader

        if jSyn_AudioEngine.instance is None:           # first time?  (identity test - idiomatic for None)
            self.synth = JSyn.createSynthesizer()       # create synthesizer
            jSyn_AudioEngine.instance = self            # remember the only allowable instance
            self.samples = []                           # holds audio samples connected to synthesizer
        else:                                           # an instance already exists
            print("Only one jSyn audio engine may exist (use existing one).")

    def start(self):
        """Starts the synthesizer, and the lineOut unit of every connected sample."""
        self.synth.start()
        for sample in self.samples:      # also start all the sample lineOut units
            sample.lineOut.start()

    def stop(self):
        """Stops the synthesizer, and the lineOut unit of every connected sample."""
        self.synth.stop()                # stop the synth
        for sample in self.samples:      # and all the sample lineOut units
            sample.lineOut.stop()

    # *** This should probably be happening inside AudioSample() - much cleaner.
    def add(self, sample):
        """Connects an audio sample's units to the jSyn synthesizer (and remembers the sample)."""
        self.synth.add( sample.player )             # add the sample's player to the synth
        self.synth.add( sample.amplitudeSmoother )  # add the sample's amplitude linearRamp to the synth
        self.synth.add( sample.panLeft )            # add the sample's left pan control to the synth
        self.synth.add( sample.panRight )           # add the sample's right pan control to the synth
        self.synth.add( sample.lineOut )            # add the sample's output mixer to the synth
        self.samples.append( sample )               # remember this sample
# *** NOTE: This synthesizer should be started only when an audio file (AudioSample) is created.
#     Perhaps do the same with the Java synthesizer above?  Is that synthesizer needed?

# create the jSyn synthesizer (again, only one for everything)
jSyn = jSyn_AudioEngine()
jSyn.start()   # should this be happening here? (or inside the Audio class, when needed?) ***

# used to keep track which AudioSample and LiveSample objects are active, so we can stop
# them when JEM's Stop button is pressed (see __stopActiveAudioSamples__ below)
__ActiveAudioSamples__ = []   # holds active AudioSample and LiveSample objects
##### AudioSample class ######################################
import os # to check if provided filename exists
class AudioSample():
    """
    Encapsulates a sound object created from an external audio file, which can be played once,
    looped, paused, resumed, and stopped.  Also, each sound has a MIDI pitch associated with it
    (default is A4), so we can play different pitches with it (through pitch shifting).
    Finally, we can set/get its volume (0-127), panning (0-127), pitch (0-127), and frequency (in Hz).
    Ideally, an audio object will be created with a specific pitch in mind.
    Supported data formats are WAV or AIF files (16, 24 and 32 bit PCM, and 32-bit float).
    """

    def __init__(self, filename, referencePitch=A4, volume=127):
        """
        filename       - the audio file to load (WAV or AIF)
        referencePitch - the pitch at which the recording actually sounds: a MIDI pitch
                         (int, 0-127), or a frequency in Hertz (float, e.g., 440.0)
        volume         - initial volume (0-127)

        Raises ValueError if the file does not exist; TypeError if referencePitch is
        neither an int in 0-127 nor a float, or if the file is not mono or stereo.
        """
        # import jSyn stuff here, so as to not polute the global namespace
        from com.jsyn import JSyn
        from com.jsyn.data import FloatSample
        from com.jsyn.unitgen import LineOut, Pan, VariableRateMonoReader, VariableRateStereoReader, LinearRamp, FixedRateMonoWriter, FixedRateStereoWriter
        from com.jsyn.util import SampleLoader

        # ensure the file exists (jSyn will NOT complain on its own)
        if not os.path.isfile(filename):
            raise ValueError("File '" + str(filename) + "' does not exist.")

        # file exists, so continue
        self.filename = filename

        # remember if sample is paused or not - needed for function isPaused()
        self.hasPaused = False

        # load and create the audio sample
        SampleLoader.setJavaSoundPreferred( False )            # use internal jSyn sound processes
        datafile = File(self.filename)                         # get sound file
        self.sample = SampleLoader.loadFloatSample( datafile ) # load it as a jSyn sample
        self.channels = self.sample.getChannelsPerFrame()      # get number of channels in sample

        # create lineOut unit (it mixes output to computer's audio (DAC) card)
        self.lineOut = LineOut()

        # create panning control (we simulate this using two pan controls, one for the left channel and
        # another for the right channel) - to pan we adjust their respective pan
        self.panLeft = Pan()
        self.panRight = Pan()

        # NOTE: The two pan controls have only one of their outputs (as their names indicate)
        # connected to LineOut.  This way, we can set their pan value as we would normally, and not worry
        # about clipping (i.e., doubling the output amplitude).  Also, this works for both mono and
        # stereo samples.

        # create sample player (mono or stereo, as needed) and connect to lineOut mixer
        if self.channels == 1:                                       # mono audio?
            self.player = VariableRateMonoReader()                   # create mono sample player
            self.player.output.connect( 0, self.panLeft.input, 0)    # connect single channel to pan control
            self.player.output.connect( 0, self.panRight.input, 0)
        elif self.channels == 2:                                     # stereo audio?
            self.player = VariableRateStereoReader()                 # create stereo sample player
            self.player.output.connect( 0, self.panLeft.input, 0)    # connect both channels to pan control
            self.player.output.connect( 1, self.panRight.input, 0)
        else:
            raise TypeError( "Can only play mono or stereo samples." )

        # now that we have a player, set the default and current pitches -
        # check if the reference is a midi pitch (int) or a frequency (float)
        if (type(referencePitch) == int) and (0 <= referencePitch <= 127):             # is reference pitch in MIDI (an int)?
            self.referencePitch = referencePitch                                       # remember reference pitch
            self.referenceFrequency = self.__convertPitchToFrequency__(referencePitch) # and corresponding reference frequency
            self.pitch = self.referencePitch                 # initialize playback pitch (may be different from reference pitch)
            self.frequency = self.referenceFrequency         # and corresponding playback frequency
        elif type(referencePitch) == float:                  # is reference pitch a frequency (a float, in Hz)?
            self.referenceFrequency = referencePitch                                   # remember reference frequency
            self.referencePitch = self.__convertFrequencyToPitch__(referencePitch)     # convert reference frequency to corresponding MIDI pitch
            self.pitch = self.referencePitch                 # initialize playback pitch (may be different from reference pitch)
            self.frequency = referencePitch                  # and corresponding playback frequency
        else:   # otherwise this is an error, so let them know
            raise TypeError("Reference pitch (" + str(referencePitch) + ") should be an int (range 0 and 127) or float (such as 440.0).")
        # (a stray, unreachable print statement - which also referenced an undefined variable -
        #  used to follow the raise above; it has been removed)

        # now, connect pan control to mixer
        self.panLeft.output.connect( 0, self.lineOut.input, 0 )
        self.panRight.output.connect( 1, self.lineOut.input, 1 )

        # now that panning is set up, initialize it to center
        self.panning = 63                # ranges from 0 (left) to 127 (right) - 63 is center
        self.setPanning( self.panning )  # and initialize

        # smooth out (linearly ramp) changes in player amplitude (without this, we get clicks)
        self.amplitudeSmoother = LinearRamp()
        self.amplitudeSmoother.output.connect( self.player.amplitude )   # connect to player's amplitude
        self.amplitudeSmoother.input.setup( 0.0, 0.5, 1.0 )              # set minimum, current, and maximum settings for control
        self.amplitudeSmoother.time.set( 0.0002 )                        # and how many seconds to take for smoothing amplitude changes

        # play at original pitch
        self.player.rate.set( self.sample.getFrameRate() )

        self.volume = volume             # holds current volume (0 - 127)
        self.setVolume( self.volume )    # set the desired volume

        # NOTE: Adding to global jSyn synthesizer
        jSyn.add(self)   # connect sample unit to the jSyn synthesizer

        # remember that this AudioSample has been created and is active (so that it can be stopped by JEM, if desired)
        __ActiveAudioSamples__.append(self)

    ### functions to control playback and looping ######################
    def play(self, start=0, size=-1):
        """
        Play the sample once from the millisecond 'start' until the millisecond 'start'+'size'
        (size == -1 means to the end).  If 'start' and 'size' are omitted, play the complete sample.
        """
        # for faster response, we restart playing (as opposed to queue at the end)
        if self.isPlaying():   # is another play on?
            self.stop()        # yes, so stop it
        self.loop(1, start, size)

    def loop(self, times = -1, start=0, size=-1):
        """
        Repeat the sample indefinitely (times = -1), or the specified number of times
        from millisecond 'start' until millisecond 'start'+'size' (size == -1 means to the end).
        If 'start' and 'size' are omitted, repeat the complete sample.
        """
        startFrames = self.__msToFrames__(start)
        sizeFrames = self.__msToFrames__(size)

        self.lineOut.start()   # should this be here? ***

        if size == -1:   # to the end?
            sizeFrames = self.sample.getNumFrames() - startFrames   # calculate number of frames to the end

        if times == -1:   # loop forever?
            self.player.dataQueue.queueLoop( self.sample, startFrames, sizeFrames )
        else:             # loop specified number of times
            self.player.dataQueue.queueLoop( self.sample, startFrames, sizeFrames, times-1 )

    def stop(self):
        """
        Stop the sample play.
        """
        self.player.dataQueue.clear()
        self.hasPaused = False   # reset

    def isPlaying(self):
        """
        Returns True if the sample is still playing.
        """
        return self.player.dataQueue.hasMore()

    def isPaused(self):
        """
        Returns True if the sample is paused.
        """
        return self.hasPaused

    def pause(self):
        """
        Pause playing recorded sample.
        """
        if self.hasPaused:
            print("Sample is already paused!")
        else:
            self.lineOut.stop()     # pause playing
            self.hasPaused = True   # remember sample is paused

    def resume(self):
        """
        Resume playing the sample from the paused position.
        """
        if not self.hasPaused:
            print("Sample is already playing!")
        else:
            self.lineOut.start()     # resume playing
            self.hasPaused = False   # remember the sample is not paused

    def setFrequency(self, freq):
        """
        Set sample's playback frequency.
        """
        rateChangeFactor = float(freq) / self.frequency        # calculate change on playback rate
        self.frequency = freq                                  # remember new frequency
        self.pitch = self.__convertFrequencyToPitch__(freq)    # and corresponding pitch
        self.__setPlaybackRate__(self.__getPlaybackRate__() * rateChangeFactor)   # and set new playback rate

    def getFrequency(self):
        """
        Return sample's playback frequency.
        """
        return self.frequency

    def setPitch(self, pitch):
        """
        Set sample playback pitch.
        """
        self.pitch = pitch                                         # remember new playback pitch
        self.setFrequency(self.__convertPitchToFrequency__(pitch)) # update playback frequency (this changes the playback rate)

    def getPitch(self):
        """
        Return sample's current pitch (it may be different from the reference pitch).
        """
        return self.pitch

    def getReferencePitch(self):
        """
        Return sample's reference pitch.
        """
        return self.referencePitch

    def getReferenceFrequency(self):
        """
        Return sample's reference frequency.
        """
        return self.referenceFrequency

    def setPanning(self, panning):
        """
        Set panning of sample (panning ranges from 0 - 127).
        """
        if panning < 0 or panning > 127:
            print("Panning (" + str(panning) + ") should range from 0 to 127.")
        else:
            self.panning = panning                                 # remember it
            panValue = mapValue(self.panning, 0, 127, -1.0, 1.0)   # map panning from 0,127 to -1.0,1.0
            self.panLeft.pan.set(panValue)                         # and set it
            self.panRight.pan.set(panValue)

    def getPanning(self):
        """
        Return sample's current panning (panning ranges from 0 - 127).
        """
        return self.panning

    def setVolume(self, volume, delay = 0.0002):
        """
        Set sample's volume (volume ranges from 0 - 127); 'delay' is how many seconds
        the amplitude smoother takes to reach the new volume (avoids clicks).
        """
        if volume < 0 or volume > 127:
            print("Volume (" + str(volume) + ") should range from 0 to 127.")
        elif delay < 0.0:
            print("Delay (" + str(delay) + ") should be at least 0.0")
        else:
            self.volume = volume                              # remember new volume
            amplitude = mapValue(self.volume,0,127,0.0,1.0)   # map volume to amplitude
            self.amplitudeSmoother.input.set( amplitude )     # and set it
            self.amplitudeSmoother.time.set(delay)            # set delay time

    def getVolume(self):
        """
        Return sample's current volume (volume ranges from 0 - 127).
        """
        return self.volume

    ### low-level functions related to FrameRate and PlaybackRate ######################
    def getFrameRate(self):
        """
        Return the sample's default recording rate (e.g., 44100.0 Hz).
        """
        return self.sample.getFrameRate()

    def __setPlaybackRate__(self, newRate):
        """
        Set the sample's playback rate (e.g., 44100.0 Hz).
        """
        self.player.rate.set( newRate )

    def __getPlaybackRate__(self):
        """
        Return the sample's playback rate (e.g., 44100.0 Hz).
        """
        return self.player.rate.get()

    def __msToFrames__(self, milliseconds):
        """
        Converts milliseconds to frames based on the frame rate of the sample.
        """
        return int(self.getFrameRate() * (milliseconds / 1000.0))

    ### helper functions for various conversions ######################

    # Calculate frequency in Hertz based on MIDI pitch.  Middle C is 60.0.  You
    # can use fractional pitches so 60.5 would give you a pitch half way
    # between C and C#.  (by Phil Burk (C) 2009 Mobileer Inc)
    def __convertPitchToFrequency__(self, pitch):
        """
        Convert MIDI pitch to frequency in Hertz.
        """
        concertA = 440.0
        return concertA * 2.0 ** ((pitch - 69) / 12.0)

    def __convertFrequencyToPitch__(self, freq):
        """
        Converts pitch frequency (in Hertz) to MIDI pitch.
        """
        concertA = 440.0
        return log(freq / concertA, 2.0) * 12.0 + 69

    # following conversions between frequencies and semitones based on code
    # by J.R. de Pijper, IPO, Eindhoven
    # see http://users.utu.fi/jyrtuoma/speech/semitone.html
    def __getSemitonesBetweenFrequencies__(self, freq1, freq2):
        """
        Calculate number of semitones between two frequencies.
        """
        semitones = (12.0 / log(2)) * log(freq2 / freq1)
        return int(semitones)

    def __getFrequencyChangeBySemitones__(self, freq, semitones):
        """
        Calculates frequency change, given change in semitones, from a frequency.
        """
        freqChange = (exp(semitones * log(2) / 12) * freq) - freq
        return freqChange
######################################################################################
# If running inside JEM, register function that stops everything, when the Stop button
# is pressed inside JEM.
######################################################################################
# function to stop and clean-up all active AudioSamples
def __stopActiveAudioSamples__():
    """
    Stops every active AudioSample / LiveSample and empties the registry,
    so the objects can be garbage collected.  (Called by JEM's Stop button.)
    """
    global __ActiveAudioSamples__

    # stop them all (no need to check if they are playing - stopping an idle sample is fine)
    for sample in __ActiveAudioSamples__:
        sample.stop()

    # NOTE: The original code also looped doing "del sample", but deleting the loop variable
    # only unbinds a local name - it does not free the objects.  Emptying the list (below)
    # is what actually drops the references, so the no-op loop was removed.
    __ActiveAudioSamples__ = []   # remove access, so the samples can be garbage collected
# now, register function with JEM (if possible)
try:
    # if we are inside JEM, registerStopFunction() will be available
    registerStopFunction(__stopActiveAudioSamples__)   # tell JEM which function to call when the Stop button is pressed
except:   # otherwise (if we get an error), we are NOT inside JEM
    pass   # so, do nothing.
# used to keep track which MidiSequence objects are active, so we can stop them when
# JEM's Stop button is pressed (see __stopActiveMidiSequences__ below)
__ActiveMidiSequences__ = []   # holds active MidiSequence objects
##### MidiSequence class ######################################
class MidiSequence():
    """Encapsulates a midi sequence object created from the provided material, which is either a string
    - the filename of a MIDI file (.mid) - or a music library object (Score, Part, Phrase, or Note).
    The midi sequence has a default MIDI pitch (e.g., A4) and volume.  The sequence can be played once,
    looped, and stopped.  Also, we may change its pitch, tempo, and volume.  These changes happen immediately.
    """

    def __init__(self, material, pitch=A4, volume=127):
        """
        material - MIDI filename (string), or a Note, Phrase, Part, or Score
        pitch    - default MIDI pitch associated with this sequence
        volume   - default volume (0-127)

        Raises TypeError if material is of an unsupported type.
        """
        # determine what type of material we have
        if type(material) == type(""):   # a string?
            self.filename = material               # assume it's an external MIDI filename
            # load and create the MIDI sample
            self.score = Score()                   # create an empty score
            Read.midi(self.score, self.filename)   # load the external MIDI file
        else:   # determine what type of material we have
            # and do necessary datatype wrapping (MidiSynth() expects a Score)
            if type(material) == Note:
                material = Phrase(material)
            if type(material) == Phrase:    # no elif - we need to successively wrap from Note to Score
                material = Part(material)
            if type(material) == jPhrase:   # (also wrap jMusic default Phrases, in addition to our own)
                material = Part(material)
            if type(material) == Part:      # no elif - we need to successively wrap from Note to Score
                material = Score(material)

            if type(material) == Score:
                self.score = material       # and remember it
            else:   # error check
                raise TypeError("Midi() - Unrecognized type", type(material), "- expected filename (string), Note, Phrase, Part, or Score.")

        # now, self.score contains a Score object

        # create Midi sequencer to playback this sample
        self.midiSynth = self.__initMidiSynth__()

        # get access to the MidiSynth's internal components (needed for some of our operations)
        self.sequencer = self.midiSynth.getSequencer()
        self.synthesizer = self.midiSynth.getSynthesizer()

        # set tempo factor
        self.tempoFactor = 1.0   # scales whatever tempo is set for the sequence (1.0 means no change)

        self.defaultTempo = self.score.getTempo()   # remember default tempo
        self.playbackTempo = self.defaultTempo      # set playback tempo to default tempo

        # set volume
        self.volume = volume   # holds volume (0-127)
        #self.setVolume( self.volume )   # set desired volume

        # set MIDI score's default and current pitch
        self.defaultPitch = pitch   # remember default pitch (BUGFIX: this was never set before,
                                    # so getDefaultPitch() raised an AttributeError)
        self.pitch = pitch          # holds current playback pitch (changed via setPitch())

        # remember that this MidiSequence has been created and is active (so that it can be stopped by JEM, if desired)
        __ActiveMidiSequences__.append(self)

    def __initMidiSynth__(self):
        """Creates and initializes a MidiSynth object."""
        # NOTE: Since we need access to the "guts" of the MidiSynth object, it is important to initialize it.
        # This happens automatically the first time we play something through it, so let's play an empty score.
        midiSynth = MidiSynth()     # create it
        midiSynth.play( Score() )   # and initialize it
        return midiSynth

    def play(self):
        """Play the MIDI score."""
        # make sure only one play is active at a time
        if self.midiSynth.isPlaying():   # is another play on?
            self.stop()                  # yes, so stop it

        #self.sequencer.setLoopCount(0)   # set to no repetition (needed, in case we are called after loop())
        self.midiSynth.setCycle(False)    # turn off looping (just in case)
        self.midiSynth.play( self.score ) # play it!

    def loop(self):
        """Repeat the score indefinitely."""
        # make sure only one play is active at a time
        if self.midiSynth.isPlaying():   # is another play on?
            self.stop()                  # yes, so stop it

        # Due to an apparent Java Sequencer bug in setting tempo, we can only loop indefinitely (not a specified
        # number of times).  Looping a specified number of times causes the second iteration to playback at 120 BPM.
        #self.sequencer.setLoopCount(times)   # set the number of times to repeat the sequence
        self.midiSynth.setCycle(True)
        self.midiSynth.play( self.score )   # play it!

    def isPlaying(self):
        """
        Returns True if the sequence is still playing.
        """
        return self.midiSynth.isPlaying()

    def stop(self):
        """Stop the MIDI score play."""
        self.midiSynth.stop()

    def pause(self):
        """Pause the MIDI sequence play."""
        # NOTE: There is no true pause in MidiSynth, so we simulate it by slowing playback
        # down to (almost) a standstill via the sequencer's tempo factor.
        self.__setTempoFactor__(0.00000000000000000000000000000000000000000001)

    def resume(self):
        """
        Resume playing the sample (from the paused position).
        """
        self.__setTempoFactor__(1.0)   # reset playback to original tempo (i.e., resume)

    # low-level helper function
    def __setTempoFactor__(self, factor = 1.0):
        """
        Set MIDI sequence's tempo factor (1.0 means default, i.e., no change).
        """
        self.sequencer.setTempoFactor( factor )

    def setPitch(self, pitch):
        """Set the MidiSequence's playback pitch (by transposing the MIDI material)."""
        semitones = pitch - self.pitch           # get the pitch change in semitones
        Mod.transpose( self.score, semitones )   # update score pitch appropriately

        # do some low-level work inside MidiSynth
        updatedSequence = self.midiSynth.scoreToSeq( self.score )             # get new Midi sequence from updated score
        self.positionInMicroseconds = self.sequencer.getMicrosecondPosition() # remember where to resume
        self.sequencer.setSequence(updatedSequence)                           # update the sequence - this restarts playing...
        self.sequencer.setMicrosecondPosition( self.positionInMicroseconds )  # ...so reset playing to where we left off
        self.sequencer.setTempoInBPM( self.playbackTempo )                    # set tempo (needed for the first (partial) iteration)

        # finally, remember new pitch
        self.pitch = pitch

    def getPitch(self):
        """Returns the MIDI score's current pitch."""
        return self.pitch

    def getDefaultPitch(self):
        """Return the MidiSequence's default pitch."""
        return self.defaultPitch

    def setTempo(self, beatsPerMinute):
        """
        Set MIDI sequence's playback tempo.
        """
        # Due to an apparent Java Sequencer bug in setting tempo, when looping a specified number of times causes
        # all but the first iteration to playback at 120 BPM, regardless of what the current tempo may be.
        # Unable to solve the problem in the general case, below is an attempt to fix it for some cases (e.g.,
        # for looping continuously, but not for looping a specified number of times).
        self.playbackTempo = beatsPerMinute             # keep track of new playback tempo
        self.sequencer.setTempoInBPM( beatsPerMinute )  # and set it
        self.midiSynth.setTempo( beatsPerMinute )       # and set it again (this seems redundant, but see above)
        self.score.setTempo( beatsPerMinute )           # and set it again (this seems redundant, but see above)

    def getTempo(self):
        """
        Return MIDI sequence's playback tempo.
        """
        return self.playbackTempo

    def getDefaultTempo(self):
        """
        Return MIDI sequence's default tempo (in beats per minute).
        """
        return self.defaultTempo

    def setVolume(self, volume):
        """Sets the volume for the MidiSequence (volume ranges from 0 - 127)."""
        self.volume = volume   # remember new volume

        # NOTE: Setting volume through a MidiSynth is problematic.
        # Here we use a solution by Howard Amos (posted 8/16/2012) in
        # http://www.coderanch.com/t/272584/java/java/MIDI-volume-control-difficulties
        volumeMessage = ShortMessage()   # create a MIDI message
        #receiver = self.sequencer.getTransmitters().iterator().next().getReceiver()   # get the MidiSynth receiver
        receiver = self.sequencer.getTransmitters()[0].getReceiver()   # get the MidiSynth receiver

        for channel in range(16):   # change volume of all the MIDI channels
            volumeMessage.setMessage(0xB0 + channel, 7, volume)   # set coarse volume control for this channel
            receiver.send (volumeMessage, -1)                     # and communicate it to the receiver

    def getVolume(self):
        """Returns the volume for the MidiSequence (volume ranges from 0 - 127)."""
        return self.volume
######################################################################################
# If running inside JEM, register function that stops everything, when the Stop button
# is pressed inside JEM.
######################################################################################
# function to stop and clean-up all active MidiSequences
def __stopActiveMidiSequences__():
    """
    Stops every active MidiSequence and empties the registry, so the objects
    can be garbage collected.  (Called by JEM's Stop button.)
    """
    global __ActiveMidiSequences__

    # stop them all (no need to check if they are playing - stopping an idle sequence is fine)
    for seq in __ActiveMidiSequences__:
        seq.stop()

    # NOTE: The original code also looped doing "del seq", but deleting the loop variable
    # only unbinds a local name - it does not free the objects.  Emptying the list (below)
    # is what actually drops the references, so the no-op loop was removed.
    __ActiveMidiSequences__ = []   # remove access, so the sequences can be garbage collected
# now, register function with JEM (if possible)
try:
    # if we are inside JEM, registerStopFunction() will be available
    registerStopFunction(__stopActiveMidiSequences__)   # tell JEM which function to call when the Stop button is pressed
except:   # otherwise (if we get an error), we are NOT inside JEM
    pass   # so, do nothing.
# used to keep track which Metronome objects are active, so we can stop them when
# JEM's Stop button is pressed
__ActiveMetronomes__ = []   # holds active Metronome objects (comment corrected - previously said "MidiSequence objects")
##### Metronome class ######################################
from timer import Timer
#from gui import Display # for Metronome tick visualization
class Metronome():
"""Creates a metronome object used in scheduling and synchronizing function call (intended for starting blocks of musical
material together, but could be really used for anything (e.g., GUI animzation). This is based on the Timer class,
but is higher-level, based on tempo (e.g., 60 BPM), and time signatures (e.g., 4/4).
"""
#def __init__(self, tempo=60, timeSignature=[4, 4], displaySize=50, displayTickColor=Color.RED):
def __init__(self, tempo=60, timeSignature=[4, 4]):
# remember title, tempo and time signature
self.tempo = tempo
self.timeSignature = timeSignature # a list (first item is numerator, second is denominator)
# list of functions (we are asked to synchronize) and their information (parallel lists)
self.functions = [] # functions to call
self.parameters = [] # their corresponding parameters
self.desiredBeats = [] # on which beat to call them (0 means now)
self.repeatFlags = [] # if they are meant to be called repeatedly
self.beatCountdowns = [] # holds beat countdown until call
# create timer, upon which to base our operation
delay = int((60.0 / self.tempo) * 1000) # in milliseconds
self.timer = Timer2(delay, self.__callFunctions__, [], True)
# set up metronome visualization
# self.display = Display("Metronome", displaySize, displaySize+20, 0, 0)
# self.display.hide() # initially hidden
#
# # set up display ticking
# self.displayTickColor = displayTickColor # color used for ticking
# self.displayOriginalColor = self.display.getColor() # color to reset ticking
# self.flickerTimer = Timer2(100, self.display.setColor, [self.displayOriginalColor]) # create timer to reset display color (it ends fliker)
# self.add( self.__updateDisplay__, [], 0, True, 1) # schedule display flickering on every beat (starts flicker)
# set up metronome visualization / sonification
self.currentBeat = 1 # holds current beat relative to provided time signature (1 means first beat)
self.visualize = False # True means print out current beat on console; False do not print
self.sonify = False # True means sound each tick; False do not
self.sonifyPitch = HI_MID_TOM # which pitch to play whe ticking
self.sonifyChannel = 9 # which channel to use (9 is for percussion)
self.sonifyVolume = 127 # how loud is strong beat (secondary beats will at 70%)
# remember that this MidiSequence has been created and is active (so that it can be stopped by JEM, if desired)
__ActiveMetronomes__.append(self)
def add(self, function, parameters=None, desiredBeat=0, repeatFlag=False):
    """Schedule *function* to be called by the metronome.

    :param function: the callable to invoke.
    :param parameters: list of positional arguments to pass (default: none).
    :param desiredBeat: beat on which to call it - 0 means right away,
        1 means the first (strong) beat, 2 the second beat, and so on.
    :param repeatFlag: if True, keep calling the function every time the
        desired beat comes around; otherwise call it only once.
    """
    # BUGFIX: the default used to be a shared mutable list ([]); create a
    # fresh list per call so scheduled entries never alias each other.
    if parameters is None:
        parameters = []
    self.functions.append(function)
    self.parameters.append(parameters)
    self.desiredBeats.append(desiredBeat)
    self.repeatFlags.append(repeatFlag)
    # figure out how many ticks remain until the desired beat, and remember it
    self.beatCountdowns.append(self.__calculateBeatCountdown__(desiredBeat))
def remove(self, function):
    """Unschedule the earliest scheduled occurrence of *function*.

    If the function was scheduled several times only the first occurrence
    is removed (call again for the rest - a design choice).  Raises a
    ValueError if the function is not currently scheduled.
    """
    position = self.functions.index(function)   # ValueError if absent
    # drop the function together with every parallel bookkeeping entry
    for bookkeeping in (self.functions, self.parameters, self.desiredBeats,
                        self.repeatFlags, self.beatCountdowns):
        bookkeeping.pop(position)
def removeAll(self):
    """Unschedule every function currently registered with the metronome."""
    # start afresh with empty parallel bookkeeping lists
    self.functions, self.parameters = [], []
    self.desiredBeats, self.repeatFlags = [], []
    self.beatCountdowns = []
def setTempo(self, tempo):
    """Change the metronome's tempo (beats per minute) and retune the timer."""
    self.tempo = tempo
    # one beat lasts 60/tempo seconds; the timer wants milliseconds
    millisPerBeat = int((60.0 / self.tempo) * 1000)
    self.timer.setDelay(millisPerBeat)
def getTempo(self):
    """Return the metronome's current tempo (beats per minute)."""
    currentTempo = self.tempo
    return currentTempo
def setTimeSignature(self, timeSignature):
    """Change the time signature (a [numerator, denominator] list).

    Beat counting is reset so the next tick becomes beat 1 of the new measure.
    """
    self.timeSignature = timeSignature
    self.currentBeat = 0   # the per-tick advance will wrap this to 1
def getTimeSignature(self):
    """Return the metronome's time signature (a [numerator, denominator] list)."""
    signature = self.timeSignature
    return signature
def start(self):
    """Start the metronome (the underlying timer begins firing once per beat)."""
    self.timer.start()
    print "Metronome started..."
def stop(self):
    """Stop the metronome (the underlying timer stops firing)."""
    self.timer.stop()
    print "Metronome stopped."
# def __updateDisplay__(self):
# """It temporarily flickers the metronome's visualization display to indicate a 'tick'."""
#
# # change color to indicate a tick
# self.display.setColor( self.displayTickColor )
#
# # reset display back to original color after a small delay
# #flikcerTimer = Timer2(250, self.display.setColor, [self.displayOriginalColor])
# #flikcerTimer.start() # after completion, this timer will eventually be garbage collected (no need to reuse)
# self.flickerTimer.start()
# def __advanceCurrentBeat__(self):
# """It advances the current metronome beat."""
#
# if self.visualize: # do we need to print out current beat?
# print self.currentBeat
#
# if self.sonify: # do we need to sound out current beat?
# if self.currentBeat == 1: # strong (first) beat?
# Play.note(self.sonifyPitch, 0, 200, self.sonifyVolume, self.sonifyChannel) # louder
# else:
# Play.note(self.sonifyPitch, 0, 200, int(self.sonifyVolume * 0.7), self.sonifyChannel) # softer
#
# self.currentBeat = (self.currentBeat % self.timeSignature[0]) + 1 # wrap around as needed
def __callFunctions__(self):
    """Timer callback, fired once per beat.

    Prints/sounds the current beat if requested, calls every scheduled
    function whose countdown reached zero, removes one-shot functions,
    advances the current beat, and updates the remaining countdowns.
    """
    # do visualization / sonification tasks (if any)
    if self.visualize:   # print out current beat on the console?
        print(self.currentBeat)
    if self.sonify:      # sound out current beat?
        if self.currentBeat == 1:   # strong (first) beat is louder
            Play.note(self.sonifyPitch, 0, 200, self.sonifyVolume, self.sonifyChannel)
        else:                       # secondary beats are softer (70%)
            Play.note(self.sonifyPitch, 0, 200, int(self.sonifyVolume * 0.7), self.sonifyChannel)
    # NOTE: several separate loops are used so all functions get quick service;
    # bookkeeping happens only after everyone due on this beat has been called.
    # first, call all functions whose countdown has expired
    nonRepeatedFunctions = []   # indices of one-shot functions (removed below)
    for i in range(len(self.functions)):
        if self.beatCountdowns[i] == 0:   # due right now?
            self.functions[i](*(self.parameters[i]))
            if not self.repeatFlags[i]:           # call only once?
                nonRepeatedFunctions.append(i)    # mark it for deletion
    # next, remove the one-shot functions that just ran.
    # BUGFIX: pop indices in REVERSE order - popping in ascending order
    # shifts the later indices and would remove the wrong entries.
    for i in reversed(nonRepeatedFunctions):
        self.functions.pop(i)
        self.parameters.pop(i)
        self.desiredBeats.pop(i)
        self.repeatFlags.pop(i)
        self.beatCountdowns.pop(i)
    ###########################################################################################
    # NOTE: This belongs exactly here (before updating countdown timers below)
    # advance to next beat (in anticipation...), wrapping around the measure
    self.currentBeat = (self.currentBeat % self.timeSignature[0]) + 1
    ###########################################################################################
    # finally, update countdown timers for all remaining functions
    for i in range(len(self.functions)):
        if self.beatCountdowns[i] == 0:
            # it was just called - reschedule it for its next desired beat
            self.beatCountdowns[i] = self.__calculateBeatCountdown__(self.desiredBeats[i])
        else:
            self.beatCountdowns[i] = self.beatCountdowns[i] - 1   # one tick closer to calling it
def __calculateBeatCountdown__(self, desiredBeat):
    """Return how many metronome ticks remain until *desiredBeat* comes up.

    0 means "now"; beats already passed in this measure are picked up in
    the next measure; beats beyond the measure are scheduled one measure
    plus their offset away.  Raises ValueError for negative beats.
    """
    measureLength = self.timeSignature[0]
    beatNow = self.currentBeat
    if desiredBeat == 0:
        # "now" was requested, regardless of where we are in the measure
        return 0
    if desiredBeat < 1:
        # we cannot handle negative (or fractional sub-1) beats
        raise ValueError("Cannot handle negative beats, " + str(desiredBeat) + ".")
    if desiredBeat > measureLength:
        # desired beat lies beyond this measure
        return desiredBeat - beatNow + measureLength
    if desiredBeat >= beatNow:
        # desired beat is still ahead of us within this measure
        return desiredBeat - beatNow
    # desired beat already passed - catch it in the next measure
    return (desiredBeat + measureLength) - beatNow
def show(self):
    """Turn on the metronome's beat visualization (prints each beat on the console)."""
    self.visualize = True
def hide(self):
    """Turn off the metronome's beat visualization."""
    self.visualize = False
def soundOn(self, pitch=ACOUSTIC_BASS_DRUM, volume=127, channel=9):
    """Turn the metronome's ticking sound on.

    :param pitch: which pitch to play when ticking.
    :param volume: loudness of the strong (first) beat; secondary beats play at 70%.
    :param channel: MIDI channel to use (9 is the percussion channel).
    """
    self.sonify = True
    self.sonifyPitch = pitch        # which pitch to play when ticking
    self.sonifyChannel = channel    # which channel to use (9 is for percussion)
    self.sonifyVolume = volume      # how loud the strong beat is (secondary beats play at 70%)
def soundOff(self):
    """Silence the metronome's ticking sound."""
    self.sonify = False
#
#####################################################################################
# If running inside JEM, register function that stops everything, when the Stop button
# is pressed inside JEM.
######################################################################################
# function to stop and clean-up all active MidiSequences
def __stopActiveMetronomes__():
    """Stop every active metronome and forget it (hooked to JEM's Stop button)."""
    global __ActiveMetronomes__
    # stop each metronome - no need to check if it is running, stopping is safe
    for metronome in __ActiveMetronomes__:
        metronome.stop()
    # NOTE: the original also looped doing 'del m', which only unbinds the
    # loop variable and deletes nothing; dropping the list reference below
    # is what actually lets the metronomes be garbage collected.
    __ActiveMetronomes__ = []   # remove access to stopped items
# now, register function with JEM (if possible)
try:
    # if we are inside JEM, registerStopFunction() will be available
    registerStopFunction(__stopActiveMetronomes__)   # tell JEM which function to call when the Stop button is pressed
except:  # otherwise (if we get an error), we are NOT inside JEM
    pass  # so, do nothing.  NOTE(review): this bare except also hides errors raised *by* registerStopFunction itself - consider catching NameError only.
######################################################################################
# synthesized jMusic instruments (also see http://jmusic.ci.qut.edu.au/Instruments.html)
#import AMInst
#import AMNoiseInst
#import AddInst
#import AddMorphInst
#import AddSynthInst
#import BandPassFilterInst
#import BowedPluckInst
#import BreathyFluteInst
#import ChiffInst
#import ControlledHPFInst
#import DynamicFilterInst
#import FGTRInst
#import FMNoiseInst
#import FractalInst
#import GranularInst
#import GranularInstRT
#import HarmonicsInst
#import LFOFilteredSquareInst
#import LPFilterEnvInst
#import NoiseCombInst
#import NoiseInst
#import OddEvenInst
#import OvertoneInst
#import PluckInst
#import PluckSampleInst
#import PrintSineInst
#import PulseFifthsInst
#import PulsewaveInst
#import RTPluckInst
#import RTSimpleFMInst
#import ResSawInst
#import ReverseResampledInst
#import RingModulationInst
#import SabersawInst
#import SawCombInst
#import SawHPFInst
#import SawLPFInst
#import SawLPFInstB
#import SawLPFInstE
#import SawLPFInstF
#import SawLPFInstG
#import SawLPFInstRT
#import SawtoothInst
#import Sawtooth_LPF_Env_Inst
#import SimpleAMInst
#import SimpleAllPassInst
#import SimpleFMInst
#import SimpleFMInstRT
#import SimplePluckInst
#import SimpleReverbInst
#import SimpleSampleInst
#import SimpleSineInst
#import SimpleTremoloInst
#import SimplestInst
#import SineInst
#import SlowSineInst
#import SquareBackwardsInst
#import SquareCombInst
#import SquareInst
#import SquareLPFInst
#import SubtractiveSampleInst
#import SubtractiveSynthInst
#import SuperSawInst
#import TextInst
#import TimpaniInst
#import TremoloInst
#import TriangleInst
#import TriangleRepeatInst
#import VaryDecaySineInst
#import VibesInst
#import VibratoInst
#import VibratoInstRT
# preserve Jython bindings that get overwritten by the following Java imports - a hack!
# (also see very top of this file)
enumerate = enumerate_preserve   # restore the builtin enumerate shadowed by the Java imports
# print two blank lines on the console (Python 2 print statements)
print
print
|
Justin-Yuan/Image2Music-Generator
|
library/music.py
|
Python
|
gpl-2.0
| 143,992
|
[
"CRYSTAL"
] |
da97fe20ef2b6af16de45ec0dc2cfc27e2a66756cee23d106e271ba12d8cda51
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pythtb import tb_model,w90
from ase.calculators.interface import Calculator,DFTCalculator
from ase.dft.dos import DOS
from ase.dft.kpoints import monkhorst_pack
import numpy as np
#from tetrahedronDos import tetrahedronDosClass
from occupations import Occupations
from pyDFTutils.wannier90.band_plot import plot_band_weight
import matplotlib.pyplot as plt
class my_tb_model(tb_model):
    """
    :param dim_k: Dimensionality of reciprocal space, i.e., specifies how
      many directions are considered to be periodic.
    :param dim_r: Dimensionality of real space, i.e., specifies how many
      real space lattice vectors there are and how many coordinates are
      needed to specify the orbital coordinates.
    .. note:: Parameter *dim_r* can be larger than *dim_k*! For example,
      a polymer is a three-dimensional molecule (one needs three
      coordinates to specify orbital positions), but it is periodic
      along only one direction. For a polymer, therefore, we should
      have *dim_k* equal to 1 and *dim_r* equal to 3. See similar example
      here: :ref:`trestle-example`.
    :param lat: Array containing lattice vectors in Cartesian
      coordinates (in arbitrary units). In the example below, the first
      lattice vector has coordinates [1.0,0.5] while the second
      one has coordinates [0.0,2.0].
    :param orb: Array containing reduced coordinates of all
      tight-binding orbitals. In the example below, the first
      orbital is defined with reduced coordinates [0.2,0.3]. Its
      Cartesian coordinates are therefore 0.2 times the first
      lattice vector plus 0.3 times the second lattice vector.
    :param per: This is an optional parameter giving a list of lattice
      vectors which are considered to be periodic. In the example below,
      only the vector [0.0,2.0] is considered to be periodic (since
      per=[1]). By default, all lattice vectors are assumed to be
      periodic. If dim_k is smaller than dim_r, then by default the first
      dim_k vectors are considered to be periodic.
    :param nspin: Number of spin components assumed for each orbital
      in *orb*. Allowed values of *nspin* are *1* and *2*. If *nspin*
      is 1 then the model is spinless, if *nspin* is 2 then it is a
      spinfull model and each orbital is assumed to have two spin
      components. Default value of this parameter is *1*.
    :param nel: Number of electrons; if fix_spin, nel should be a tuple of (nel_up,nel_dn)
    :param width: smearing width
    :param verbose: whether to print some information in detail.
    :param fix_spin: whether to fix the spin polarization.
    Example usage::
       # Creates model that is two-dimensional in real space but only
       # one-dimensional in reciprocal space. Second lattice vector is
       # chosen to be periodic (since per=[1]). Three orbital
       # coordinates are specified.
       tb = tb_model(1, 2,
                   lat=[[1.0, 0.5], [0.0, 2.0]],
                   orb=[[0.2, 0.3], [0.1, 0.1], [0.2, 0.2]],
                   per=[1])
    """

    def __init__(self, dim_k, dim_r, lat, orb, per=None, nspin=1, nel=None,
                 width=0.2, verbose=True, fix_spin=False):
        """
        :param nel: number of electrons.
        :param width: smearing width
        :param verbose: verbose
        :param fix_spin: whether to fix the spin polarization.
        """
        tb_model.__init__(self, dim_k, dim_r, lat, orb, per=per, nspin=nspin)
        self._eigenvals = None      # cached eigenvalues, indexed [band, kpt]
        self._eigenvecs = None      # cached eigenvectors
        self._nspin = nspin
        # if fix_spin: _efermi is a tuple (ef_up, ef_dn)
        self._efermi = None
        self._occupations = None
        self._kpts = None
        # _kweights is an array [w1, w2, ....]
        self._kweights = None
        self._nel = nel
        self._old_occupations = None
        self._width = width
        self._verbose = verbose
        self._eps = 0.001
        self.fix_spin = fix_spin
        self._nbar = np.ndarray([len(orb), 2])

    def set_kpoints(self, kpts):
        """Set the k-points on which the model will be solved.

        Each row of *kpts* is either a bare k-point (dim_k reduced
        coordinates - equal weights are then assumed), or a k-point plus
        its weight as the last column.
        """
        if len(kpts[0]) == self._dim_k:
            self._kpts = kpts
            # equal weight for every k-point
            self._kweights = np.array([1.0 / len(self._kpts)] * len(self._kpts))
        elif len(kpts[0]) == self._dim_k + 1:   # BUGFIX: was self.dim_k (no such attribute)
            self._kpts = kpts[:, :-1]
            self._kweights = kpts[:, -1]
        else:
            # previously malformed input was silently ignored, leaving
            # _kpts/_kweights None and causing confusing failures later
            raise ValueError("each kpts row must have dim_k or dim_k+1 entries")

    def get_number_of_bands(self):
        """Return the number of bands (equals the number of orbitals)."""
        return self.get_num_orbitals()

    def solve_all(self, k_list=None, eig_vectors=False):
        """Diagonalize the model on *k_list*, caching the results.

        :returns: eigenvalues, or (eigenvalues, eigenvectors) when
            *eig_vectors* is True.
            BUGFIX: the original returned only the eigenvalues even when
            *eig_vectors* was True, although get_eigenvalues unpacks the
            result into two arrays.
        """
        if eig_vectors:
            self._eigenvals, self._eigenvecs = tb_model.solve_all(
                self, k_list=k_list, eig_vectors=eig_vectors)
            return self._eigenvals, self._eigenvecs
        self._eigenvals = tb_model.solve_all(
            self, k_list=k_list, eig_vectors=eig_vectors)
        return self._eigenvals

    def get_eigenvalues(self, kpt=0, spin=None, refresh=False):
        """Return the eigenvalues at k-point index *kpt*.

        Eigenvalues and eigenvectors are computed lazily and cached on the
        instance. For a spinfull model with *spin* given, bands are
        classified as up/down by which spin component of the eigenvector
        has the larger norm, and only that subset is returned.

        :param kpt: index into self._kpts.
        :param spin: None, 0/'UP' or 1/'DOWN'.
        :param refresh: force re-diagonalization even if cached.
        """
        if self._eigenvals is None or refresh:
            self._eigenvals, self._eigenvecs = self.solve_all(
                k_list=self._kpts, eig_vectors=True)
        if spin is None or self._nspin == 1:
            return self._eigenvals[:, kpt]
        # separate spin up/down: project each eigenvector on the spin
        # up/down basis and compare the norms
        eval_up = []
        eval_dn = []
        for ib, ev in enumerate(self._eigenvals[:, kpt]):   # ev: avoid shadowing builtin eval
            vec_up = self._eigenvecs[ib, kpt, :, 0]
            vec_dn = self._eigenvecs[ib, kpt, :, 1]
            if np.linalg.norm(vec_up) > np.linalg.norm(vec_dn):
                eval_up.append(ev)
            else:
                eval_dn.append(ev)
        if spin == 0 or spin == 'UP':
            return np.array(eval_up)
        if spin == 1 or spin == 'DOWN':
            return np.array(eval_dn)

    def get_fermi_level(self):
        """Return the Fermi level (0.0 with a warning if not yet calculated)."""
        if self._efermi is None:   # BUGFIX: 'is None' instead of '== None'
            print("Warning: Efermi not calculated yet. Using 0 instead.")
            return 0.0
        else:
            return self._efermi

    def get_bz_k_points(self):
        """Return the k-points set via set_kpoints()."""
        return self._kpts

    def get_ibz_k_points(self):
        """Irreducible BZ k-points are not supported by this model."""
        raise NotImplementedError

    def get_k_point_weights(self):
        """Return the k-point weights (parallel to get_bz_k_points())."""
        return self._kweights

    def get_number_of_spins(self):
        """Return the number of spin components (1 or 2)."""
        return self._nspin

    def get_dos(self, width=0.15, method='gaussian', npts=501):
        """
        density of states.
        :param width: smearing width
        :param method: 'gaussian'| 'tetra'
        :param npts: number of DOS energies.
        :returns:
          energies, dos. two ndarray.
        TODO: implement spin resolved DOS.
        """
        if method == 'tetra':
            # NOTE(review): the tetrahedronDosClass import is commented out
            # at the top of the file, so this branch raises NameError - confirm.
            dos = tetrahedronDosClass(self, width, npts=npts)
        else:
            dos = DOS(self, width, window=None, npts=npts)
        return dos.get_energies(), dos.get_dos()

    def get_occupations(self, nel, width=0.2, refresh=False):
        """
        calculate occupations of each eigenvalue.
        the shape of the occupation is the same as self._eigenvals.
        [eig_k1,eigk2,...], each eig_k is a column with the length=nbands.
        if nspin=2 and fix_spin, there are two fermi energies. NOTE: this conflicts with the DOS calculation. FIXME.
        :param nel: number of electrons. if fix_spin, the nel is a tuple of (nel_up,nel_dn)
        :Returns:
          self._occupations (np.ndarray) index:[band,kpt,orb,spin] if nspin==2 else [band,kpt,orb] same as eigenvec
        """
        self._nel = nel
        self.get_eigenvalues(refresh=refresh)
        if self._nspin == 1 or not self.fix_spin:
            occ = Occupations(nel, width, self._kweights, nspin=self._nspin)
            self._occupations = occ.occupy(self._eigenvals)
            self._efermi = occ.get_mu()
        elif self._nspin == 2 and self.fix_spin:
            raise NotImplementedError("current implement on fix_spin is not correct.")
            # NOTE: the code below is unreachable (kept from the original).
            # FIXME (translated from Chinese): this is simply wrong - the
            # eigenvalues cannot be split into eig_up/eig_dn by interleaving.
            nel_up, nel_dn = nel
            eig_up = self._eigenvals[::2]      # BUGFIX: was self.eigenvals
            eig_dn = self._eigenvals[1::2]
            occ_up = Occupations(nel_up, width, self._kweights, nspin=1)
            occupations_up = occ_up.occupy(eig_up)
            efermi_up = occ_up.get_mu()
            occ_dn = Occupations(nel_dn, width, self._kweights, nspin=1)
            occupations_dn = occ_dn.occupy(eig_dn)
            efermi_dn = occ_dn.get_mu()
            self._occupations[::2] = occupations_up
            self._occupations[1::2] = occupations_dn
            self._efermi = (efermi_up, efermi_dn)   # BUGFIX: was self.efermi
        return self._occupations

    def get_orbital_occupations(self, refresh=True):
        """Return per-orbital occupations self._nbar.

        If nspin==1 the result is indexed [orb]; if nspin==2, [orb, spin].
        NOTE(review): *refresh* is currently unused; get_occupations() must
        have been called beforehand - confirm callers do so.
        """
        # |c_{band,kpt,orb(,spin)}|^2 projection weights
        A2 = np.abs(self._eigenvecs) ** 2
        # occupations are indexed like the eigenvalues: [band, kpt]
        ni, nk = self._occupations.shape
        V2 = np.zeros(A2.shape, dtype=float)
        # weight each projection by its band occupation and k-point weight
        # (the nspin==1 and nspin==2 loops were identical - merged)
        for i in range(ni):
            for j in range(nk):
                V2[i, j] = self._occupations[i, j] * A2[i, j] * self._kweights[j]
        # sum over bands and k-points, leaving per-orbital (per-spin) values.
        # BUGFIX: the nspin==1 path used to reduce V2 twice, so the second
        # .sum(axis=(0, 1)) raised an axis error.
        self._nbar = V2.sum(axis=(0, 1))
        return self._nbar

    def get_band_energy(self):
        """Return the band energy: k-weighted sum of occupied levels.

        Not the free energy. The value is also stored on self.energy
        (as before); a return statement was added for convenience.
        """
        self.energy = (self._kweights * (self._occupations * self._eigenvals)).sum()
        return self.energy

    def get_free_energy(self):
        """Free energy - not implemented yet (placeholder)."""
        pass

    def get_projection(self, orb, spin=0):
        """
        get the projection to nth orb.
        :param orb: the index of the orbital.
        :param spin: if spin polarized, 0 or 1
        :returns: eigenvecs[iband,ikpt]
        """
        if self._nspin == 2:
            return self._eigenvecs[:, :, orb, spin]
        else:
            return self._eigenvecs[:, :, orb]

    def plot_projection(self, orb, spin=0, color='blue', axis=None):
        """Plot the band structure with the weight of orbital *orb* overlaid."""
        kslist = [list(range(len(self._kpts)))] * self._norb
        ekslist = self._eigenvals
        wkslist = np.abs(self.get_projection(orb, spin=spin))
        return plot_band_weight(kslist, ekslist, wkslist=wkslist, efermi=None,
                                yrange=None, output=None, style='alpha',
                                color=color, axis=axis, width=10, xticks=None)

    def get_pdos(self):
        """
        get projected dos to the basis set.
        """
        raise NotImplementedError('Projected DOS is not yet implemented!')

    def get_band_gap_and_edges(self, nel=None, kpts=None):
        """Return (band_gap, VBM, CBM).

        The VBM is the highest energy of the nel-th band (row nel-1) and
        the CBM the lowest energy of the next band (row nel).
        """
        if nel is None:
            nel = self._nel
        if kpts is None:
            kpts = self._kpts
        # BUGFIX: eigvals was undefined whenever self._eigenvals was
        # already cached; reuse the cache instead.
        if self._eigenvals is None:
            eigvals = self.solve_all(k_list=kpts, eig_vectors=False)
        else:
            eigvals = self._eigenvals
        # BUGFIX: np.maximum/np.minimum are binary element-wise ufuncs;
        # np.max/np.min reduce over the k-points as intended.
        VBM = np.max(eigvals[nel - 1, :])
        CBM = np.min(eigvals[nel, :])
        band_gap = CBM - VBM
        return band_gap, VBM, CBM
class myw90(w90):
    """
    wrapper of pythtb.w90, the only difference is that it uses my_tb_model
    as the model class (and avoids pythtb's private _red_to_cart helper).
    """

    def model(self, zero_energy=0.0, min_hopping_norm=None, max_distance=None,
              ignorable_imaginary_part=None):
        """
        This function returns :class:`pythtb.tb_model` object that can
        be used to interpolate the band structure at arbitrary
        k-point, analyze the wavefunction character, etc.
        The tight-binding basis orbitals in the returned object are
        maximally localized Wannier functions as computed by
        Wannier90. The orbital character of these functions can be
        inferred either from the *projections* block in the
        *prefix*.win or from the *prefix*.nnkp file. Please note that
        the character of the maximally localized Wannier functions is
        not exactly the same as that specified by the initial
        projections. One way to ensure that the Wannier functions are
        as close to the initial projections as possible is to first
        choose a good set of initial projections (for these initial
        and final spread should not differ more than 20%) and then
        perform another Wannier90 run setting *num_iter=0* in the
        *prefix*.win file.
        Number of spin components is always set to 1, even if the
        underlying DFT calculation includes spin. Please refer to the
        *projections* block or the *prefix*.nnkp file to see which
        orbitals correspond to which spin.
        Locations of the orbitals in the returned
        :class:`pythtb.tb_model` object are equal to the centers of
        the Wannier functions computed by Wannier90.
        :param zero_energy: Sets the zero of the energy in the band
          structure. This value is typically set to the Fermi level
          computed by the density-functional code (or to the top of the
          valence band). Units are electron-volts.
        :param min_hopping_norm: Hopping terms read from Wannier90 with
          complex norm less than *min_hopping_norm* will not be included
          in the returned tight-binding model. This parameter is
          specified in electron-volts. By default all terms regardless
          of their norm are included.
        :param max_distance: Hopping terms from site *i* to site *j+R* will
          be ignored if the distance from orbital *i* to *j+R* is larger
          than *max_distance*. This parameter is given in Angstroms.
          By default all terms regardless of the distance are included.
        :param ignorable_imaginary_part: The hopping term will be assumed to
          be exactly real if the absolute value of the imaginary part as
          computed by Wannier90 is less than *ignorable_imaginary_part*.
          By default imaginary terms are not ignored. Units are again
          eV.
        :returns:
          * **tb** -- The object of type :class:`pythtb.tb_model` that can be used to
              interpolate Wannier90 band structure to an arbitrary k-point as well
              as to analyze the character of the wavefunctions.
        Example usage::
          # returns tb_model with all hopping parameters
          my_model=silicon.model()
          # simplified model that contains only hopping terms above 0.01 eV
          my_model_simple=silicon.model(min_hopping_norm=0.01)
          my_model_simple.display()
        """
        # make the model object
        tb = my_tb_model(3, 3, self.lat, self.red_cen)
        # remember that this model was computed from w90
        tb._assume_position_operator_diagonal = False
        # add onsite energies
        onsite = np.zeros(self.num_wan, dtype=float)
        for i in range(self.num_wan):
            tmp_ham = self.ham_r[(0, 0, 0)]["h"][i, i] / float(self.ham_r[(0, 0, 0)]["deg"])
            onsite[i] = tmp_ham.real
            if np.abs(tmp_ham.imag) > 1.0E-9:
                raise Exception("Onsite terms should be real!")
        tb.set_onsite(onsite - zero_energy)
        # add hopping terms
        for R in self.ham_r:
            # avoid double counting
            use_this_R = True
            # avoid onsite terms
            if R[0] == 0 and R[1] == 0 and R[2] == 0:
                avoid_diagonal = True
            else:
                avoid_diagonal = False
            # avoid taking both R and -R
            if R[0] != 0:
                if R[0] < 0:
                    use_this_R = False
            else:
                if R[1] != 0:
                    if R[1] < 0:
                        use_this_R = False
                else:
                    if R[2] < 0:
                        use_this_R = False
            # get R vector in Cartesian coordinates.
            # BUGFIX: _red_to_cart is private to pythtb and was never
            # imported here (NameError); reduced -> Cartesian is simply
            # the dot product with the lattice-vector matrix.
            vecR = np.dot(np.array(R, dtype=float), np.array(self.lat))
            # scan through unique R
            if use_this_R == True:
                for i in range(self.num_wan):
                    vec_i = self.xyz_cen[i]
                    for j in range(self.num_wan):
                        vec_j = self.xyz_cen[j]
                        # get distance between orbitals
                        dist_ijR = np.sqrt(np.dot(-vec_i + vec_j + vecR,
                                                  -vec_i + vec_j + vecR))
                        # to prevent double counting
                        if not (avoid_diagonal == True and j <= i):
                            # only if distance between orbitals is small enough
                            if max_distance is not None:
                                if dist_ijR > max_distance:
                                    continue
                            # divide the matrix element from w90 with the degeneracy
                            tmp_ham = self.ham_r[R]["h"][i, j] / float(self.ham_r[R]["deg"])
                            # only if big enough matrix element
                            if min_hopping_norm is not None:
                                if np.abs(tmp_ham) < min_hopping_norm:
                                    continue
                            # remove imaginary part if needed
                            if ignorable_imaginary_part is not None:
                                if np.abs(tmp_ham.imag) < ignorable_imaginary_part:
                                    tmp_ham = tmp_ham.real + 0.0j
                            # set the hopping term
                            tb.set_hop(tmp_ham, i, j, list(R))
        return tb
|
mailhexu/pyDFTutils
|
pyDFTutils/tightbinding/mypythTB.py
|
Python
|
lgpl-3.0
| 18,994
|
[
"ASE",
"Gaussian",
"Wannier90"
] |
f1d49189e92080a995e295c006d7415c338cc5b65e79e753a8f7effd507ac081
|
from galaxy.web.base.controller import *
from galaxy.web.framework.helpers import time_ago, iff, grids
from galaxy.model.orm import *
from galaxy.datatypes import sniff
from galaxy import util
from galaxy.util.streamball import StreamBall
import logging, tempfile, zipfile, tarfile, os, sys
from galaxy.web.form_builder import *
from datetime import datetime, timedelta
from galaxy.web.controllers.forms import get_all_forms
from sqlalchemy.sql.expression import func, and_
from sqlalchemy.sql import select
log = logging.getLogger( __name__ )
#
# ---- Request Grid ------------------------------------------------------------
#
class RequestsGrid( grids.Grid ):
    """Admin grid listing sequencing requests, with filtering by state."""
    # Custom column types
    class NameColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request):
            return request.name
    class DescriptionColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request):
            return request.desc
    class SamplesColumn( grids.GridColumn ):
        def get_value(self, trans, grid, request):
            # number of samples attached to this request
            return str(len(request.samples))
    class TypeColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request):
            return request.type.name
    class LastUpdateColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request):
            delta = datetime.utcnow() - request.update_time
            # BUGFIX: timedelta.seconds excludes whole days, so requests
            # older than a day used to show a wildly wrong "N hours";
            # use the total elapsed seconds instead.
            total_seconds = delta.days * 86400 + delta.seconds
            if delta > timedelta( minutes=60 ):
                last_update = '%s hours' % ( total_seconds // 60 // 60 )
            else:
                last_update = '%s minutes' % ( total_seconds // 60 )
            return last_update
    class StateColumn( grids.GridColumn ):
        def __init__( self, col_name, key, model_class, event_class, filterable, link ):
            grids.GridColumn.__init__(self, col_name, key=key, model_class=model_class, filterable=filterable, link=link)
            self.event_class = event_class
        def get_value(self, trans, grid, request):
            # color-code the state cell according to the request's state
            if request.state() == request.states.REJECTED:
                return '<div class="count-box state-color-error">%s</div>' % request.state()
            elif request.state() == request.states.NEW:
                return '<div class="count-box state-color-queued">%s</div>' % request.state()
            elif request.state() == request.states.SUBMITTED:
                return '<div class="count-box state-color-running">%s</div>' % request.state()
            elif request.state() == request.states.COMPLETE:
                return '<div class="count-box state-color-ok">%s</div>' % request.state()
            return request.state()
        def filter( self, db_session, query, column_filter ):
            """ Modify query to filter request by state. """
            if column_filter == "All":
                return query
            if column_filter:
                # select r.id, r.name, re.id, re.state
                # from request as r, request_event as re
                # where re.request_id=r.id and re.state='Complete' and re.create_time in
                # (select MAX( create_time)
                #  from request_event
                #  group by request_id)
                q = query.join(self.event_class.table)\
                         .filter( self.model_class.table.c.id==self.event_class.table.c.request_id )\
                         .filter( self.event_class.table.c.state==column_filter )\
                         .filter( self.event_class.table.c.id.in_(select(columns=[func.max(self.event_class.table.c.id)],
                                                                         from_obj=self.event_class.table,
                                                                         group_by=self.event_class.table.c.request_id)))
            return q
        def get_accepted_filters( self ):
            """ Returns a list of accepted filters for this column. """
            accepted_filter_labels_and_vals = [ model.Request.states.NEW,
                                                model.Request.states.REJECTED,
                                                model.Request.states.SUBMITTED,
                                                model.Request.states.COMPLETE,
                                                "All"]
            accepted_filters = []
            for val in accepted_filter_labels_and_vals:
                label = val.lower()
                args = { self.key: val }
                accepted_filters.append( grids.GridColumnFilter( label, args) )
            return accepted_filters
    class UserColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request):
            return request.user.email
    class DeletedColumn( grids.GridColumn ):
        def get_accepted_filters( self ):
            """ Returns a list of accepted filters for this column. """
            accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
            accepted_filters = []
            for label, val in accepted_filter_labels_and_vals.items():
                args = { self.key: val }
                accepted_filters.append( grids.GridColumnFilter( label, args) )
            return accepted_filters
    # Grid definition
    title = "Sequencing Requests"
    template = "admin/requests/grid.mako"
    model_class = model.Request
    default_sort_key = "-create_time"
    num_rows_per_page = 50
    preserve_state = True
    use_paging = True
    default_filter = dict( deleted="False", state=model.Request.states.SUBMITTED)
    columns = [
        NameColumn( "Name",
                    key="name",
                    model_class=model.Request,
                    link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ),
                    attach_popup=True,
                    filterable="advanced" ),
        DescriptionColumn( "Description",
                           key='desc',
                           model_class=model.Request,
                           filterable="advanced" ),
        SamplesColumn( "Sample(s)",
                       link=( lambda item: iff( item.deleted, None, dict( operation="show_request", id=item.id ) ) ), ),
        TypeColumn( "Type",
                    link=( lambda item: iff( item.deleted, None, dict( operation="view_type", id=item.type.id ) ) ), ),
        LastUpdateColumn( "Last update",
                          format=time_ago ),
        DeletedColumn( "Deleted",
                       key="deleted",
                       visible=False,
                       filterable="advanced" ),
        StateColumn( "State",
                     model_class=model.Request,
                     event_class=model.RequestEvent,
                     key='state',
                     filterable="advanced",
                     link=( lambda item: iff( item.deleted, None, dict( operation="events", id=item.id ) ) ),
                     ),
        UserColumn( "User",
                    model_class=model.Request)
    ]
    columns.append( grids.MulticolFilterColumn( "Search",
                                                cols_to_filter=[ columns[0], columns[1], columns[6] ],
                                                key="free-text-search",
                                                visible=False,
                                                filterable="standard" ) )
    operations = [
        grids.GridOperation( "Submit", allow_multiple=False, condition=( lambda item: not item.deleted and item.unsubmitted() and item.samples ),
                             confirm="More samples cannot be added to this request once it is submitted. Click OK to submit." ),
        grids.GridOperation( "Edit", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Reject", allow_multiple=False, condition=( lambda item: not item.deleted and item.submitted() ) ),
        grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
    ]
    global_actions = [
        grids.GridAction( "Create new request", dict( controller='requests_admin',
                                                      action='new',
                                                      select_request_type='True' ) )
    ]
#
# ---- Request Type Grid ------------------------------------------------------
#
class RequestTypeGrid( grids.Grid ):
    """Admin grid listing sequencing request types.

    A request type couples a request form and a sample form; rows here are
    ``model.RequestType`` instances.
    """
    # Custom column types
    class NameColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request_type):
            return request_type.name
    class DescriptionColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request_type):
            return request_type.desc
    class RequestFormColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request_type):
            return request_type.request_form.name
    class SampleFormColumn( grids.TextColumn ):
        def get_value(self, trans, grid, request_type):
            return request_type.sample_form.name
    class DeletedColumn( grids.GridColumn ):
        def get_accepted_filters( self ):
            """ Returns a list of accepted filters for this column. """
            accepted_filter_labels_and_vals = { "active" : "False", "deleted" : "True", "all": "All" }
            accepted_filters = []
            for label, val in accepted_filter_labels_and_vals.items():
                args = { self.key: val }
                accepted_filters.append( grids.GridColumnFilter( label, args) )
            return accepted_filters
    # Grid definition
    title = "Requests Types"
    template = "admin/requests/manage_request_types.mako"
    model_class = model.RequestType
    default_sort_key = "-create_time"
    num_rows_per_page = 50
    preserve_state = True
    use_paging = True
    default_filter = dict( deleted="False" )
    columns = [
        NameColumn( "Name",
                    key="name",
                    model_class=model.RequestType,
                    link=( lambda item: iff( item.deleted, None, dict( operation="view", id=item.id ) ) ),
                    attach_popup=True,
                    filterable="advanced" ),
        DescriptionColumn( "Description",
                           key='desc',
                           # Fixed: was model.Request, but this grid's rows (and its
                           # model_class above) are RequestType, so the 'desc' filter
                           # must target model.RequestType.
                           model_class=model.RequestType,
                           filterable="advanced" ),
        RequestFormColumn( "Request Form",
                           link=( lambda item: iff( item.deleted, None, dict( operation="view_form", id=item.request_form.id ) ) ), ),
        SampleFormColumn( "Sample Form",
                          link=( lambda item: iff( item.deleted, None, dict( operation="view_form", id=item.sample_form.id ) ) ), ),
        # Hidden column used only to drive the deleted/active advanced filter.
        DeletedColumn( "Deleted",
                       key="deleted",
                       visible=False,
                       filterable="advanced" )
    ]
    # Free-text search spans Name (0) and Description (1).
    columns.append( grids.MulticolFilterColumn( "Search",
                                                cols_to_filter=[ columns[0], columns[1] ],
                                                key="free-text-search",
                                                visible=False,
                                                filterable="standard" ) )
    operations = [
        #grids.GridOperation( "Update", allow_multiple=False, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Delete", allow_multiple=True, condition=( lambda item: not item.deleted ) ),
        grids.GridOperation( "Undelete", condition=( lambda item: item.deleted ) ),
    ]
    global_actions = [
        grids.GridAction( "Create new request type", dict( controller='requests_admin',
                                                           action='create_request_type' ) )
    ]
#
# ---- Request Controller ------------------------------------------------------
#
class Requests( BaseController ):
    """Admin-side controller for sequencing requests."""
    # Grid instances are shared class-wide; grids hold no per-request state.
    request_grid = RequestsGrid()
    requesttype_grid = RequestTypeGrid()
    @web.expose
    @web.require_admin
    def index( self, trans ):
        # Landing page of the requests admin interface.
        return trans.fill_template( "/admin/requests/index.mako" )
@web.expose
@web.require_admin
def list( self, trans, **kwd ):
'''
List all request made by the current user
'''
if 'operation' in kwd:
operation = kwd['operation'].lower()
if not kwd.get( 'id', None ):
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message="Invalid request ID") )
if operation == "show_request":
return self.__show_request( trans, **kwd )
elif operation == "submit":
return self.__submit_request( trans, **kwd )
elif operation == "delete":
return self.__delete_request( trans, **kwd )
elif operation == "undelete":
return self.__undelete_request( trans, **kwd )
elif operation == "edit":
return self.__edit_request( trans, **kwd )
elif operation == "reject":
return self.__reject_request( trans, **kwd )
elif operation == "events":
return self.__request_events( trans, **kwd )
elif operation == "view_type":
return self.__view_request_type( trans, **kwd )
# Render the grid view
return self.request_grid( trans, **kwd )
    def __show_request(self, trans, **kwd):
        # Render the request detail page listing the request's samples.
        # ``add_sample`` (truthy) appends one blank sample row for the user to fill.
        params = util.Params( kwd )
        msg = util.restore_text( params.get( 'msg', '' ) )
        messagetype = params.get( 'messagetype', 'done' )
        add_sample = params.get('add_sample', False)
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
        except:
            # Missing or undecodable id: bounce back to the grid with an error.
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                             action='list',
                                                             status='error',
                                                             message="Invalid request ID") )
        current_samples = []
        for s in request.samples:
            current_samples.append([s.name, s.values.content])
        if add_sample:
            # New row: default name plus one empty value per sample-form field.
            current_samples.append(['Sample_%i' % (len(current_samples)+1),['' for field in request.type.sample_form.fields]])
        return trans.fill_template( '/admin/requests/show_request.mako',
                                    request=request,
                                    request_details=self.request_details(trans, request.id),
                                    current_samples = current_samples,
                                    sample_copy=self.__copy_sample(current_samples),
                                    details='hide', edit_mode='False',
                                    msg=msg, messagetype=messagetype )
    @web.expose
    @web.require_admin
    def edit(self, trans, **kwd):
        # Edit an existing request: show the edit form, save changes, or
        # hand off to sample editing depending on which button was pressed.
        params = util.Params( kwd )
        msg = util.restore_text( params.get( 'msg', '' ) )
        messagetype = params.get( 'messagetype', 'done' )
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( int( params.get( 'request_id', None ) ) )
        except:
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              status='error',
                                                              message="Invalid request ID",
                                                              **kwd) )
        if params.get('show', False) == 'True':
            # Just render the edit form.
            return self.__edit_request(trans, id=trans.security.encode_id(request.id), **kwd)
        elif params.get('save_changes_request_button', False) == 'Save changes' \
             or params.get('edit_samples_button', False) == 'Edit samples':
            # NOTE(review): request_type is fetched but never used in this branch.
            request_type = trans.sa_session.query( trans.app.model.RequestType ).get( int( params.select_request_type ) )
            if not util.restore_text(params.get('name', '')):
                # Name is mandatory; redirect back to the form with an error.
                msg = 'Please enter the <b>Name</b> of the request'
                kwd['messagetype'] = 'error'
                kwd['msg'] = msg
                kwd['show'] = 'True'
                return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                  action='edit',
                                                                  **kwd) )
            request = self.__save_request(trans, request, **kwd)
            msg = 'The changes made to the request named %s has been saved' % request.name
            if params.get('save_changes_request_button', False) == 'Save changes':
                return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                  action='list',
                                                                  message=msg ,
                                                                  status='done') )
            elif params.get('edit_samples_button', False) == 'Edit samples':
                # Continue to the sample-editing page for this request.
                new_kwd = {}
                new_kwd['request_id'] = request.id
                new_kwd['edit_samples_button'] = 'Edit samples'
                return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                  action='show_request',
                                                                  msg=msg ,
                                                                  messagetype='done',
                                                                  **new_kwd) )
        elif params.get('refresh', False) == 'true':
            # A refresh-on-change widget fired; re-render the form with new state.
            return self.__edit_request(trans, id=trans.security.encode_id(request.id), **kwd)
def __edit_request(self, trans, **kwd):
try:
request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
msg = "Invalid request ID"
log.warn( msg )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg) )
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
select_request_type = self.__select_request_type(trans, request.type.id)
# list of widgets to be rendered on the request form
widgets = []
if util.restore_text( params.get( 'name', '' ) ):
name = util.restore_text( params.get( 'name', '' ) )
else:
name = request.name
widgets.append(dict(label='Name',
widget=TextField('name', 40, name),
helptext='(Required)'))
if util.restore_text( params.get( 'desc', '' ) ):
desc = util.restore_text( params.get( 'desc', '' ) )
else:
desc = request.desc
widgets.append(dict(label='Description',
widget=TextField('desc', 40, desc),
helptext='(Optional)'))
# libraries selectbox
libui = self.__library_ui(trans, request.user, request, **kwd)
widgets = widgets + libui
widgets = widgets + request.type.request_form.get_widgets( request.user, request.values.content, **kwd )
return trans.fill_template( '/admin/requests/edit_request.mako',
select_request_type=select_request_type,
request_type=request.type,
request=request,
widgets=widgets,
msg=msg,
messagetype=messagetype)
return self.__show_request_form(trans)
def __delete_request(self, trans, **kwd):
id_list = util.listify( kwd['id'] )
for id in id_list:
try:
request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(id) )
except:
msg = "Invalid request ID"
log.warn( msg )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
request.deleted = True
trans.sa_session.add( request )
trans.sa_session.flush()
msg = '%i request(s) has been deleted.' % len(id_list)
status = 'done'
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status=status,
message=msg) )
def __undelete_request(self, trans, **kwd):
id_list = util.listify( kwd['id'] )
for id in id_list:
try:
request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(id) )
except:
msg = "Invalid request ID"
log.warn( msg )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
request.deleted = False
trans.sa_session.add( request )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='done',
message='%i request(s) has been undeleted.' % len(id_list) ) )
def __submit_request(self, trans, **kwd):
try:
request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
msg = "Invalid request ID"
log.warn( msg )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
msg = self.__validate(trans, request)
if msg:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
operation='edit',
messagetype = 'error',
msg=msg,
id=trans.security.encode_id(request.id) ) )
# change the request state to 'Submitted'
if request.user.email is not trans.user:
comments = "Request moved to 'Submitted' state by admin (%s) on behalf of %s." % (trans.user.email, request.user.email)
else:
comments = ""
event = trans.app.model.RequestEvent(request, request.states.SUBMITTED, comments)
trans.sa_session.add( event )
trans.sa_session.flush()
# change the state of each of the samples of thus request
new_state = request.type.states[0]
for s in request.samples:
event = trans.app.model.SampleEvent(s, new_state, 'Samples submitted to the system')
trans.sa_session.add( event )
trans.sa_session.add( request )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
id=trans.security.encode_id(request.id),
status='done',
message='The request <b>%s</b> has been submitted.' % request.name
) )
    def __reject_request(self, trans, **kwd):
        # Show the rejection page for a request (the actual rejection is
        # performed by the exposed reject() action below).
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
        except:
            msg = "Invalid request ID"
            log.warn( msg )
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              status='error',
                                                              message=msg,
                                                              **kwd) )
        return trans.fill_template( '/admin/requests/reject.mako',
                                    request=request)
    @web.expose
    @web.require_admin
    def reject(self, trans, **kwd):
        # Reject a submitted request.  A non-empty comment is mandatory; the
        # rejection is recorded as a RequestEvent with state REJECTED.
        params = util.Params( kwd )
        if params.get('cancel_reject_button', False):
            # Admin backed out; return to the request detail page.
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              operation='show_request',
                                                              id=kwd['id']))
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
        except:
            msg = "Invalid request ID"
            log.warn( msg )
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              status='error',
                                                              message=msg,
                                                              **kwd) )
        # validate
        if not params.get('comment', ''):
            return trans.fill_template( '/admin/requests/reject.mako',
                                        request=request, messagetype='error',
                                        msg='A comment is required for rejecting a request.')
        # create an event with state 'Rejected' for this request
        comments = util.restore_text( params.comment )
        event = trans.app.model.RequestEvent(request, request.states.REJECTED, comments)
        trans.sa_session.add( event )
        trans.sa_session.flush()
        return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                          action='list',
                                                          status='done',
                                                          message='Request <b>%s</b> has been rejected.' % request.name) )
def __request_events(self, trans, **kwd):
try:
request = trans.sa_session.query( trans.app.model.Request ).get( trans.security.decode_id(kwd['id']) )
except:
msg = "Invalid request ID"
log.warn( msg )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
events_list = []
all_events = request.events
for event in all_events:
delta = datetime.utcnow() - event.update_time
if delta > timedelta( minutes=60 ):
last_update = '%s hours' % int( delta.seconds / 60 / 60 )
else:
last_update = '%s minutes' % int( delta.seconds / 60 )
events_list.append((event.state, last_update, event.comment))
return trans.fill_template( '/admin/requests/events.mako',
events_list=events_list, request=request)
#
#---- Request Creation ----------------------------------------------------------
#
def __select_request_type(self, trans, rtid):
requesttype_list = trans.sa_session.query( trans.app.model.RequestType )\
.order_by( trans.app.model.RequestType.name.asc() )
rt_ids = ['none']
for rt in requesttype_list:
if not rt.deleted:
rt_ids.append(str(rt.id))
select_reqtype = SelectField('select_request_type',
refresh_on_change=True,
refresh_on_change_values=rt_ids[1:])
if rtid == 'none':
select_reqtype.add_option('Select one', 'none', selected=True)
else:
select_reqtype.add_option('Select one', 'none')
for rt in requesttype_list:
if not rt.deleted:
if rtid == rt.id:
select_reqtype.add_option(rt.name, rt.id, selected=True)
else:
select_reqtype.add_option(rt.name, rt.id)
return select_reqtype
    @web.expose
    @web.require_admin
    def new(self, trans, **kwd):
        # Create a new request on behalf of a selected user.  The flow is:
        # pick a request type -> fill the form -> Save (or Add samples).
        params = util.Params( kwd )
        msg = util.restore_text( params.get( 'msg', '' ) )
        messagetype = params.get( 'messagetype', 'done' )
        if params.get('select_request_type', False) == 'True':
            # First step: only the request-type selectbox is shown.
            return trans.fill_template( '/admin/requests/new_request.mako',
                                        select_request_type=self.__select_request_type(trans, 'none'),
                                        widgets=[],
                                        msg=msg,
                                        messagetype=messagetype)
        elif params.get('create', False) == 'True':
            if params.get('create_request_button', False) == 'Save' \
               or params.get('create_request_samples_button', False) == 'Add samples':
                request_type = trans.sa_session.query( trans.app.model.RequestType ).get( int( params.select_request_type ) )
                # Name and a target user are both mandatory.
                if not util.restore_text(params.get('name', '')) \
                   or util.restore_text(params.get('select_user', '')) == unicode('none'):
                    msg = 'Please enter the <b>Name</b> of the request and the <b>user</b> on behalf of whom this request will be submitted before saving this request'
                    kwd['create'] = 'True'
                    kwd['messagetype'] = 'error'
                    kwd['msg'] = msg
                    kwd['create_request_button'] = None
                    kwd['create_request_samples_button'] = None
                    return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                      action='new',
                                                                      **kwd) )
                request = self.__save_request(trans, None, **kwd)
                msg = 'The new request named %s has been created' % request.name
                if params.get('create_request_button', False) == 'Save':
                    return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                      action='list',
                                                                      message=msg ,
                                                                      status='done') )
                elif params.get('create_request_samples_button', False) == 'Add samples':
                    # Jump straight to the sample-entry view of the new request.
                    new_kwd = {}
                    new_kwd['id'] = trans.security.encode_id(request.id)
                    new_kwd['operation'] = 'show_request'
                    new_kwd['add_sample'] = True
                    return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                      action='list',
                                                                      message=msg ,
                                                                      status='done',
                                                                      **new_kwd) )
            else:
                return self.__show_request_form(trans, **kwd)
        elif params.get('refresh', False) == 'true':
            # A refresh-on-change widget fired; re-render the form.
            return self.__show_request_form(trans, **kwd)
    def __show_request_form(self, trans, **kwd):
        # Render the new-request form for the selected request type, including
        # user selection, name/description fields, library/folder selectors and
        # the request-type's own form widgets.
        params = util.Params( kwd )
        msg = util.restore_text( params.get( 'msg', '' ) )
        messagetype = params.get( 'messagetype', 'done' )
        try:
            request_type = trans.sa_session.query( trans.app.model.RequestType ).get( int( params.select_request_type ) )
        except:
            # No valid request type selected yet: show only the type selectbox.
            return trans.fill_template( '/admin/requests/new_request.mako',
                                        select_request_type=self.__select_request_type(trans, 'none'),
                                        widgets=[],
                                        msg=msg,
                                        messagetype=messagetype)
        form_values = None
        select_request_type = self.__select_request_type(trans, request_type.id)
        # user
        user_id = params.get( 'select_user', 'none' )
        try:
            user = trans.sa_session.query( trans.app.model.User ).get( int( user_id ) )
        except:
            # 'none' (or anything non-numeric) means no user chosen yet.
            user = None
        # list of widgets to be rendered on the request form
        widgets = []
        widgets.append(dict(label='Select user',
                            widget=self.__select_user(trans, user_id),
                            helptext='The request would be submitted on behalf of this user (Required)'))
        widgets.append(dict(label='Name of the Experiment',
                            widget=TextField('name', 40,
                                             util.restore_text( params.get( 'name', '' ) )),
                            helptext='(Required)'))
        widgets.append(dict(label='Description',
                            widget=TextField('desc', 40,
                                             util.restore_text( params.get( 'desc', '' ) )),
                            helptext='(Optional)'))
        # libraries selectbox
        libui = self.__library_ui(trans, user, **kwd)
        widgets = widgets + libui
        widgets = widgets + request_type.request_form.get_widgets( user, **kwd )
        return trans.fill_template( '/admin/requests/new_request.mako',
                                    select_request_type=select_request_type,
                                    request_type=request_type,
                                    widgets=widgets,
                                    msg=msg,
                                    messagetype=messagetype)
def __select_user(self, trans, userid):
user_list = trans.sa_session.query( trans.app.model.User )\
.order_by( trans.app.model.User.email.asc() )
user_ids = ['none']
for user in user_list:
if not user.deleted:
user_ids.append(str(user.id))
select_user = SelectField('select_user',
refresh_on_change=True,
refresh_on_change_values=user_ids[1:])
if userid == 'none':
select_user.add_option('Select one', 'none', selected=True)
else:
select_user.add_option('Select one', 'none')
for user in user_list:
if not user.deleted:
if userid == str(user.id):
select_user.add_option(user.email, user.id, selected=True)
else:
select_user.add_option(user.email, user.id)
return select_user
    def __library_ui(self, trans, user, request=None, **kwd):
        '''
        This method creates the data library & folder selectbox for new &
        editing requests. First we get a list of all the libraries accessible to
        the current user and display it in a selectbox. If the user has select an
        existing library then display all the accessible sub folders of the selected
        data library.
        '''
        params = util.Params( kwd )
        lib_id = params.get( 'library_id', 'none' )
        # if editing a request and the user has already associated a library to
        # this request, then set the selected_lib to the request.library
        selected_lib = None
        if request and lib_id == 'none':
            if request.library:
                lib_id = str(request.library.id)
                selected_lib = request.library
        # if new request no user is selected initially, none of the libraries are
        # listed in the selectfield
        if not user:
            libraries = {}
        else:
            # get all permitted libraries for this user
            all_libraries = trans.sa_session.query( trans.app.model.Library ) \
                                            .filter( trans.app.model.Library.table.c.deleted == False ) \
                                            .order_by( trans.app.model.Library.name )
            roles = user.all_roles()
            actions_to_check = [ trans.app.security_agent.permitted_actions.LIBRARY_ADD ]
            # The libraries dictionary looks like: { library : '1,2' }, library : '3' }
            # Its keys are the libraries that should be displayed for the current user and whose values are a
            # string of comma-separated folder ids, of the associated folders the should NOT be displayed.
            # The folders that should not be displayed may not be a complete list, but it is ultimately passed
            # to the calling method to keep from re-checking the same folders when the library / folder
            # select lists are rendered.
            libraries = {}
            for library in all_libraries:
                can_show, hidden_folder_ids = trans.app.security_agent.show_library_item( user, roles, library, actions_to_check )
                if can_show:
                    libraries[ library ] = hidden_folder_ids
        # create data library selectbox with refresh on change enabled
        lib_id_list = ['new'] + [str(lib.id) for lib in libraries.keys()]
        lib_list = SelectField( 'library_id', refresh_on_change=True, refresh_on_change_values=lib_id_list )
        # fill up the options in the Library selectbox
        # first option 'none' is the value for "Select one" option
        if lib_id == 'none':
            lib_list.add_option('Select one', 'none', selected=True)
        else:
            lib_list.add_option('Select one', 'none')
        # all the libraries available to the selected user
        for lib, hidden_folder_ids in libraries.items():
            if str(lib.id) == lib_id:
                lib_list.add_option(lib.name, lib.id, selected=True)
                selected_lib, selected_hidden_folder_ids = lib, hidden_folder_ids.split(',')
            else:
                lib_list.add_option(lib.name, lib.id)
            lib_list.refresh_on_change_values.append(lib.id)
        # new data library option
        if lib_id == 'new':
            lib_list.add_option('Create a new data library', 'new', selected=True)
        else:
            lib_list.add_option('Create a new data library', 'new')
        # widget
        lib_widget = dict(label='Data library',
                          widget=lib_list,
                          helptext='Data library where the resultant dataset will be stored.')
        # show the folder widget only if the user has selected a valid library above
        # NOTE(review): if selected_lib was set from request.library but that
        # library is not in the accessible ``libraries`` dict (or ``user`` is
        # None so ``roles`` was never assigned), the folder section below can
        # raise NameError on selected_hidden_folder_ids / roles — verify.
        if selected_lib:
            # when editing a request, either the user has already selected a subfolder or not
            if request:
                if request.folder:
                    current_fid = request.folder.id
                else:
                    # when a folder not yet associated with the request then the
                    # the current folder is set to the root_folder of the
                    # parent data library if present.
                    if request.library:
                        current_fid = request.library.root_folder.id
                    else:
                        current_fid = params.get( 'folder_id', 'none' )
            else:
                current_fid = params.get( 'folder_id', 'none' )
            # create the folder selectbox
            folder_list = SelectField( 'folder_id')
            # first option
            if lib_id == 'none':
                folder_list.add_option('Select one', 'none', selected=True)
            else:
                folder_list.add_option('Select one', 'none')
            # get all show-able folders for the selected library
            showable_folders = trans.app.security_agent.get_showable_folders( user, roles,
                                                                              selected_lib,
                                                                              actions_to_check,
                                                                              selected_hidden_folder_ids )
            for f in showable_folders:
                if str(f.id) == str(current_fid):
                    folder_list.add_option(f.name, f.id, selected=True)
                else:
                    folder_list.add_option(f.name, f.id)
            # folder widget
            folder_widget = dict(label='Folder',
                                 widget=folder_list,
                                 helptext='Folder of the selected data library where the resultant dataset will be stored.')
        if lib_id == 'new':
            new_lib = dict(label='Create a new data library',
                           widget=TextField('new_library_name', 40,
                                            util.restore_text( params.get( 'new_library_name', '' ) )),
                           helptext='Enter a name here to request a new data library')
            return [lib_widget, new_lib]
        else:
            if selected_lib:
                return [lib_widget, folder_widget]
            else:
                return [lib_widget]
def __validate(self, trans, request):
'''
Validates the request entered by the user
'''
if not request.samples:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
operation='show_request',
message='Please add one or more samples to this request before submitting.',
status='error',
id=trans.security.encode_id(request.id)) )
empty_fields = []
# check rest of the fields of the form
for index, field in enumerate(request.type.request_form.fields):
if field['required'] == 'required' and request.values.content[index] in ['', None]:
empty_fields.append(field['label'])
if empty_fields:
msg = 'Fill the following fields of the request <b>%s</b> before submitting<br/>' % request.name
for ef in empty_fields:
msg = msg + '<b>' +ef + '</b><br/>'
return msg
return None
def __save_request(self, trans, request=None, **kwd):
'''
This method saves a new request if request_id is None.
'''
params = util.Params( kwd )
request_type = trans.sa_session.query( trans.app.model.RequestType ).get( int( params.select_request_type ) )
if request:
user = request.user
else:
user = trans.sa_session.query( trans.app.model.User ).get( int( params.get( 'select_user', '' ) ) )
name = util.restore_text(params.get('name', ''))
desc = util.restore_text(params.get('desc', ''))
# library
try:
library = trans.sa_session.query( trans.app.model.Library ).get( int( params.get( 'library_id', None ) ) )
except:
library = None
try:
folder = trans.sa_session.query( trans.app.model.LibraryFolder ).get( int( params.get( 'folder_id', None ) ) )
except:
if library:
folder = library.root_folder
else:
folder = None
# fields
values = []
for index, field in enumerate(request_type.request_form.fields):
if field['type'] == 'AddressField':
value = util.restore_text(params.get('field_%i' % index, ''))
if value == 'new':
# save this new address in the list of this user's addresses
user_address = trans.app.model.UserAddress( user=user )
user_address.desc = util.restore_text(params.get('field_%i_short_desc' % index, ''))
user_address.name = util.restore_text(params.get('field_%i_name' % index, ''))
user_address.institution = util.restore_text(params.get('field_%i_institution' % index, ''))
user_address.address = util.restore_text(params.get('field_%i_address1' % index, ''))+' '+util.restore_text(params.get('field_%i_address2' % index, ''))
user_address.city = util.restore_text(params.get('field_%i_city' % index, ''))
user_address.state = util.restore_text(params.get('field_%i_state' % index, ''))
user_address.postal_code = util.restore_text(params.get('field_%i_postal_code' % index, ''))
user_address.country = util.restore_text(params.get('field_%i_country' % index, ''))
user_address.phone = util.restore_text(params.get('field_%i_phone' % index, ''))
trans.sa_session.add( user_address )
trans.sa_session.flush()
trans.sa_session.refresh( trans.user )
values.append(int(user_address.id))
elif value == unicode('none'):
values.append('')
else:
values.append(int(value))
elif field['type'] == 'CheckboxField':
values.append(CheckboxField.is_checked( params.get('field_%i' % index, '') ))
else:
values.append(util.restore_text(params.get('field_%i' % index, '')))
form_values = trans.app.model.FormValues(request_type.request_form, values)
trans.sa_session.add( form_values )
trans.sa_session.flush()
if not request:
request = trans.app.model.Request(name, desc, request_type,
user, form_values,
library=library, folder=folder)
trans.sa_session.add( request )
trans.sa_session.flush()
trans.sa_session.refresh( request )
# create an event with state 'New' for this new request
if request.user.email is not trans.user:
comments = "Request created by admin (%s) on behalf of %s." % (trans.user.email, request.user.email)
else:
comments = "Request created."
event = trans.app.model.RequestEvent(request, request.states.NEW, comments)
trans.sa_session.add( event )
trans.sa_session.flush()
else:
request.name = name
request.desc = desc
request.type = request_type
request.user = user
request.values = form_values
request.library = library
request.folder = folder
trans.sa_session.add( request )
trans.sa_session.flush()
return request
#
#---- Request Page ----------------------------------------------------------
#
def __update_samples(self, request, **kwd):
'''
This method retrieves all the user entered sample information and
returns an list of all the samples and their field values
'''
params = util.Params( kwd )
current_samples = []
for s in request.samples:
current_samples.append([s.name, s.values.content])
index = len(request.samples)
while True:
if params.get( 'sample_%i_name' % index, '' ):
sample_index = index
sample_name = util.restore_text( params.get( 'sample_%i_name' % sample_index, '' ) )
sample_values = []
for field_index in range(len(request.type.sample_form.fields)):
sample_values.append(util.restore_text( params.get( 'sample_%i_field_%i' % (sample_index, field_index), '' ) ))
current_samples.append([sample_name, sample_values])
index = index + 1
else:
break
details = params.get( 'details', 'hide' )
edit_mode = params.get( 'edit_mode', 'False' )
return current_samples, details, edit_mode
def __copy_sample(self, current_samples):
copy_list = SelectField('copy_sample')
copy_list.add_option('None', -1, selected=True)
for i, s in enumerate(current_samples):
copy_list.add_option(s[0], i)
return copy_list
    @web.expose
    @web.require_login( "create/submit sequencing requests" )
    def show_request(self, trans, **kwd):
        """Show a sequencing request and process the sample-editing buttons on
        the page: import samples from CSV, add a (copied or empty) sample,
        save new/edited samples, switch to edit mode, or cancel.

        The current (possibly unsaved) sample rows always come from
        __update_samples, which merges saved samples with posted form rows.
        """
        params = util.Params( kwd )
        msg = util.restore_text( params.get( 'msg', '' ) )
        messagetype = params.get( 'messagetype', 'done' )
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( int( params.get( 'request_id', None ) ) )
        except:
            # Missing or non-integer request_id.
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              status='error',
                                                              message="Invalid request ID",
                                                              **kwd) )
        # get the user entered sample details
        current_samples, details, edit_mode = self.__update_samples( request, **kwd )
        if params.get('import_samples_button', False) == 'Import samples':
            # Append one sample per CSV row: column 0 is the sample name, the
            # remaining columns are the sample-form field values.
            try:
                file_obj = params.get('file_data', '')
                import csv
                reader = csv.reader(file_obj.file)
                for row in reader:
                    current_samples.append([row[0], row[1:]])
                return trans.fill_template( '/admin/requests/show_request.mako',
                                            request=request,
                                            request_details=self.request_details(trans, request.id),
                                            current_samples=current_samples,
                                            sample_copy=self.__copy_sample(current_samples),
                                            details=details,
                                            edit_mode=edit_mode)
            except:
                return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                                  action='list',
                                                                  status='error',
                                                                  message='Error in importing samples file',
                                                                  **kwd))
        elif params.get('add_sample_button', False) == 'Add New':
            # add an empty or filled sample
            # if the user has selected a sample no. to copy then copy the contents
            # of the src sample to the new sample else an empty sample
            src_sample_index = int(params.get( 'copy_sample', -1 ))
            if src_sample_index == -1:
                # empty sample
                current_samples.append(['Sample_%i' % (len(current_samples)+1),['' for field in request.type.sample_form.fields]])
            else:
                current_samples.append([current_samples[src_sample_index][0]+'_%i' % (len(current_samples)+1),
                                        [val for val in current_samples[src_sample_index][1]]])
            return trans.fill_template( '/admin/requests/show_request.mako',
                                        request=request,
                                        request_details=self.request_details(trans, request.id),
                                        current_samples=current_samples,
                                        sample_copy=self.__copy_sample(current_samples),
                                        details=details,
                                        edit_mode=edit_mode)
        elif params.get('save_samples_button', False) == 'Save':
            # check for duplicate sample names
            # Only the newly added rows (indices past the saved samples) are
            # checked for blank or duplicate names.
            msg = ''
            for index in range(len(current_samples)-len(request.samples)):
                sample_index = index + len(request.samples)
                sample_name = current_samples[sample_index][0]
                if not sample_name.strip():
                    msg = 'Please enter the name of sample number %i' % sample_index
                    break
                count = 0
                for i in range(len(current_samples)):
                    if sample_name == current_samples[i][0]:
                        count = count + 1
                if count > 1:
                    msg = "This request has <b>%i</b> samples with the name <b>%s</b>.\nSamples belonging to a request must have unique names." % (count, sample_name)
                    break
            if msg:
                return trans.fill_template( '/admin/requests/show_request.mako',
                                            request=request,
                                            request_details=self.request_details(trans, request.id),
                                            current_samples = current_samples,
                                            sample_copy=self.__copy_sample(current_samples),
                                            details=details, edit_mode=edit_mode,
                                            messagetype='error', msg=msg)
            # save all the new/unsaved samples entered by the user
            if edit_mode == 'False':
                for index in range(len(current_samples)-len(request.samples)):
                    # NOTE(review): sample_index is re-read from the *current*
                    # length of request.samples on every iteration; this only
                    # walks successive form rows if the relationship grows after
                    # each flush below -- confirm with the SQLAlchemy mapping.
                    sample_index = len(request.samples)
                    sample_name = util.restore_text( params.get( 'sample_%i_name' % sample_index, '' ) )
                    sample_values = []
                    for field_index in range(len(request.type.sample_form.fields)):
                        sample_values.append(util.restore_text( params.get( 'sample_%i_field_%i' % (sample_index, field_index), '' ) ))
                    form_values = trans.app.model.FormValues(request.type.sample_form, sample_values)
                    trans.sa_session.add( form_values )
                    trans.sa_session.flush()
                    s = trans.app.model.Sample(sample_name, '', request, form_values)
                    trans.sa_session.add( s )
                    trans.sa_session.flush()
            else:
                # Edit mode: rewrite each existing sample's stored field values
                # and (possibly changed) name.
                for index in range(len(current_samples)):
                    sample_index = index
                    sample_name = current_samples[sample_index][0]
                    new_sample_name = util.restore_text( params.get( 'sample_%i_name' % sample_index, '' ) )
                    sample_values = []
                    for field_index in range(len(request.type.sample_form.fields)):
                        sample_values.append(util.restore_text( params.get( 'sample_%i_field_%i' % (sample_index, field_index), '' ) ))
                    sample = request.has_sample(sample_name)
                    if sample:
                        form_values = trans.sa_session.query( trans.app.model.FormValues ).get( sample.values.id )
                        form_values.content = sample_values
                        trans.sa_session.add( form_values )
                        trans.sa_session.flush()
                        sample.name = new_sample_name
                        trans.sa_session.add( sample )
                        trans.sa_session.flush()
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              operation='show_request',
                                                              id=trans.security.encode_id(request.id)) )
        elif params.get('edit_samples_button', False) == 'Edit samples':
            edit_mode = 'True'
            return trans.fill_template( '/admin/requests/show_request.mako',
                                        request=request,
                                        request_details=self.request_details(trans, request.id),
                                        current_samples=current_samples,
                                        sample_copy=self.__copy_sample(current_samples),
                                        details=details,
                                        edit_mode=edit_mode)
        elif params.get('cancel_changes_button', False) == 'Cancel':
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              operation='show_request',
                                                              id=trans.security.encode_id(request.id)) )
        # NOTE(review): if no known button was posted this falls through and
        # returns None -- presumably unreachable via the UI; confirm.
@web.expose
@web.require_login( "create/submit sequencing requests" )
def delete_sample(self, trans, **kwd):
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
request = trans.sa_session.query( trans.app.model.Request ).get( int( params.get( 'request_id', 0 ) ) )
current_samples, details, edit_mode = self.__update_samples( request, **kwd )
sample_index = int(params.get('sample_id', 0))
sample_name = current_samples[sample_index][0]
s = request.has_sample(sample_name)
if s:
trans.sa_session.delete( s )
trans.sa_session.flush()
del current_samples[sample_index]
return trans.fill_template( '/admin/requests/show_request.mako',
request=request,
request_details=self.request_details(trans, request.id),
current_samples = current_samples,
sample_copy=self.__copy_sample(current_samples),
details=details,
edit_mode=edit_mode)
@web.expose
@web.require_login( "create/submit sequencing requests" )
def toggle_request_details(self, trans, **kwd):
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
request = trans.sa_session.query( trans.app.model.Request ).get( int( params.get( 'request_id', 0 ) ) )
current_samples, details, edit_mode = self.__update_samples( request, **kwd )
return trans.fill_template( '/admin/requests/show_request.mako',
request=request,
request_details=self.request_details(trans, request.id),
current_samples = current_samples,
sample_copy=self.__copy_sample(current_samples),
details=details,
edit_mode=edit_mode)
@web.expose
@web.require_admin
def request_details(self, trans, id):
'''
Shows the request details
'''
request = trans.sa_session.query( trans.app.model.Request ).get( id )
# list of widgets to be rendered on the request form
request_details = []
# main details
request_details.append(dict(label='User',
value=str(request.user.email),
helptext=''))
request_details.append(dict(label='Description',
value=request.desc,
helptext=''))
request_details.append(dict(label='Type',
value=request.type.name,
helptext=''))
request_details.append(dict(label='State',
value=request.state(),
helptext=''))
request_details.append(dict(label='Date created',
value=request.create_time,
helptext=''))
# library associated
if request.library:
value=request.library.name
else:
value = None
request_details.append(dict(label='Data library',
value=value,
helptext='Data library where the resultant dataset will be stored'))
# folder associated
if request.folder:
value = request.folder.name
else:
value = None
request_details.append( dict( label='Data library folder',
value=value,
helptext='Data library folder where the resultant dataset will be stored' ) )
# form fields
for index, field in enumerate(request.type.request_form.fields):
if field['required']:
req = 'Required'
else:
req = 'Optional'
if field['type'] == 'AddressField':
if request.values.content[index]:
request_details.append(dict(label=field['label'],
value=trans.sa_session.query( trans.app.model.UserAddress ).get( int( request.values.content[index] ) ).get_html(),
helptext=field['helptext']+' ('+req+')'))
else:
request_details.append(dict(label=field['label'],
value=None,
helptext=field['helptext']+' ('+req+')'))
else:
request_details.append(dict(label=field['label'],
value=request.values.content[index],
helptext=field['helptext']+' ('+req+')'))
return request_details
@web.expose
@web.require_admin
def bar_codes(self, trans, **kwd):
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
request_id = params.get( 'request_id', None )
if request_id:
request = trans.sa_session.query( trans.app.model.Request ).get( int( request_id ))
if not request:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message="Invalid request ID",
**kwd) )
widgets = []
for index, sample in enumerate(request.samples):
if sample.bar_code:
bc = sample.bar_code
else:
bc = util.restore_text(params.get('sample_%i_bar_code' % index, ''))
widgets.append(TextField('sample_%i_bar_code' % index,
40,
bc))
return trans.fill_template( '/admin/samples/bar_codes.mako',
samples_list=[s for s in request.samples],
user=request.user, request=request, widgets=widgets,
messagetype=messagetype,
msg=msg)
    @web.expose
    @web.require_admin
    def save_bar_codes(self, trans, **kwd):
        """Validate and save the bar codes posted for every sample of a
        request, then advance all samples to the second sample state.

        Validation: every bar code must be non-empty, unique within the posted
        form, and unique against all samples already stored.
        """
        params = util.Params( kwd )
        try:
            request = trans.sa_session.query( trans.app.model.Request ).get( int( params.get( 'request_id', None ) ) )
        except:
            return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                              action='list',
                                                              status='error',
                                                              message="Invalid request ID",
                                                              **kwd) )
        # validate
        # bar codes need to be globally unique
        msg = ''
        for index in range(len(request.samples)):
            bar_code = util.restore_text(params.get('sample_%i_bar_code' % index, ''))
            # check for empty bar code
            if not bar_code.strip():
                msg = 'Please fill the barcode for sample <b>%s</b>.' % request.samples[index].name
                break
            # check all the unsaved bar codes
            # (count includes this row itself, hence the > 1 test)
            count = 0
            for i in range(len(request.samples)):
                if bar_code == util.restore_text(params.get('sample_%i_bar_code' % i, '')):
                    count = count + 1
            if count > 1:
                msg = '''The barcode <b>%s</b> of sample <b>%s</b> belongs
                another sample in this request. The sample barcodes must
                be unique throughout the system''' % \
                (bar_code, request.samples[index].name)
                break
            # check all the saved bar codes
            # NOTE(review): this scans every sample in the database, including
            # this request's own samples -- re-saving an already-saved bar code
            # appears to trigger this error; confirm intended behavior.
            all_samples = trans.sa_session.query( trans.app.model.Sample )
            for sample in all_samples:
                if bar_code == sample.bar_code:
                    msg = '''The bar code <b>%s</b> of sample <b>%s</b>
                    belongs another sample. The sample bar codes must be
                    unique throughout the system''' % \
                    (bar_code, request.samples[index].name)
                    break
            if msg:
                break
        if msg:
            # Validation failed: re-render the bar-code page with an error.
            widgets = []
            for index, sample in enumerate(request.samples):
                if sample.bar_code:
                    bc = sample.bar_code
                else:
                    bc = util.restore_text(params.get('sample_%i_bar_code' % index, ''))
                # NOTE(review): `bc` is computed but the widget below echoes the
                # posted value directly, so saved bar codes are not shown here.
                widgets.append(TextField('sample_%i_bar_code' % index,
                                         40,
                                         util.restore_text(params.get('sample_%i_bar_code' % index, ''))))
            return trans.fill_template( '/admin/samples/bar_codes.mako',
                                        samples_list=[s for s in request.samples],
                                        user=request.user, request=request, widgets=widgets, messagetype='error',
                                        msg=msg)
        # now save the bar codes
        for index, sample in enumerate(request.samples):
            bar_code = util.restore_text(params.get('sample_%i_bar_code' % index, ''))
            sample.bar_code = bar_code
            trans.sa_session.add( sample )
            trans.sa_session.flush()
        # change the state of all the samples to the next state
        # get the new state
        new_state = request.type.states[1]
        for s in request.samples:
            event = trans.app.model.SampleEvent(s, new_state, 'Bar code added to this sample')
            trans.sa_session.add( event )
            trans.sa_session.flush()
        return trans.response.send_redirect( web.url_for( controller='requests_admin',
                                                          action='list',
                                                          operation='show_request',
                                                          id=trans.security.encode_id(request.id),
                                                          msg='Bar codes have been saved for this request',
                                                          messagetype='done'))
def __set_request_state( self, trans, request ):
# check if all the samples of the current request are in the final state
complete = True
for s in request.samples:
if s.current_state().id != request.type.states[-1].id:
complete = False
if complete:
# change the request state to 'Complete'
comments = "All samples of this request are in the last sample state (%s)." % request.type.states[-1].name
event = trans.app.model.RequestEvent(request, request.states.COMPLETE, comments)
trans.sa_session.add( event )
trans.sa_session.flush()
# trans.sa_session.add( request )
# trans.sa_session.flush()
def change_state(self, trans, sample):
possible_states = sample.request.type.states
curr_state = sample.current_state()
states_input = SelectField('select_state')
for state in possible_states:
if curr_state.name == state.name:
states_input.add_option(state.name+' (Current)', state.id, selected=True)
else:
states_input.add_option(state.name, state.id)
widgets = []
widgets.append(('Select the new state of the sample from the list of possible state(s)',
states_input))
widgets.append(('Comments', TextArea('comment')))
title = 'Change current state'
return widgets, title
@web.expose
@web.require_admin
def save_state(self, trans, **kwd):
params = util.Params( kwd )
try:
sample_id = int(params.get('sample_id', False))
sample = trans.sa_session.query( trans.app.model.Sample ).get( sample_id )
except:
msg = "Invalid sample ID"
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
comments = util.restore_text( params.comment )
selected_state = int( params.select_state )
new_state = trans.sa_session.query( trans.app.model.SampleState ) \
.filter( and_( trans.app.model.SampleState.table.c.request_type_id == sample.request.type.id,
trans.app.model.SampleState.table.c.id == selected_state ) ) \
.first()
event = trans.app.model.SampleEvent(sample, new_state, comments)
trans.sa_session.add( event )
trans.sa_session.flush()
self.__set_request_state( trans, sample.request )
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='show_events',
sample_id=sample.id))
@web.expose
@web.require_admin
def show_events(self, trans, **kwd):
params = util.Params( kwd )
try:
sample_id = int(params.get('sample_id', False))
sample = trans.sa_session.query( trans.app.model.Sample ).get( sample_id )
except:
msg = "Invalid sample ID"
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='list',
status='error',
message=msg,
**kwd) )
events_list = []
all_events = sample.events
for event in all_events:
delta = datetime.utcnow() - event.update_time
if delta > timedelta( minutes=60 ):
last_update = '%s hours' % int( delta.seconds / 60 / 60 )
else:
last_update = '%s minutes' % int( delta.seconds / 60 )
events_list.append((event.state.name, event.state.desc, last_update, event.comment))
widgets, title = self.change_state(trans, sample)
return trans.fill_template( '/admin/samples/events.mako',
events_list=events_list,
sample=sample, widgets=widgets, title=title)
##
#### Request Type Stuff ###################################################
##
@web.expose
@web.require_admin
def manage_request_types( self, trans, **kwd ):
if 'operation' in kwd:
operation = kwd['operation'].lower()
if not kwd.get( 'id', None ):
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
status='error',
message="Invalid requesttype ID") )
if operation == "view":
return self.__view_request_type( trans, **kwd )
elif operation == "view_form":
return self.__view_form( trans, **kwd )
elif operation == "delete":
return self.__delete_request_type( trans, **kwd )
elif operation == "undelete":
return self.__undelete_request_type( trans, **kwd )
# elif operation == "update":
# return self.__edit_request( trans, **kwd )
# Render the grid view
return self.requesttype_grid( trans, **kwd )
def __view_request_type(self, trans, **kwd):
try:
rt = trans.sa_session.query( trans.app.model.RequestType ).get( trans.security.decode_id(kwd['id']) )
except:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
status='error',
message="Invalid requesttype ID") )
return trans.fill_template( '/admin/requests/view_request_type.mako',
request_type=rt,
forms=get_all_forms( trans ),
states_list=rt.states )
def __view_form(self, trans, **kwd):
try:
fd = trans.sa_session.query( trans.app.model.FormDefinition ).get( trans.security.decode_id(kwd['id']) )
except:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
status='error',
message="Invalid form ID") )
return trans.fill_template( '/admin/forms/show_form_read_only.mako',
form=fd )
@web.expose
@web.require_admin
def create_request_type( self, trans, **kwd ):
params = util.Params( kwd )
msg = util.restore_text( params.get( 'msg', '' ) )
messagetype = params.get( 'messagetype', 'done' )
if params.get( 'add_state_button', False ):
rt_info, rt_states = self.__create_request_type_form(trans, **kwd)
rt_states.append(("", ""))
return trans.fill_template( '/admin/requests/create_request_type.mako',
rt_info_widgets=rt_info,
rt_states_widgets=rt_states,
msg=msg,
messagetype=messagetype)
elif params.get( 'remove_state_button', False ):
rt_info, rt_states = self.__create_request_type_form(trans, **kwd)
index = int(params.get( 'remove_state_button', '' ).split(" ")[2])
del rt_states[index-1]
return trans.fill_template( '/admin/requests/create_request_type.mako',
rt_info_widgets=rt_info,
rt_states_widgets=rt_states,
msg=msg,
messagetype=messagetype)
elif params.get( 'save_request_type', False ):
st, msg = self.__save_request_type(trans, **kwd)
if not st:
return trans.fill_template( '/admin/requests/create_request_type.mako',
forms=get_all_forms( trans ),
msg=msg,
messagetype='error')
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
message='Request type <b>%s</b> has been created' % st.name,
status='done') )
else:
rt_info, rt_states = self.__create_request_type_form(trans, **kwd)
return trans.fill_template( '/admin/requests/create_request_type.mako',
rt_info_widgets=rt_info,
rt_states_widgets=rt_states,
msg=msg,
messagetype=messagetype)
def __create_request_type_form(self, trans, **kwd):
request_forms=get_all_forms( trans,
filter=dict(deleted=False),
form_type=trans.app.model.FormDefinition.types.REQUEST )
sample_forms=get_all_forms( trans,
filter=dict(deleted=False),
form_type=trans.app.model.FormDefinition.types.SAMPLE )
if not len(request_forms) or not len(sample_forms):
return [],[]
params = util.Params( kwd )
rt_info = []
rt_info.append(dict(label='Name',
widget=TextField('name', 40, util.restore_text( params.get( 'name', '' ) ) ) ))
rt_info.append(dict(label='Description',
widget=TextField('desc', 40, util.restore_text( params.get( 'desc', '' ) ) ) ))
rf_selectbox = SelectField('request_form_id')
for fd in request_forms:
if str(fd.id) == params.get( 'request_form_id', '' ):
rf_selectbox.add_option(fd.name, fd.id, selected=True)
else:
rf_selectbox.add_option(fd.name, fd.id)
rt_info.append(dict(label='Request form',
widget=rf_selectbox ))
sf_selectbox = SelectField('sample_form_id')
for fd in sample_forms:
if str(fd.id) == params.get( 'sample_form_id', '' ):
sf_selectbox.add_option(fd.name, fd.id, selected=True)
else:
sf_selectbox.add_option(fd.name, fd.id)
rt_info.append(dict(label='Sample form',
widget=sf_selectbox ))
# possible sample states
rt_states = []
i=0
while True:
if kwd.has_key( 'state_name_%i' % i ):
rt_states.append((params.get( 'state_name_%i' % i, '' ),
params.get( 'state_desc_%i' % i, '' )))
i=i+1
else:
break
return rt_info, rt_states
def __save_request_type(self, trans, **kwd):
params = util.Params( kwd )
rt = trans.app.model.RequestType()
rt.name = util.restore_text( params.get( 'name', '' ) )
rt.desc = util.restore_text( params.get( 'desc', '' ) )
rt.request_form = trans.sa_session.query( trans.app.model.FormDefinition ).get( int( params.request_form_id ) )
rt.sample_form = trans.sa_session.query( trans.app.model.FormDefinition ).get( int( params.sample_form_id ) )
trans.sa_session.add( rt )
trans.sa_session.flush()
# set sample states
ss_list = trans.sa_session.query( trans.app.model.SampleState ).filter( trans.app.model.SampleState.table.c.request_type_id == rt.id )
for ss in ss_list:
trans.sa_session.delete( ss )
trans.sa_session.flush()
i=0
while True:
if kwd.has_key( 'state_name_%i' % i ):
name = util.restore_text( params.get( 'state_name_%i' % i, None ))
desc = util.restore_text( params.get( 'state_desc_%i' % i, None ))
ss = trans.app.model.SampleState(name, desc, rt)
trans.sa_session.add( ss )
trans.sa_session.flush()
i = i + 1
else:
break
msg = "The new request type named '%s' with %s state(s) has been created" % (rt.name, i)
return rt, msg
def __delete_request_type( self, trans, **kwd ):
id_list = util.listify( kwd['id'] )
for id in id_list:
try:
rt = trans.sa_session.query( trans.app.model.RequestType ).get( trans.security.decode_id(id) )
except:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
msg='Invalid request type ID',
messagetype='error') )
rt.deleted = True
trans.sa_session.add( rt )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
msg='%i request type(s) has been deleted' % len(id_list),
messagetype='done') )
def __undelete_request_type( self, trans, **kwd ):
id_list = util.listify( kwd['id'] )
for id in id_list:
try:
rt = trans.sa_session.query( trans.app.model.RequestType ).get( trans.security.decode_id(id) )
except:
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
msg='Invalid request type ID',
messagetype='error') )
rt.deleted = False
trans.sa_session.add( rt )
trans.sa_session.flush()
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='manage_request_types',
msg='%i request type(s) has been undeleted' % len(id_list),
messagetype='done') )
|
volpino/Yeps-EURAC
|
lib/galaxy/web/controllers/requests_admin.py
|
Python
|
mit
| 86,992
|
[
"Galaxy"
] |
a52de8dac97e27af7cd4f24e3ac631d2afef92f7a5327447a05852b011e6cc08
|
# Copyright 2003 by Bartek Wilczynski. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parsing AlignACE and CompareACE files: AlignAceParser,CompareAceParser
"""
from Bio.Motif import Motif
from Bio.Alphabet import IUPAC
from Bio.Seq import Seq
class Record:
    """Container for the results parsed from one AlignACE output file."""
    def __init__(self):
        self.motifs = []           # all motifs found, in file order
        self.current_motif = None  # motif currently being filled in by read()
        self.param_dict = None     # run parameters; created once "Parameters" is seen
def read(handle):
    """read(handle)

    Parse AlignACE output from `handle` into a Record.

    Relies on the AlignACE file layout: the version and command line come
    first, an "Input sequences" header precedes the "#"-numbered sequence
    lines, and each "Motif" header precedes its instance/mask/MAP lines.
    NOTE(review): handle.next() is the Python 2 iterator protocol; confirm
    the supported Python versions before reuse (Python 3 needs next(handle)).
    """
    record = Record()
    # First two lines: version banner and the command line that produced the file.
    record.ver = handle.next()
    record.cmd_line = handle.next()
    for line in handle:
        if line.strip() == "":
            pass  # blank separator line
        elif line[:4]=="Para":
            # "Parameters" header: start collecting parameter assignments.
            record.param_dict={}
        elif line[0]=="#":
            # "#<number>\t<name>": one input-sequence name per line.
            seq_name = line.split("\t")[1]
            record.seq_dict.append(seq_name)
        elif "=" in line:
            # "name = value" parameter line; both sides kept as strings.
            par_name = line.split("=")[0].strip()
            par_value = line.split("=")[1].strip()
            record.param_dict[par_name]=par_value
        elif line[:5]=="Input":
            # "Input sequences" header: start the sequence-name list.
            record.seq_dict=[]
        elif line[:5]=="Motif":
            # New motif section; subsequent hit/MAP/mask lines fill it in.
            record.current_motif = Motif()
            record.motifs.append(record.current_motif)
            record.current_motif.alphabet=IUPAC.unambiguous_dna
        elif line[:3]=="MAP":
            # "MAP Score: <float>" -- overall score of the current motif.
            record.current_motif.score = float(line.split()[-1])
        elif len(line.split("\t"))==4:
            # Four tab-separated columns: a motif instance; column 0 is the sequence.
            seq = Seq(line.split("\t")[0],IUPAC.unambiguous_dna)
            record.current_motif.add_instance(seq)
        elif "*" in line:
            # Mask line of '*' and ' ' marking significant positions.
            record.current_motif.set_mask(line.strip("\n\c"))
        else:
            raise ValueError(line)
    return record
# Everything below is deprecated.
from Bio.ParserSupport import *
import Bio
class AlignAceConsumer:
    """
    The general purpose consumer for the AlignAceScanner (DEPRECATED).

    Pass an instance as the consumer to AlignAceScanner.feed; after the file
    has been consumed, the parsed motifs are available in the `motifs`
    attribute (plus `param_dict`, `seq_dict`, `ver` and `cmd_line`).

    This class is DEPRECATED; please use the read() function in this module
    instead.
    """
    def __init__(self):
        import warnings
        warnings.warn("Bio.Motif.Parsers.AlignAce.AlignAceConsumer is deprecated; please use the read() function in this module instead.", Bio.BiopythonDeprecationWarning)
        self.motifs = []
        self.current_motif = None
        self.param_dict = None

    def parameters(self, line):
        # "Parameters" header starts a fresh parameter dictionary.
        self.param_dict = {}

    def parameter(self, line):
        # "name = value"; keep only the first two '='-separated pieces.
        name, value = [part.strip() for part in line.split("=")[:2]]
        self.param_dict[name] = value

    def sequences(self, line):
        # "Input sequences" header starts the sequence-name list.
        self.seq_dict = []

    def sequence(self, line):
        # "#<number>\t<name>": record the sequence name.
        self.seq_dict.append(line.split("\t")[1])

    def motif(self, line):
        # "Motif" header: open a new DNA motif to be filled in.
        motif = Motif()
        self.current_motif = motif
        self.motifs.append(motif)
        motif.alphabet = IUPAC.unambiguous_dna

    def motif_hit(self, line):
        # First tab-separated column is one instance of the current motif.
        self.current_motif.add_instance(Seq(line.split("\t")[0], IUPAC.unambiguous_dna))

    def motif_score(self, line):
        # "MAP Score: <float>" -- last token is the score.
        self.current_motif.score = float(line.split()[-1])

    def motif_mask(self, line):
        self.current_motif.set_mask(line.strip("\n\c"))

    def noevent(self, line):
        pass

    def version(self, line):
        self.ver = line

    def command_line(self, line):
        self.cmd_line = line
class AlignAceParser(AbstractParser):
    """Parses AlignAce data into a sequence of Motifs (DEPRECATED)

    This class is DEPRECATED; please use the read() function in this module
    instead.
    """
    def __init__(self):
        """Create the scanner/consumer pair after warning about deprecation."""
        import warnings
        warnings.warn("Bio.Motif.Parsers.AlignAce.AlignAceParser is deprecated; please use the read() function in this module instead.", Bio.BiopythonDeprecationWarning)
        self._scanner = AlignAceScanner()
        self._consumer = AlignAceConsumer()

    def parse(self, handle):
        """Feed `handle` through the scanner and return the populated consumer."""
        scanner = self._scanner
        consumer = self._consumer
        scanner.feed(handle, consumer)
        return consumer
class AlignAceScanner:
    """Scannner for AlignACE output (DEPRECATED).

    Methods:
    feed     Feed data into the scanner.

    The scanner generates (and calls the consumer) the following types of events:

    noevent - blank line
    version - AlignACE version number
    command_line - AlignACE command line string
    parameters - the begining of the parameters
    parameter - the line containing a parameter
    sequences - the begining of the sequences list
    sequence - line containing the name of the input sequence (and a respective number)
    motif - the begining of the motif (contains the number)
    motif_hit - one hit for a motif
    motif_mask - mask of the motif (space - gap, asterisk - significant position)
    motif_score - MAP score of the motif - approx. N * log R, where R == (num. of actual occur.) / (num. of occur. expected by random.)

    This class is DEPRECATED; please use the read() function in this module
    instead.
    """
    def __init__(self):
        import warnings
        warnings.warn("Bio.Motif.Parsers.AlignAce.AlignAceScanner is deprecated; please use the read() function in this module instead.", Bio.BiopythonDeprecationWarning)

    def feed(self, handle, consumer):
        """S.feed(handle, consumer)

        Feed in a AlignACE report for scanning. handle is a file-like
        object that contains the AlignACE report. consumer is a Consumer
        object that will receive events as the report is scanned.
        """
        # The first two lines are always the version banner and command line.
        consumer.version(handle.readline())
        consumer.command_line(handle.readline())
        # Classify every remaining line; branch order matters (e.g. "Para"
        # must win before the "=" test fires on a parameter line).
        for line in handle:
            if not line.strip():
                consumer.noevent(line)
            elif line.startswith("Para"):
                consumer.parameters(line)
            elif line.startswith("#"):
                consumer.sequence(line)
            elif "=" in line:
                consumer.parameter(line)
            elif line.startswith("Input"):
                consumer.sequences(line)
            elif line.startswith("Motif"):
                consumer.motif(line)
            elif line.startswith("MAP"):
                consumer.motif_score(line)
            elif line.count("\t") == 3:
                consumer.motif_hit(line)
            elif "*" in line:
                consumer.motif_mask(line)
            else:
                raise ValueError(line)
class CompareAceScanner:
    """Scannner for CompareACE output (DEPRECATED).

    Methods:
    feed     Feed data into the scanner.

    The scanner generates (and calls the consumer) only one event type:

    motif_score - CompareACE score of motifs

    ###### TO DO #############3
    extend the scanner to include other, more complex outputs.
    """
    def __init__(self):
        import warnings
        warnings.warn("Bio.Motif.Parsers.AlignAce.CompareAceScanner is deprecated.", Bio.BiopythonDeprecationWarning)

    def feed(self, handle, consumer):
        """S.feed(handle, consumer)

        Feed in a CompareACE report for scanning. handle is a file-like
        object that contains the CompareACE report. consumer is a Consumer
        object that will receive events as the report is scanned.
        """
        # A CompareACE report carries a single score line.
        first_line = handle.readline()
        consumer.motif_score(first_line)
class CompareAceConsumer:
    """
    The general purpose consumer for the CompareAceScanner (DEPRECATED).

    After one report has been fed through the scanner, the CompareACE score
    is available in the `data` attribute.
    """
    def __init__(self):
        import warnings
        warnings.warn("Bio.Motif.Parsers.AlignAce.CompareAceConsumer is deprecated.", Bio.BiopythonDeprecationWarning)

    def motif_score(self, line):
        # The score is the last whitespace-separated token on the line.
        tokens = line.split()
        self.data = float(tokens[-1])
class CompareAceParser(AbstractParser):
    """Parses CompareAce output to usable form

    ### so far only in a very limited way
    """
    def __init__(self):
        """__init__(self)

        Warn about deprecation, then create the scanner/consumer pair."""
        import warnings
        warnings.warn("CompareAceParser and ComparAceConsumer are" \
                      +" deprecated, and will be removed in a future release of"\
                      +" Biopython. If you want to continue to use this code,"\
                      +" please get in contact with the Biopython developers via"\
                      +" the mailing lists to avoid its permanent removal from"\
                      +" Biopython. See also the Python built in set datatype.", \
                      Bio.BiopythonDeprecationWarning)
        self._scanner = CompareAceScanner()
        self._consumer = CompareAceConsumer()

    def parse(self, handle):
        """parse(self, handle)

        Scan `handle` and return the extracted CompareACE score."""
        self._scanner.feed(handle, self._consumer)
        return self._consumer.data
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Motif/Parsers/AlignAce.py
|
Python
|
gpl-2.0
| 8,975
|
[
"Biopython"
] |
586d934411fcb20501ee1fcc3f9f180d2b70ec2bd67f97b88eb60f0c1f5bf58c
|
__author__ = 'Robert Meyer'
import logging
import os # For path names being viable under Windows and Linux
from pypet.environment import Environment
from pypet.brian.parameter import BrianParameter,BrianMonitorResult
from pypet.utils.explore import cartesian_product
# Don't do this at home:
from brian import *
# We define a function to set all parameter
def add_params(traj):
    """Adds all necessary parameters to `traj`.

    Defines the adaptive-exponential integrate-and-fire network parameters;
    statement order matters because later lines read values added earlier
    (fast access makes traj.VT etc. resolve to the stored values).
    """
    # We set the BrianParameter to be the standard parameter
    traj.v_standard_parameter=BrianParameter
    # Fast access lets the Vcut line below read traj.VT / traj.DeltaT directly.
    traj.v_fast_access=True

    # Add parameters we need for our network
    traj.f_add_parameter('Sim.defaultclock', 0.01*ms)  # integration time step
    traj.f_add_parameter('Net.C',281*pF)               # membrane capacitance
    traj.f_add_parameter('Net.gL',30*nS)               # leak conductance
    traj.f_add_parameter('Net.EL',-70.6*mV)            # leak reversal potential
    traj.f_add_parameter('Net.VT',-50.4*mV)            # threshold potential
    traj.f_add_parameter('Net.DeltaT',2*mV)            # slope factor
    traj.f_add_parameter('Net.tauw',40*ms)             # adaptation time constant
    traj.f_add_parameter('Net.a',4*nS)                 # subthreshold adaptation
    traj.f_add_parameter('Net.b',0.08*nA)              # spike-triggered adaptation increment
    traj.f_add_parameter('Net.I',.8*nA)                # constant input current
    traj.f_add_parameter('Net.Vcut',traj.VT+5*traj.DeltaT) # practical threshold condition
    traj.f_add_parameter('Net.N',50)                   # number of neurons

    # Model equations passed verbatim to BRIAN's NeuronGroup.
    eqs='''
    dvm/dt=(gL*(EL-vm)+gL*DeltaT*exp((vm-VT)/DeltaT)+I-w)/C : volt
    dw/dt=(a*(vm-EL)-w)/tauw : amp
    Vr:volt
    '''

    traj.f_add_parameter('Net.eqs', eqs)
    # Reset rule applied after each spike.
    traj.f_add_parameter('reset', 'vm=Vr;w+=b')
# This is our job that we will execute
def run_net(traj):
    """Creates and runs BRIAN network based on the parameters in `traj`.

    Builds the neuron group, runs a 100 ms warm-up (discarded), records
    spikes and selected membrane voltages for 500 ms, and stores both
    monitors as results on `traj`.
    """
    # We want to give every network a fresh start
    clear(True, True)
    defaultclock.dt=traj.defaultclock

    # We let BRIAN grasp the parameters from the local namespace:
    # the equation string refers to these names symbolically.
    C=traj.C
    gL=traj.gL
    EL=traj.EL
    VT=traj.VT
    DeltaT=traj.DeltaT
    tauw=traj.tauw
    a=traj.a
    b=traj.b
    I=traj.I
    Vcut=traj.Vcut
    N=traj.N
    eqs=traj.eqs

    # Create the Neuron Group
    neuron=NeuronGroup(N,model=eqs,threshold=Vcut,reset=traj.reset)
    neuron.vm=EL
    neuron.w=a*(neuron.vm-EL)
    neuron.Vr=linspace(-48.3*mV,-47.7*mV,N) # bifurcation parameter

    # Run the network initially for 100 milliseconds
    print 'Initial Run'
    run(100*msecond,report='text') # we discard the first spikes

    # Create a Spike Monitor
    MSpike=SpikeMonitor(neuron, delay = 1*ms)
    # Create a State Monitor for the membrane voltage, record from neurons 1-3
    MStateV = StateMonitor(neuron,'vm',record=[1,2,3])

    # Now record for 500 milliseconds
    print 'Measurement run'
    run(500*msecond,report='text')

    # Add the BRAIN monitors as pypet results.
    traj.v_standard_result = BrianMonitorResult
    traj.f_add_result('SpikeMonitor',MSpike)
    traj.f_add_result('StateMonitorV', MStateV)
def main():
    """Entry point: set up the pypet environment, explore the parameter
    space (network size x tauw) with 2 worker processes, and run `run_net`
    for every combination.

    Side effects: writes results to ``hdf5/example_07.hdf5`` and emits log
    output; nothing is returned.
    """
    # Let's be very verbose!
    logging.basicConfig(level = logging.INFO)
    # Let's do multiprocessing this time with a lock (which is default)
    filename = os.path.join('hdf5', 'example_07.hdf5')
    env = Environment(trajectory='Example_07_BRIAN',
                      filename=filename,
                      file_title='Example_07_Brian',
                      comment = 'Go Brian!',
                      # BRIAN-aware result/parameter classes must be importable
                      # in the worker processes as well
                      dynamically_imported_classes=[BrianMonitorResult, BrianParameter],
                      multiproc=True,
                      wrap_mode='QUEUE',
                      ncores=2)
    traj = env.trajectory
    # 1st a) add the parameters
    add_params(traj)
    # 1st b) prepare, we want to explore the different network sizes and different tauw time scales
    # (cartesian_product yields all 4 combinations of N and tauw)
    traj.f_explore(cartesian_product({traj.f_get('N').v_full_name:[50,60],
                                      traj.f_get('tauw').v_full_name:[30*ms,40*ms]}))
    # 2nd let's run our experiment
    env.run(run_net)
    # You can take a look at the results in the hdf5 file if you want!
    # Finally disable logging and close all log-files
    env.disable_logging()

if __name__ == '__main__':
    main()
|
nigroup/pypet
|
examples/example_07_brian_network.py
|
Python
|
bsd-3-clause
| 3,935
|
[
"Brian",
"NEURON"
] |
9cf8b713f1c4f582fff08db2e4048eb6eb95c8dd4a37754ed03ffc0ff07dcaa2
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Generation and Analysis of X3DNA helicoidal parameter profiles --- :mod:`MDAnalysis.analysis.legacy.x3dna`
==========================================================================================================
:Author: Elizabeth Denning
:Year: 2013-2014
:Copyright: GNU Public License v2
.. versionadded:: 0.8
.. versionchanged:: 0.16.0
This module is difficult to test due to restrictions on the X3DNA_ code. It
is therefore considered unmaintained and legacy code. It was moved to the
:mod:`MDAnalysis.analysis.legacy` package (see `issue 906`_)
.. _`issue 906`: https://github.com/MDAnalysis/mdanalysis/issues/906
With the help of this module, X3DNA_ can be run on frames in a trajectory. Data
can be combined and analyzed. X3DNA_ [Lu2003]_ [Lu2008]_ must be installed
separately.
.. rubric:: References
.. [Lu2003] Xiang-Jun Lu & Wilma K. Olson (2003).
3DNA: a software package for the analysis, rebuilding and visualization
for three-dimensional nucleic acid structure
Nucleic Acids Res. 31(17), 5108-21.
.. [Lu2008] Xiang-Jun Lu & Wilma K. Olson (2008).
3DNA: a versatile, integrated software system for the analysis, rebuilding
and visualization of three-dimensional nucleic-acid structures.
Nat Protoc. 3(7), 1213-27.
.. _X3DNA: http://x3dna.org/
Example applications
--------------------
Single structure
~~~~~~~~~~~~~~~~
B-DNA structure::
from MDAnalysis.analysis.legacy.x3dna import X3DNA, X3DNAtraj
from MDAnalysis.tests.datafiles import PDB_X3DNA
# set path to your x3dna binary in bashrc file
H = X3DNA(PDB_X3DNA, executable="x3dna_ensemble analyze -b 355d.bps -p pdbfile")
H.run()
H.collect()
H.plot()
Trajectory
~~~~~~~~~~
Analyzing a trajectory::
u = MDAnalysis.Universe(psf, trajectory)
H = X3DNAtraj(u, ...)
H.run()
H.plot()
H.save()
The profiles are available as the attribute :attr:`X3DNAtraj.profiles`
(``H.profiles`` in the example) and are indexed by frame number but
can also be indexed by an arbitrary order parameter as shown in the
next example.
Analysis classes
----------------
.. autoclass:: X3DNA
:members:
:inherited-members:
.. attribute:: profiles
After running :meth:`X3DNA.collect`, this dict contains all the
X3DNA profiles, indexed by the frame number. If only a single
frame was analyzed then this will be ``X3DNA.profiles[0]``. Note
that the order is random; one needs to sort the keys first.
.. autoclass:: X3DNAtraj
:members:
:inherited-members:
.. attribute:: profiles
After running :meth:`X3DNA.collect`, this dict contains all the
X3DNA profiles, indexed by the frame number.
Utilities
---------
.. autoexception:: ApplicationError
"""
from __future__ import print_function
from six.moves import range
import glob
import os
import errno
import shutil
import warnings
import numpy as np
import os.path
import subprocess
import tempfile
import textwrap
from collections import OrderedDict
from MDAnalysis import ApplicationError
from MDAnalysis.lib.util import which, realpath, asiterable
import logging
logger = logging.getLogger("MDAnalysis.analysis.x3dna")
def mean_std_from_x3dnaPickle(profile):
    """Get mean and standard deviation of helicoidal parameters from a saved `profile`.

    The `profile` should have been saved with :meth:`BaseX3DNA.save`. Then
    load it with ::

       import pickle
       with open("x3dna.pickle", "rb") as fh:
           profile = pickle.load(fh)
       h_mean, h_std = mean_std_from_x3dnaPickle(profile)

    Arguments
    ---------
    profile : dict
        A :attr:`X3DNA.profiles` dict with results from the :class:`X3DNA`
        analysis. Its ``x3dna_param`` attribute selects between base-pair
        data (``False``: 12 parameters) and base-step data (``True``: 6
        parameters).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Means and standard deviations, one row per base pair/step. For
        ``x3dna_param is False`` the column order is ``[shear, stretch,
        stagger, buckle, propeller, opening, shift, slide, rise, tilt,
        roll, twist]``; for ``x3dna_param is True`` it is ``[shift, slide,
        rise, tilt, roll, twist]``.
    """
    # Column order of the returned arrays depends on which kind of data
    # was collected.
    if profile.x3dna_param is False:
        names = ["Shear", "Stretch", "Stagger", "Buckle", "Propeller",
                 "Opening", "Shift", "Slide", "Rise", "Tilt", "Roll",
                 "Twist"]
    else:
        # BUG FIX: the original unpacked 12 empty lists into 6 names here
        # (a guaranteed ValueError) and then referenced undefined variables
        # such as bp_shear; the base-step branch only has these 6 fields.
        names = ["Shift", "Slide", "Rise", "Tilt", "Roll", "Twist"]
    # list() is required on Python 3 where dict.values() is a view and
    # cannot be indexed.
    frames = list(profile.values())
    # data[name] has shape (n_frames, n_basepairs)
    data = dict((name, np.array([frame[name] for frame in frames]))
                for name in names)
    na_avg, na_std = [], []
    # aggregate over frames, separately for every base pair/step j
    for j in range(len(frames[0])):
        na_avg.append([np.mean(data[name][:, j]) for name in names])
        na_std.append([np.std(data[name][:, j]) for name in names])
    return np.array(na_avg), np.array(na_std)
class BaseX3DNA(object):
    """Baseclass for X3DNA_ analysis, providing plotting and utility functions.

    When methods return helicoidal basepair parameter as lists, then the order
    is always

    ====== ==============
    index  parameter
    ====== ==============
    0      shear
    1      stretch
    2      stagger
    3      buckle
    4      propeller
    5      opening
    6      shift
    7      slide
    8      rise
    9      tilt
    10     roll
    11     twist
    ====== ==============

    for each nucleic acid pair.

    .. _X3DNA: http://x3dna.org
    """
    # Column order of all aggregated outputs (matches the table above).
    _PARAM_NAMES = ["Shear", "Stretch", "Stagger", "Buckle", "Propeller",
                    "Opening", "Shift", "Slide", "Rise", "Tilt", "Roll",
                    "Twist"]

    def save(self, filename="x3dna.pickle"):
        """Save :attr:`profiles` as a Python pickle file *filename*.

        Load the profiles dictionary with ::

           import pickle
           with open(filename, "rb") as fh:
               profiles = pickle.load(fh)
        """
        # cPickle only exists on Python 2; plain pickle is the Python 3 name.
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        # context manager so the file handle is not leaked (the original
        # passed an anonymous open() that was never closed)
        with open(filename, "wb") as fh:
            pickle.dump(self.profiles, fh, pickle.HIGHEST_PROTOCOL)

    def _parameter_stack(self):
        """Stack all profiles into one float array.

        Returns
        -------
        numpy.ndarray
            Array of shape ``(n_frames, n_basepairs, 12)`` with the last
            axis ordered as :attr:`_PARAM_NAMES`.

        Assumes every profile is a recarray with all 12 base-pair fields
        (data collected with ``x3dna_param=False``), exactly as the
        original per-method copies of this code did.
        """
        # list() is required on Python 3 where dict.values() is a view.
        frames = list(self.profiles.values())
        data = np.array([[frame[name] for name in self._PARAM_NAMES]
                         for frame in frames])
        # (n_frames, 12, n_bp) -> (n_frames, n_bp, 12)
        return data.transpose(0, 2, 1)

    def mean_std(self):
        """Returns the mean and standard deviation of base parameters.

        Returns
        -------
        (numpy.ndarray, numpy.ndarray)
            Two arrays with the means and the standard deviations of the
            helicoidal parameters, one row per base pair. The column order
            for both is ``[shear, stretch, stagger, buckle, propeller,
            opening, shift, slide, rise, tilt, roll, twist]``.
        """
        data = self._parameter_stack()
        # aggregate over frames, separately for every base pair
        return data.mean(axis=0), data.std(axis=0)

    def mean(self):
        """Returns the mean value for the base parameters.

        Returns
        -------
        numpy.ndarray
            Means of the helicoidal parameters, one row per base pair, in
            the order ``[shear, stretch, stagger, buckle, propeller,
            opening, shift, slide, rise, tilt, roll, twist]``.
        """
        # Also fixes the original's ``bp_buckle[:j]`` typo which sliced
        # frames instead of selecting base pair column j.
        return self._parameter_stack().mean(axis=0)

    def std(self):
        """Returns the standard deviation for the base parameters.

        Returns
        -------
        numpy.ndarray
            Standard deviations of the helicoidal parameters, one row per
            base pair, in the order ``[shear, stretch, stagger, buckle,
            propeller, opening, shift, slide, rise, tilt, roll, twist]``.
        """
        # Also fixes the original's ``bp_buckle[:j]`` typo (see mean()).
        return self._parameter_stack().std(axis=0)

    def plot(self, **kwargs):
        """Plot time-averaged base parameters for each base pair in a 1D graph.

        One plot is produced for each parameter. It shows the mean and
        standard deviation for each individual base pair. Each plot is saved
        to a PNG file named "<parameter_name>.png".

        Parameters
        ----------
        ax : matplotlib.pyplot.Axes (optional)
            Provide `ax` to have all plots plotted in the same axes.
        """
        import matplotlib.pyplot as plt

        na_avg, na_std = self.mean_std()
        param_names = list(self.profiles.values())[0].dtype.names
        # The original pop()'ed 'ax' inside the loop, so a user-supplied
        # axes was only honoured for the very first parameter.
        ax_supplied = kwargs.pop('ax', None)
        for k in range(na_avg.shape[1]):
            ax = ax_supplied if ax_supplied is not None else plt.subplot(111)
            x = list(range(1, len(na_avg[:, k]) + 1))
            ax.errorbar(x, na_avg[:, k], yerr=na_std[:, k], fmt='-o')
            ax.set_xlim(0, len(na_avg[:, k]) + 1)
            ax.set_xlabel(r"Nucleic Acid Number")
            param = param_names[k]
            # translational parameters are in Angstroem, the rest in degrees
            if param in ["Shear", "Stretch", "Stagger", "Rise", "Shift", "Slide"]:
                ax.set_ylabel(r"{0!s} ($\AA$)".format(param))
            else:
                ax.set_ylabel("{0!s} (deg)".format(param))
            ax.figure.savefig("{0!s}.png".format(param))
            ax.figure.clf()

    def sorted_profiles_iter(self):
        """Return an iterator over profiles sorted by frame/order parameter.

        The iterator produces tuples ``(q, profile)``. Typically, `q` is the
        frame number.
        """
        if self.profiles is None:
            # PEP 479: 'raise StopIteration' inside a generator becomes a
            # RuntimeError on Python 3.7+; a bare return ends the generator.
            return
        for q in sorted(self.profiles):
            yield (q, self.profiles[q])

    __iter__ = sorted_profiles_iter
class X3DNA(BaseX3DNA):
    """Run X3DNA_ on a single frame or a DCD trajectory.

    Only a subset of all X3DNA control parameters is supported and can be set
    with keyword arguments. For further details on X3DNA_ see the `X3DNA docs`_.

    Running X3DNA with the :class:`X3DNA` class is a 3-step process:

    1. set up the class with all desired parameters
    2. run X3DNA with :meth:`X3DNA.run`
    3. collect the data from the output file with :meth:`X3DNA.collect`

    The class also provides some simple plotting functions of the collected
    data such as :meth:`X3DNA.plot`.

    When methods return helicoidal basepair parameters as lists, the order is
    always shear, stretch, stagger, buckle, propeller, opening, shift, slide,
    rise, tilt, roll, twist (see :class:`BaseX3DNA` for the full table).

    .. versionadded:: 0.8
    .. _`X3DNA docs`: http://forum.x3dna.org/
    """

    def __init__(self, filename, **kwargs):
        """Set up parameters to run X3DNA_ on PDB *filename*.

        Parameters
        ----------
        filename : str
            The `filename` is used as input for X3DNA in the
            :program:`xdna_ensemble` command. It specifies the name of a
            PDB coordinate file to be used. This must be in Brookhaven
            protein databank format or something closely approximating
            this.
        executable : str (optional)
            Name of (or path to) the :program:`xdna_ensemble` executable;
            the X3DNA directories (e.g. ``/opt/x3dna/2.1`` and
            ``/opt/x3dna/2.1/bin``) must be set up and exported in the
            shell environment. See the X3DNA documentation for set-up
            instructions.
        x3dna_param : bool (optional)
            Determines whether base step or base pair parameters will be
            calculated. If ``True`` (default) then stacked *base step*
            parameters will be analyzed. If ``False`` then stacked *base
            pair* parameters will be analyzed.
        logfile : str (optional)
            Write output from X3DNA to `logfile` (default: "bp_step.par")

        Raises
        ------
        OSError
            With ``errno.ENOENT`` if the X3DNA executable cannot be found.

        .. SeeAlso:: :class:`X3DNAtraj`
        """
        # list of temporary files X3DNA drops in the CWD, cleaned up on __del__
        self.tempfiles = [
            "auxiliary.par", "bestpairs.pdb", "bp_order.dat", "bp_helical.par", "cf_7methods.par",
            "col_chains.scr", "col_helices.scr", "hel_regions.pdb", "ref_frames.dat", "hstacking.pdb", "stacking.pdb"
        ]
        self.tempdirs = []
        self.filename = filename
        logger.info("Setting up X3DNA analysis for %(filename)r", vars(self))
        # guess executables
        self.exe = {}
        x3dna_exe_name = kwargs.pop('executable', 'xdna_ensemble')
        self.x3dna_param = kwargs.pop('x3dna_param', True)
        self.exe['xdna_ensemble'] = which(x3dna_exe_name)
        if self.exe['xdna_ensemble'] is None:
            errmsg = "X3DNA binary {0!r} not found.".format(x3dna_exe_name)
            logger.fatal(errmsg)
            logger.fatal("%r must be on the PATH or provided as keyword argument 'executable'.",
                         x3dna_exe_name)
            raise OSError(errno.ENOENT, errmsg)
        self.logfile = kwargs.pop("logfile", "bp_step.par")
        # shell command template; %(filename)r is interpolated in run()
        if self.x3dna_param is False:
            self.template = "x3dna_ensemble analyze -b 355d.bps --one %(filename)r "
        else:
            self.template = "find_pair -s %(filename)r stdout |analyze stdin "
        # sanity checks
        for program, path in self.exe.items():
            if path is None or which(path) is None:
                logger.error("Executable %r not found, should have been %r.",
                             program, path)
        # results: one recarray per frame, indexed by frame number
        self.profiles = OrderedDict()

    def run(self, **kwargs):
        """Run X3DNA on the input file.

        Parameters
        ----------
        inpfile : str (optional)
            If given, the generated shell command line is also written to
            this file for inspection.
        outfile : str (optional)
            X3DNA output file to scan for errors (default: :attr:`logfile`).
        """
        inpname = kwargs.pop("inpfile", None)
        outname = kwargs.pop("outfile", self.logfile)
        x3dnaargs = vars(self).copy()
        x3dnaargs.update(kwargs)
        inp = self.template % x3dnaargs
        if inpname:
            with open(inpname, "w") as f:
                f.write(inp)
            logger.debug("Wrote X3DNA input file %r for inspection", inpname)
        # BUG FIX: the original logged a non-existent %(dcd)r key here which
        # raised KeyError as soon as INFO logging was enabled.
        logger.info("Starting X3DNA on %(filename)r", x3dnaargs)
        logger.debug("%s", self.exe['xdna_ensemble'])
        # truncate the output file first so a stale file from a previous run
        # cannot be mistaken for fresh results by collect()
        open(outname, "w").close()
        # BUG FIX: subprocess.call returns an int; the original assigned
        # attributes to it (x3dna.returncode) and tested the always-truthy
        # bound method x3dna.bit_length, so the failure check never worked.
        returncode = subprocess.call(inp, shell=True)
        # X3DNA is not very good at setting returncodes so check ourselves
        with open(outname, "r") as output:
            for line in output:
                if line.strip().startswith(('*** ERROR ***', 'ERROR')):
                    returncode = 255
                    break
        if returncode != 0:
            logger.fatal("X3DNA Failure (%d). Check output %r", returncode, outname)
        logger.info("X3DNA finished: output file %r", outname)

    def collect(self, **kwargs):
        """Parse the output from a X3DNA run into numpy recarrays.

        Can deal with outputs containing multiple frames.

        The method saves the result as :attr:`X3DNA.profiles`, a dictionary
        indexed by the frame number. Each entry is a
        :class:`np.recarray`.

        If the keyword `outdir` is supplied (e.g. ".") then each profile is
        saved to a gzipped data file.

        Parameters
        ----------
        run : str, int (optional)
            identifier, free form [1]
        outdir : str (optional)
            save output data under `outdir`/`run` if set to any other
            value but ``None`` [``None``]

        Raises
        ------
        ValueError
            If no parseable data was found in the X3DNA output.
        """
        # Example of the fixed-width FORTRAN output parsed below:
        #     # Shear Stretch Stagger Buckle Prop-Tw Opening Shift Slide Rise Tilt Roll Twist
        #     T-A -0.033 -0.176 0.158 -12.177 -8.979 1.440 0.000 ...
        #     C-G -0.529 0.122 -0.002 -7.983 -10.083 -0.091 -0.911 ...
        x3dna_output = kwargs.pop("x3dnaout", self.logfile)
        run = kwargs.pop("run", 1)  # id number
        outdir = kwargs.pop("outdir", os.path.curdir)
        logger.info("Collecting X3DNA profiles for run with id %s", run)
        length = 1  # number of expected frames (only known for glob patterns)
        if '*' in self.filename:
            # glob is imported at module level
            filenames = glob.glob(self.filename)
            length = len(filenames)
            if length == 0:
                logger.error("Glob pattern %r did not find any files.", self.filename)
                raise ValueError("Glob pattern {0!r} did not find any files.".format(self.filename))
            logger.info("Found %d input files based on glob pattern %s", length, self.filename)
        # one recarray for each frame, indexed by frame number
        self.profiles = OrderedDict()
        logger.info("Run %s: Reading %d X3DNA profiles from %r", run, length, x3dna_output)
        x3dna_profile_no = 0
        records = []
        with open(x3dna_output, "r") as x3dna:
            read_data = False
            for line in x3dna:
                line = line.rstrip()  # preserve columns (FORTRAN output...)
                if self.x3dna_param is False:
                    # base-pair output: 12 parameters per data line
                    if line.startswith("# Shear"):
                        read_data = True
                        logger.debug("Started reading data")
                        x3dna_profile_no = 1  # a single frame per output file
                        records = []
                        continue
                    if read_data:
                        if len(line.strip()) != 0:
                            try:
                                (Sequence, Shear, Stretch, Stagger, Buckle, Propeller,
                                 Opening, Shift, Slide, Rise, Tilt, Roll, Twist) = line.split()
                            except ValueError:
                                # wrong number of columns on this line
                                logger.critical("Run %s: Problem parsing line %r", run, line.strip())
                                logger.exception("Check input file %r.", x3dna_output)
                                raise
                            records.append(
                                [float(Shear), float(Stretch), float(Stagger), float(Buckle),
                                 float(Propeller), float(Opening), float(Shift), float(Slide),
                                 float(Rise), float(Tilt), float(Roll), float(Twist)])
                            continue
                        else:
                            # end of records (empty line)
                            read_data = False
                else:
                    # base-step output: 6 parameters per data line
                    if line.startswith("# Shift"):
                        read_data = True
                        logger.debug("Started reading data")
                        x3dna_profile_no = 1  # a single frame per output file
                        records = []
                        continue
                    if read_data:
                        if len(line.strip()) != 0:
                            try:
                                Sequence, Shift, Slide, Rise, Tilt, Roll, Twist = line.split()
                            except ValueError:
                                logger.critical("Run %s: Problem parsing line %r", run, line.strip())
                                logger.exception("Check input file %r.", x3dna_output)
                                raise
                            records.append(
                                [float(Shift), float(Slide), float(Rise), float(Tilt),
                                 float(Roll), float(Twist)])
                            continue
                        else:
                            # end of records (empty line)
                            read_data = False
        if not records:
            # fail with a clear message instead of a cryptic recarray error
            raise ValueError("No X3DNA data found in output {0!r}".format(x3dna_output))
        if self.x3dna_param is False:
            frame_x3dna_output = np.rec.fromrecords(
                records, formats="f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8",
                names="Shear,Stretch,Stagger,Buckle,Propeller,Opening,"
                      "Shift,Slide,Rise,Tilt,Roll,Twist")
        else:
            frame_x3dna_output = np.rec.fromrecords(
                records, formats="f8,f8,f8,f8,f8,f8",
                names="Shift,Slide,Rise,Tilt,Roll,Twist")
        # store the profile
        self.profiles[x3dna_profile_no] = frame_x3dna_output
        logger.debug("Collected X3DNA profile for frame %d (%d datapoints)",
                     x3dna_profile_no, len(frame_x3dna_output))
        # save a profile for each frame (for debugging and scripted processing)
        if outdir is not None:
            rundir = os.path.join(outdir, "run_" + str(run))
            # remove stale scratch files portably (the original shelled out
            # to 'rm -f tmp*.out')
            for tmpname in glob.glob("tmp*.out"):
                try:
                    os.unlink(tmpname)
                except OSError:
                    pass
            if not os.path.exists(rundir):
                os.makedirs(rundir)
            frame_x3dna_txt = os.path.join(
                rundir, "bp_step_{0!s}_{1:04d}.dat.gz".format(run, x3dna_profile_no))
            np.savetxt(frame_x3dna_txt, frame_x3dna_output)
            logger.debug("Finished with frame %d, saved as %r", x3dna_profile_no, frame_x3dna_txt)
        if len(self.profiles) == length:
            logger.info("Collected X3DNA profiles for %d frames", len(self.profiles))
        else:
            logger.warning("Missing data: Found %d X3DNA profiles from %d frames.",
                           len(self.profiles), length)

    def __del__(self):
        # Best-effort cleanup of X3DNA scratch files; never raise in __del__.
        for f in self.tempfiles:
            try:
                os.unlink(f)
            except OSError:
                pass
        for d in self.tempdirs:
            shutil.rmtree(d, ignore_errors=True)
class X3DNAtraj(BaseX3DNA):
    """Analyze all frames in a trajectory.

    The :class:`X3DNA` class provides a direct interface to X3DNA. X3DNA itself
    has limited support for analysing trajectories but cannot deal with all the
    trajectory formats understood by MDAnalysis. This class can take any
    universe and feed it to X3DNA. By default it sequentially creates a PDB for
    each frame and runs X3DNA on the frame.
    """

    def __init__(self, universe, **kwargs):
        """Set up the class.

        Parameters
        ----------
        universe : Universe
            The input trajectory as part of a
            :class:`~MDAnalysis.core.universe.Universe`; the trajectory is
            converted to a sequence of PDB files and X3DNA is run on each
            individual file. (Use the `start`, `stop`, and `step` keywords
            to slice the trajectory.)
        selection : str (optional)
            MDAnalysis selection string (default: "nucleic") to select the
            atoms that should be analyzed.
        start : int (optional)
        stop : int (optional)
        step : int (optional)
            frame indices to slice the trajectory as
            ``universe.trajectory[start:stop:step]``; by default, the whole
            trajectory is analyzed.
        x3dna_param : bool (optional)
            indicates whether stacked bases or stacked base-pairs will be
            analyzed. ``True`` is bases and ``False`` is stacked base-pairs
            [Default is ``True``].
        kwargs : keyword arguments (optional)
            All other keywords are passed on to :class:`X3DNA` (see there
            for description).

        .. SeeAlso:: :class:`X3DNA`
        """
        self.universe = universe
        self.selection = kwargs.pop("selection", "nucleic")
        self.x3dna_param = kwargs.pop('x3dna_param', True)
        self.start = kwargs.pop('start', None)
        self.stop = kwargs.pop('stop', None)
        self.step = kwargs.pop('step', None)
        # remaining keywords are forwarded to X3DNA for every frame
        self.x3dna_kwargs = kwargs
        # filled by run(); initialized so iterating before run() is harmless
        # instead of raising AttributeError
        self.profiles = None

    def run(self, **kwargs):
        """Run X3DNA on the whole trajectory and collect profiles.

        Keyword arguments `start`, `stop`, and `step` can be used to only
        analyse part of the trajectory. The defaults are the values provided
        to the class constructor.
        """
        start = kwargs.pop('start', self.start)
        stop = kwargs.pop('stop', self.stop)
        step = kwargs.pop('step', self.step)
        x3dna_kw = self.x3dna_kwargs.copy()
        x3dna_kw.update(kwargs)
        profiles = OrderedDict()
        nucleic = self.universe.select_atoms(self.selection)
        for ts in self.universe.trajectory[start:stop:step]:
            logger.info("X3DNA analysis frame %4d ", ts.frame)
            fd, pdbfile = tempfile.mkstemp(suffix=".pdb")
            os.close(fd)
            try:
                # write the current frame once (the original wrote it twice)
                nucleic.write(pdbfile)
                # generate the base-pair input file expected by X3DNA
                os.system("find_pair {0!s} 355d.bps".format(pdbfile))
                x3dna_profiles = self.run_x3dna(pdbfile, **x3dna_kw)
            finally:
                # always remove the temporary PDB, even if X3DNA failed
                try:
                    os.unlink(pdbfile)
                except OSError:
                    pass
            if len(x3dna_profiles) != 1:
                err_msg = "Got {0} profiles ({1}) --- should be 1 (time step {2})".format(
                    len(x3dna_profiles), x3dna_profiles.keys(), ts)
                logger.error(err_msg)
                warnings.warn(err_msg)
            # list() is required on Python 3 where dict.values() is a view
            # and cannot be indexed
            profiles[ts.frame] = list(x3dna_profiles.values())[0]
        self.profiles = profiles

    def run_x3dna(self, pdbfile, **kwargs):
        """Run X3DNA on a single PDB file `pdbfile` and return its profiles."""
        kwargs['x3dna_param'] = self.x3dna_param
        H = X3DNA(pdbfile, **kwargs)
        H.run()
        H.collect()
        return H.profiles
|
alejob/mdanalysis
|
package/MDAnalysis/analysis/legacy/x3dna.py
|
Python
|
gpl-2.0
| 35,158
|
[
"MDAnalysis"
] |
60cdedc4659e98d39307cd6b2c293af65e0665838f322f6c9c0d52527d8ec44f
|
from property import *
import nest
import numpy.random as random
# Neuron parameters
# --- Neuron parameters ------------------------------------------------------
# Status dictionary for NEST iaf-type neuron models.
iaf_neuronparams = {'E_L': -70., # Resting membrane potential in mV
                    'V_th': -50., # Spike threshold in mV
                    'V_reset': -67., # Reset membrane potential after a spike in mV
                    'C_m': 2., # Capacity of the membrane in pF
                    't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
                    'V_m': -60., # Membrane potential in mV at start
                    'tau_syn_ex': 1., # Time constant of postsynaptic excitatory currents in ms
                    'tau_syn_in': 1.33} # Time constant of postsynaptic inhibitory currents in ms
# Synapse common parameters
# NOTE(review): random.normal()/random.uniform() below are evaluated ONCE at
# import time, so every synapse built from these dicts shares the same single
# sampled value instead of drawing a new one per connection -- confirm that
# this is intended.
STDP_synapseparams = {
    'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
    'lambda': 0.5 # Step size
}
# Glutamate synapse (w_Glu etc. are star-imported from ``property``)
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
                           'weight': w_Glu, # Weight (power) of synapse
                           'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse (negative weight/Wmax: inhibitory)
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
                            'weight': w_GABA,
                            'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
                           'weight': w_ACh,
                           'Wmax': 20.}, **STDP_synapseparams)
# Dopamine excitatory synapse (fixed 1 ms delay, no STDP parameters)
DOPA_synparams_ex = dict({'delay': 1.,
                          'weight': w_DA_ex,
                          'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
                          'weight': w_DA_in,
                          'Wmax': -100.})
# Dictionary of synapses with keys and their parameters:
# transmitter key -> (synapse model name, weight); names from ``property``.
synapses = {GABA: (gaba_synapse, w_GABA ),
            Glu: (glu_synapse, w_Glu ),
            ACh: (ach_synapse, w_ACh ),
            DA_ex: (dopa_synapse_ex, w_DA_ex),
            DA_in: (dopa_synapse_in, w_DA_in)
            }
# Parameters for generator (static connection used for stimulation input)
static_syn = {
    'weight': w_Glu * 5,
    'delay': pg_delay
}
# Device parameters
# Multimeter: records V_m every 0.1 ms into memory (not to file).
multimeter_param = {'to_memory': True,
                    'to_file': False,
                    'withtime': True,
                    'interval': 0.1,
                    'record_from': ['V_m'],
                    'withgid': True}
# Spike detector: keeps spike times/senders in memory under label 'spikes'.
detector_param = {'label': 'spikes',
                  'withtime': True,
                  'withgid': True,
                  'to_file': False,
                  'to_memory': True,
                  'scientific': True}
|
vitaliykomarov/NEUCOGAR
|
nest/dopamine/integrated/scripts/parameters.py
|
Python
|
gpl-2.0
| 2,915
|
[
"NEURON"
] |
f0063e4082ab08fa8385c5627b032b1d65b70186ae7cf3a4a2a783ddbc2cd7cb
|
# -*- coding: utf-8 -*-
# PyNNLess -- Yet Another PyNN Abstraction Layer
# Copyright (C) 2015 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Contains the classes which allows to easily build PyNNLess networks without
having to manually fiddle arround with a dictionary of arrays of dictionaries.
"""
import pynnless_exceptions as exceptions
import pynnless_constants as const
import pynnless_utils as utils
class Population(dict):
    """
    Container for the data which is used to represent a neuron population. The
    Population class is a simple dictionary with some convenience methods and
    validations.
    """

    def __init__(self, data=None, count=1,
            _type=const.TYPE_IF_COND_EXP, params=None, record=None):
        """
        Constructor of a neuron population instance.

        :param data: dictionary the data may be copied from.
        :param count: number of neurons in the population.
        :param _type: type of the neuron.
        :param params: neuron population parameters.
        :param record: variables to be recorded.
        """
        # None-defaults guard against the shared-mutable-default pitfall:
        # a literal {} or [] default is created once at definition time and
        # shared between every call of this constructor.
        data = {} if data is None else data
        params = {} if params is None else params
        record = [] if record is None else record
        utils.init_key(self, data, "count", count, int)
        utils.init_key(self, data, "type", _type)
        utils.init_key(self, data, "params", params)
        utils.init_key(self, data, "record", record)
        self._canonicalize()
        self._validate()

    def _validate(self):
        """
        Internally used to ensure the entries have correct values.

        Raises a PyNNLessException if "count" is not positive, the neuron
        type is unknown, or the parameter list length is neither one nor
        equal to "count".
        """
        if (self["count"] <= 0):
            raise exceptions.PyNNLessException("Invalid population size: "
                + str(self["count"]))
        if (not self["type"] in const.TYPES):
            raise exceptions.PyNNLessException("Invalid neuron type '"
                + str(self["type"]) + "' supported are " + str(const.TYPES))
        if len(self["params"]) > 1 and len(self["params"]) != self["count"]:
            raise exceptions.PyNNLessException("Population parameter list " +
                "must either have exactly one entry (shared by all neurons " +
                "in the population) or exactly \"count\" entries.")

    @staticmethod
    def canonicalize_record(record):
        """
        Makes sure the "record" signal list is indeed a list, is sorted and
        contains no double entries.
        """
        if isinstance(record, str):
            record = [record]
        # A set removes duplicates; sorted() then restores the deterministic
        # order promised above -- list(set(...)) alone yields arbitrary order.
        return sorted(set(record))

    def _canonicalize(self):
        """
        Internal function, makes sure the "record" list is indeed a list, is
        sorted and contains no double entries. Converts "params" to a list if
        it is not one already.
        """
        self["record"] = self.canonicalize_record(self["record"])
        if not isinstance(self["params"], list):
            self["params"] = [self["params"]]
        return self

    def record(self, signal):
        """
        Adds the given signal to the list of recorded signals.
        """
        self._canonicalize()
        self["record"].append(signal)
        return self._canonicalize()

    def record_spikes(self):
        """
        Adds the SIG_SPIKES signal to the list of recorded signals.
        """
        return self.record(const.SIG_SPIKES)
class SourcePopulation(Population):
    """
    Population of spike sources.
    """

    def __init__(self, data=None, count=1, spike_times=None, record=None):
        """
        Constructor of a spike source population.

        :param data: dictionary the data may be copied from.
        :param count: number of sources in the population.
        :param spike_times: either a single list of spike times (shared by
            all sources) or a list of per-source spike time lists.
        :param record: variables to be recorded.
        """
        # None-defaults avoid the shared-mutable-default pitfall of {} / []
        # defaults, which would be shared between all instances.
        data = {} if data is None else data
        spike_times = [] if spike_times is None else spike_times
        record = [] if record is None else record
        # Convert spike_time lists to a list of parameters
        if len(spike_times) > 0 and isinstance(spike_times[0], list):
            params = [{"spike_times": t} for t in spike_times]
        else:
            params = {"spike_times": spike_times}
        Population.__init__(self, data, count, const.TYPE_SOURCE, params,
                            record)
class NeuronPopulation(Population):
    """
    Base class for populations of actual (non-source) neurons. Provides
    shortcuts for enabling the recording of the common analog signals.
    """

    def record_v(self):
        """
        Enables recording of the neuron membrane potential by adding the
        SIG_V signal to the list of recorded signals.
        """
        return self.record(const.SIG_V)

    def record_ge(self):
        """
        Enables recording of the excitatory channel conductivity by adding
        the SIG_GE signal to the list of recorded signals.
        """
        return self.record(const.SIG_GE)

    def record_gi(self):
        """
        Enables recording of the inhibitory channel conductivity by adding
        the SIG_GI signal to the list of recorded signals.
        """
        return self.record(const.SIG_GI)
class IfCondExpPopulation(NeuronPopulation):
    """
    Population of neurons of type const.TYPE_IF_COND_EXP.
    """

    def __init__(self, data=None, count=1, params=None, record=None):
        """
        :param data: dictionary the data may be copied from.
        :param count: number of neurons in the population.
        :param params: neuron population parameters.
        :param record: variables to be recorded.
        """
        # None-defaults avoid sharing one mutable default object between
        # all constructor calls.
        NeuronPopulation.__init__(self, {} if data is None else data, count,
            const.TYPE_IF_COND_EXP, {} if params is None else params,
            [] if record is None else record)
class AdExPopulation(NeuronPopulation):
    """
    Population of neurons of type const.TYPE_AD_EX.
    """

    def __init__(self, data=None, count=1, params=None, record=None):
        """
        :param data: dictionary the data may be copied from.
        :param count: number of neurons in the population.
        :param params: neuron population parameters.
        :param record: variables to be recorded.
        """
        # None-defaults avoid sharing one mutable default object between
        # all constructor calls.
        NeuronPopulation.__init__(self, {} if data is None else data, count,
            const.TYPE_AD_EX, {} if params is None else params,
            [] if record is None else record)
class Network(dict):
    """
    Represents a spiking neural network. This class merely is a dictionary
    containing a "populations" and a "connections" entry.
    """

    def __init__(self, data=None, populations=None, connections=None):
        """
        Constructor of the Network class, either copies the given data object or
        initializes the "populations" and "connections" with the given elements.

        :param data: another Network dictionary from which entries should be
            copied.
        :param populations: array of population descriptors.
        :param connections: array of connection descriptors.
        """
        # None-defaults instead of mutable ones: with "populations=[]" every
        # Network built from the defaults would store -- and mutate via
        # add_population/add_connection -- one and the same shared list.
        data = {} if data is None else data
        populations = [] if populations is None else populations
        connections = [] if connections is None else connections
        utils.init_key(self, data, "populations", populations)
        utils.init_key(self, data, "connections", connections)

    def add_population(self, data=None, count=1, _type=const.TYPE_IF_COND_EXP,
            params=None, record=None):
        """Appends a new Population built from the arguments, returns self."""
        self["populations"].append(
            Population({} if data is None else data, count, _type,
                {} if params is None else params,
                [] if record is None else record))
        return self

    def add_populations(self, ps):
        """Appends the given list of population descriptors, returns self."""
        self["populations"] = self["populations"] + ps
        return self

    def add_source(self, spike_times=None):
        """Appends a single spike source population, returns self."""
        spike_times = [] if spike_times is None else spike_times
        self["populations"].append(SourcePopulation(spike_times=spike_times))
        return self

    def add_neuron(self, params=None, _type=const.TYPE_IF_COND_EXP, record=None):
        """Appends a single neuron population of the given type, returns self."""
        return self.add_population(params=params, _type=_type, record=record)

    def add_connection(self, src, dst, weight=0.1, delay=0.0):
        """Appends one (src, dst, weight, delay) connection tuple, returns self."""
        self["connections"].append((src, dst, weight, delay))
        return self

    def add_connections(self, cs):
        """Appends the given list of connection tuples, returns self."""
        self["connections"] = self["connections"] + cs
        return self
|
hbp-sanncs/pynnless
|
pynnless/pynnless_builder.py
|
Python
|
gpl-3.0
| 7,191
|
[
"NEURON"
] |
bba92a78cb39621f03cbdf51a55531aa0cd23ce71349c4ddc0621f8a813301ff
|
"""
Compute saddle-point integrals over trajectories traveling on adiabataic
potentials
This currently uses first-order saddle point.
"""
import numpy as np
import nomad.math.constants as constants
import nomad.core.glbl as glbl
import nomad.compiled.nuclear_gaussian_ccs as nuclear
# Let the propagator know whether this integral scheme needs data evaluated
# at centroids to propagate; this scheme does not.
require_centroids = False
# Determines the Hamiltonian symmetry: matrix elements are Hermitian.
hermitian = True
# Basis in which the matrix elements are evaluated.
basis = 'gaussian'
# Cache of the previously returned theta per trajectory label, used by
# theta() to keep the rotation angle continuous between calls.
theta_cache = dict()
# This is mostly to allow for consistent conventions: which
# diabat is the "excited" state, and which is the "ground" state.
gs = 0
es = 1
def v_integral(traj1, traj2, kecoef, nuc_ovrlp, elec_ovrlp):
    """Returns the potential coupling matrix element between two trajectories.

    Builds the diabatic potential matrix from the vibronic Hamiltonian terms
    in glbl.interface.ham, then projects it onto the adiabatic states of the
    two trajectories via phi() and scales by the nuclear overlap.

    Note: kecoef and elec_ovrlp are part of the common integral interface but
    are not used by this scheme.
    """
    # get the linear combinations corresponding to the adiabatic states
    nst = traj1.nstates
    v_mat = np.zeros((nst,nst),dtype=complex)
    # adiabatic states in diabatic basis -- cross terms between orthogonal
    # diabatic states are zero
    for i in range(glbl.interface.ham.nterms):
        # stalbl holds 1-based state indices; shift to 0-based (s1, s2)
        [s1,s2] = glbl.interface.ham.stalbl[i,:]-1
        v_term = complex(1.,0.) * glbl.interface.ham.coe[i]
        # product of 1D <g1|q^n|g2> Gaussian integrals over the modes of
        # this Hamiltonian term
        for q in range(len(glbl.interface.ham.order[i])):
            qi = glbl.interface.ham.mode[i][q]
            v_term *= nuclear.qn_integral(glbl.interface.ham.order[i][q],
                                          traj1.widths()[qi],traj1.x()[qi],traj1.p()[qi],
                                          traj2.widths()[qi],traj2.x()[qi],traj2.p()[qi])
        v_mat[s1,s2] += v_term
    # Symmetrize: mirror the filled triangle onto the other one without
    # double-counting the diagonal
    v_mat += v_mat.T - np.diag(v_mat.diagonal())
    return np.dot(np.dot(phi(traj1), v_mat), phi(traj2)) * nuc_ovrlp
def rot_mat(theta):
    """Returns the 2x2 adiabatic-diabatic rotation matrix for a given value
    of theta. The column convention depends on the module-level 'gs' flag,
    i.e. on which diabat is taken as the ground state."""
    c = np.cos(theta)
    s = np.sin(theta)
    if gs == 0:
        return np.array([[c, -s],
                         [s,  c]])
    return np.array([[-s, c],
                     [ c, s]])
def theta(traj):
    """Returns the adiabatic-diabatic rotation angle theta.

    Choose theta to be consistent with the diabatic-adiabatic transformation
    matrix, which itself is chosen to have a phase resulting in a slowly
    varying value of theta: the raw arctan2 result is shifted by +/- pi
    towards the angle cached for this trajectory label on the previous call.
    """
    global theta_cache, gs, es
    # can also run the trivial case of a single state
    if traj.nstates == 1:
        return 0.
    hmat = traj.pes.get_data('diabat_pot')
    h12 = hmat[0,1]
    de = hmat[es,es]-hmat[gs,gs]
    # clamp a (near-)degenerate energy gap to +/- fpzero so the arctan2
    # argument stays well defined
    if abs(de) < constants.fpzero:
        sgn = np.sign(de)
        if sgn == 0.:
            sgn = 1
        de = sgn * constants.fpzero
    ang = 0.5*np.arctan2(2.*h12,de)
    # check the cached value and shift if necessary.
    pi_mult = [0,-1.,1.]
    # if not in cache, return current value
    if traj.label in theta_cache:
        # pick the shift (0 or +/- pi) that keeps theta closest to the
        # previously returned value, i.e. keeps it continuous in time
        dif_vec = [abs(ang + pi_mult[i]*np.pi - theta_cache[traj.label])
                   for i in range(len(pi_mult))]
        shft = dif_vec.index(min(dif_vec))
        if shft != 0:
            ang += pi_mult[shft]*np.pi
    theta_cache[traj.label] = ang
    #print("traj="+str(traj.label)+" theta="+str(ang)+"\n")
    return ang
def phi(traj):
    """Returns the transformation vector for the trajectory's state, i.e.
    the corresponding column of the rotation matrix built from theta(traj).
    Should be identical to the dat_mat in the vibronic interface."""
    # the trivial single-state case needs no rotation
    if traj.nstates == 1:
        return np.array([1.], dtype=float)
    return rot_mat(theta(traj))[:, traj.state]
|
mschuurman/FMSpy
|
nomad/integrals/mca_vibronic.py
|
Python
|
lgpl-3.0
| 3,722
|
[
"Gaussian"
] |
7ef9d8c39bdb9e4254a2219b0e96299da4ef803b2d6fbbb0ae9ea384c8a4c267
|
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute for Medical Research, Heidelberg, Germany
# Authors: Sven Dorkenwald, Philipp Schubert, Joergen Kornfeld
from collections import Counter
import cPickle as pkl
import glob
import numpy as np
import os
import re
import shutil
import scipy.spatial
from multiprocessing.pool import ThreadPool
import networkx as nx
from scipy import spatial
import sys
from .segmentation import SegmentationObject
from .utils import knossos_ml_from_sso, colorcode_vertices, \
colorcode_vertices_color, \
knossos_ml_from_svixs, subfold_from_ix, subfold_from_ix_SSO
from ..processing.mesh_utils import write_mesh2kzip, merge_someshs
from ..processing.rendering import render_sampled_sso, comp_window, \
multi_render_sampled_svidlist, render_sso_coords
from ..processing.graphs import split_glia, split_subcc, create_mst_skeleton
from ..processing.general import single_conn_comp_img
from ..handler.basics import write_txt2kzip, get_filepaths_from_dir, safe_copy, \
coordpath2anno, load_pkl2obj, write_obj2pkl, flatten_list, chunkify
from ..handler.compression import AttributeDict, MeshDict
from ..config import parser
import segmentation
import super_segmentation_helper as ssh
import skel_based_classifier as sbc
from .segmentation_helper import predict_sos_views
from knossos_utils import skeleton, knossosdataset
from knossos_utils.skeleton_utils import write_skeleton, load_skeleton
import warnings
from ..processing.general import timeit
import time
import seaborn as sns
try:
from knossos_utils import mergelist_tools
except ImportError:
from knossos_utils import mergelist_tools_fallback as mergelist_tools
skeletopyze_available = False
attempted_skeletopyze_import = False
try:
import skeletopyze
skeletopyze_available = True
except:
skeletopyze_available = False
# print "skeletopyze not found - you won't be able to compute skeletons. " \
# "Install skeletopyze from https://github.com/funkey/skeletopyze"
from syconnmp import qsub_utils as qu
from syconnmp import shared_mem as sm
script_folder = os.path.abspath(os.path.dirname(__file__) + "/../qsub_scripts/")
try:
default_wd_available = True
from ..config.default_wd import wd
except:
default_wd_available = False
class SuperSegmentationDataset(object):
    """
    Dataset of super-segmentation objects (SSVs) stored under
    "<working_dir>/ssv_<version>/". Keeps the SV -> SSV mapping, lazily
    resolves SSV ids, and offers batch-processing entry points that run
    either via local multiprocessing or via QSUB job submission.
    """
    def __init__(self, working_dir=None, version=None, version_dict=None,
                 sv_mapping=None, scaling=None, config=None):
        """
        Parameters
        ----------
        working_dir : str
            Base directory; falls back to the configured default working
            directory when None.
        version : str
            Dataset version; the special value "new" picks
            max(existing versions) + 1.
        version_dict : dict
            Maps object types to dataset versions; the special value "load"
            reads the dict from disk.
        sv_mapping : dict or str
            SV -> SSV mapping, either as dict or as path to a mergelist file.
        scaling : tuple
            Voxel scaling; read from the config when not given.
        """
        self.ssv_dict = {}
        self.mapping_dict = {}
        self.reversed_mapping_dict = {}
        self._id_changer = []
        self._ssv_ids = None
        self._config = config
        if working_dir is None:
            if default_wd_available:
                self._working_dir = wd
            else:
                raise Exception("No working directory (wd) specified in config")
        else:
            self._working_dir = working_dir
        if scaling is None:
            # NOTE(review): bare except silently falls back to unit scaling
            # even on unrelated errors (e.g. a broken config file).
            try:
                self._scaling = \
                    np.array(self.config.entries["Dataset"]["scaling"])
            except:
                self._scaling = np.array([1, 1, 1])
        else:
            self._scaling = scaling
        if version is None:
            try:
                self._version = self.config.entries["Versions"][self.type]
            except:
                raise Exception("unclear value for version")
        elif version == "new":
            # "new": scan sibling datasets and use the next free version number
            other_datasets = glob.glob(self.working_dir + "/%s_*" % self.type)
            max_version = -1
            for other_dataset in other_datasets:
                other_version = \
                    int(re.findall("[\d]+",
                                   os.path.basename(other_dataset))[-1])
                if max_version < other_version:
                    max_version = other_version
            self._version = max_version + 1
        else:
            self._version = version
        if version_dict is None:
            try:
                self.version_dict = self.config.entries["Versions"]
            except:
                raise Exception("No version dict specified in config")
        else:
            if isinstance(version_dict, dict):
                self.version_dict = version_dict
            elif isinstance(version_dict, str) and version_dict == "load":
                # NOTE(review): if the dict file does not exist, version_dict
                # stays unset here instead of raising -- confirm intended.
                if self.version_dict_exists:
                    self.load_version_dict()
            else:
                raise Exception("No version dict specified in config")
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        if sv_mapping is not None:
            self.apply_mergelist(sv_mapping)
    @property
    def type(self):
        """Dataset type tag used in paths and the config's version table."""
        return "ssv"
    @property
    def scaling(self):
        """Voxel scaling as numpy array."""
        return self._scaling
    @property
    def working_dir(self):
        """Base working directory of this dataset."""
        return self._working_dir
    @property
    def config(self):
        """Lazily parsed project config for the working directory."""
        if self._config is None:
            self._config = parser.Config(self.working_dir)
        return self._config
    @property
    def path(self):
        """Root folder of this dataset version."""
        return "%s/ssv_%s/" % (self._working_dir, self.version)
    @property
    def version(self):
        """Dataset version as string."""
        return str(self._version)
    @property
    def version_dict_path(self):
        return self.path + "/version_dict.pkl"
    @property
    def mapping_dict_exists(self):
        return os.path.exists(self.mapping_dict_path)
    @property
    def reversed_mapping_dict_exists(self):
        return os.path.exists(self.reversed_mapping_dict_path)
    @property
    def mapping_dict_path(self):
        return self.path + "/mapping_dict.pkl"
    @property
    def reversed_mapping_dict_path(self):
        return self.path + "/reversed_mapping_dict.pkl"
    @property
    def id_changer_path(self):
        return self.path + "/id_changer.npy"
    @property
    def version_dict_exists(self):
        return os.path.exists(self.version_dict_path)
    @property
    def id_changer_exists(self):
        return os.path.exists(self.id_changer_path)
    @property
    def ssv_ids(self):
        """SSV ids, resolved in order of preference from: in-memory mapping
        dict, in-memory ssv dict, mapping dict on disk, cached ids.npy,
        and finally the so_storage folder structure."""
        if self._ssv_ids is None:
            if len(self.mapping_dict) > 0:
                return self.mapping_dict.keys()
            elif len(self.ssv_dict) > 0:
                return self.ssv_dict.keys()
            elif self.mapping_dict_exists:
                self.load_mapping_dict()
                return self.mapping_dict.keys()
            elif os.path.exists(self.path + "/ids.npy"):
                self._ssv_ids = np.load(self.path + "/ids.npy")
                return self._ssv_ids
            else:
                # fall back to globbing the storage subfolders; the folder
                # name at the deepest level is the SSV id
                paths = glob.glob(self.path + "/so_storage/*/*/*/")
                self._ssv_ids = np.array([int(os.path.basename(p.strip("/")))
                                          for p in paths], dtype=np.int)
                return self._ssv_ids
        else:
            return self._ssv_ids
    @property
    def ssvs(self):
        """Generator yielding one SuperSegmentationObject per SSV id."""
        ix = 0
        tot_nb_ssvs = len(self.ssv_ids)
        while ix < tot_nb_ssvs:
            yield self.get_super_segmentation_object(self.ssv_ids[ix])
            ix += 1
    @property
    def id_changer(self):
        """SV id -> SSV id lookup array, loaded from disk on first access."""
        if len(self._id_changer) == 0:
            self.load_id_changer()
        return self._id_changer
    def load_cached_data(self, name):
        """Loads "<name>s.npy" from the dataset folder; returns None
        (implicitly) when the cache file does not exist."""
        if os.path.exists(self.path + name + "s.npy"):
            return np.load(self.path + name + "s.npy")
    def sv_id_to_ssv_id(self, sv_id):
        """Maps a supervoxel id to its SSV id via the id_changer array."""
        return self.id_changer[sv_id]
    def get_segmentationdataset(self, obj_type):
        """Returns the SegmentationDataset of obj_type for this working dir,
        using the version recorded in version_dict."""
        assert obj_type in self.version_dict
        return segmentation.SegmentationDataset(obj_type,
                                                version=self.version_dict[
                                                    obj_type],
                                                working_dir=self.working_dir)
    def apply_mergelist(self, sv_mapping):
        """Initializes mapping_dict, reversed_mapping_dict and _id_changer
        from an SV -> SSV mapping (dict or KNOSSOS mergelist file path) and
        persists them via save_dataset_shallow()."""
        if sv_mapping is not None:
            assert "sv" in self.version_dict
            if isinstance(sv_mapping, dict):
                pass
            elif isinstance(sv_mapping, str):
                # treat the string as a path to a mergelist file
                with open(sv_mapping, "r") as f:
                    sv_mapping = mergelist_tools. \
                        subobject_map_from_mergelist(f.read())
            else:
                raise Exception("sv_mapping has unknown type")
            self.reversed_mapping_dict = sv_mapping
            for sv_id in sv_mapping.values():
                self.mapping_dict[sv_id] = []
            # dense lookup array indexed by SV id; -1 marks unmapped SVs
            # (Python 2: dict.keys() returns a list, so np.max works here)
            self._id_changer = np.ones(np.max(sv_mapping.keys()) + 1,
                                       dtype=np.int) * (-1)
            for sv_id in sv_mapping.keys():
                self.mapping_dict[sv_mapping[sv_id]].append(sv_id)
                self._id_changer[sv_id] = sv_mapping[sv_id]
            self.save_dataset_shallow()
    def get_super_segmentation_object(self, obj_id, new_mapping=False,
                                      caching=True, create=False):
        """Builds a SuperSegmentationObject for obj_id; with new_mapping the
        SV ids are taken from this dataset's mapping_dict. ("mesh_cashing"
        mirrors the SSO constructor's keyword spelling.)"""
        if new_mapping:
            sso = SuperSegmentationObject(obj_id,
                                          self.version,
                                          self.version_dict,
                                          self.working_dir,
                                          create=create,
                                          sv_ids=self.mapping_dict[obj_id],
                                          scaling=self.scaling,
                                          object_caching=caching,
                                          voxel_caching=caching,
                                          mesh_cashing=caching,
                                          view_caching=caching)
        else:
            sso = SuperSegmentationObject(obj_id,
                                          self.version,
                                          self.version_dict,
                                          self.working_dir,
                                          create=create,
                                          scaling=self.scaling,
                                          object_caching=caching,
                                          voxel_caching=caching,
                                          mesh_cashing=caching,
                                          view_caching=caching)
        return sso
    def save_dataset_shallow(self):
        """Persists version dict, mapping dict and id changer to disk."""
        self.save_version_dict()
        self.save_mapping_dict()
        self.save_id_changer()
    def save_dataset_deep(self, extract_only=False, attr_keys=(), stride=1000,
                          qsub_pe=None, qsub_queue=None, nb_cpus=1,
                          n_max_co_processes=None):
        """Writes out the full dataset: processes SSV id blocks of size
        `stride` (locally or via QSUB), gathers the per-SSV attributes and
        caches them as "<attr>s.npy" (or "<attr>s_sel.npy" if extract_only)."""
        self.save_dataset_shallow()
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, extract_only, attr_keys])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.write_super_segmentation_dataset_thread,
                multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "write_super_segmentation_dataset",
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder,
                                         n_cores=nb_cpus,
                                         n_max_co_processes=n_max_co_processes)
            out_files = glob.glob(path_to_out + "/*")
            results = []
            for out_file in out_files:
                with open(out_file) as f:
                    results.append(pkl.load(f))
        else:
            raise Exception("QSUB not available")
        # merge the per-block attribute dicts into one list per attribute
        attr_dict = {}
        for this_attr_dict in results:
            for attribute in this_attr_dict.keys():
                if not attribute in attr_dict:
                    attr_dict[attribute] = []
                attr_dict[attribute] += this_attr_dict[attribute]
        if not self.mapping_dict_exists:
            self.mapping_dict = dict(zip(attr_dict["id"], attr_dict["sv"]))
            self.save_dataset_shallow()
        for attribute in attr_dict.keys():
            if extract_only:
                np.save(self.path + "/%ss_sel.npy" % attribute,
                        attr_dict[attribute])
            else:
                np.save(self.path + "/%ss.npy" % attribute,
                        attr_dict[attribute])
    def export_to_knossosdataset(self, kd, stride=1000, qsub_pe=None,
                                 qsub_queue=None, nb_cpus=10):
        """Exports the SSVs into the given KnossosDataset, in id blocks of
        size `stride` (locally or via QSUB)."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, kd.knossos_path, nb_cpus])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(ssh.export_to_knossosdataset_thread,
                                            multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "export_to_knossosdataset",
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def convert_knossosdataset(self, sv_kd_path, ssv_kd_path,
                               stride=256, qsub_pe=None, qsub_queue=None,
                               nb_cpus=None):
        """Converts an SV KnossosDataset into an SSV one: tiles the SV
        dataset's bounding box into cubes of edge length `stride` and
        processes them in batches of 20 offsets (locally or via QSUB)."""
        self.save_dataset_shallow()
        sv_kd = knossosdataset.KnossosDataset()
        sv_kd.initialize_from_knossos_path(sv_kd_path)
        if not os.path.exists(ssv_kd_path):
            # create the target dataset with the same boundary/scale/name
            ssv_kd = knossosdataset.KnossosDataset()
            ssv_kd.initialize_without_conf(ssv_kd_path, sv_kd.boundary,
                                           sv_kd.scale,
                                           sv_kd.experiment_name,
                                           mags=[1])
        size = np.ones(3, dtype=np.int) * stride
        multi_params = []
        offsets = []
        for x in range(0, sv_kd.boundary[0], stride):
            for y in range(0, sv_kd.boundary[1], stride):
                for z in range(0, sv_kd.boundary[2], stride):
                    offsets.append([x, y, z])
                    if len(offsets) >= 20:
                        multi_params.append([self.version, self.version_dict,
                                             self.working_dir, nb_cpus,
                                             sv_kd_path, ssv_kd_path, offsets,
                                             size])
                        offsets = []
        # flush the remaining (< 20) offsets
        if len(offsets) > 0:
            multi_params.append([self.version, self.version_dict,
                                 self.working_dir, nb_cpus,
                                 sv_kd_path, ssv_kd_path, offsets,
                                 size])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(ssh.convert_knossosdataset_thread,
                                            multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "convert_knossosdataset",
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def aggregate_segmentation_object_mappings(self, obj_types,
                                               stride=1000, qsub_pe=None,
                                               qsub_queue=None, nb_cpus=1):
        """Aggregates the object mappings for the given object types over
        all SSVs, in id blocks of size `stride` (locally or via QSUB)."""
        for obj_type in obj_types:
            assert obj_type in self.version_dict
        assert "sv" in self.version_dict
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, obj_types])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.aggregate_segmentation_object_mappings_thread,
                multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "aggregate_segmentation_object_mappings",
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def apply_mapping_decisions(self, obj_types, stride=1000, qsub_pe=None,
                                qsub_queue=None, nb_cpus=1):
        """Applies the mapping decisions for the given object types over all
        SSVs, in id blocks of size `stride` (locally or via QSUB)."""
        for obj_type in obj_types:
            assert obj_type in self.version_dict
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, obj_types])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.apply_mapping_decisions_thread,
                multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "apply_mapping_decisions",
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def reskeletonize_objects(self, stride=200, small=True, big=True,
                              qsub_pe=None, qsub_queue=None, nb_cpus=1,
                              n_max_co_processes=None):
        """Recomputes skeletons, optionally separating small objects
        (parallel) and big objects (fewer, heavier jobs)."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir])
        if small:
            if qsub_pe is None and qsub_queue is None:
                results = sm.start_multiprocess(
                    ssh.reskeletonize_objects_small_ones_thread,
                    multi_params, nb_cpus=nb_cpus)
            elif qu.__QSUB__:
                path_to_out = qu.QSUB_script(multi_params,
                                             "reskeletonize_objects_small_ones",
                                             n_cores=nb_cpus,
                                             pe=qsub_pe, queue=qsub_queue,
                                             script_folder=script_folder,
                                             n_max_co_processes=
                                             n_max_co_processes)
            else:
                raise Exception("QSUB not available")
        if big:
            if qsub_pe is None and qsub_queue is None:
                results = sm.start_multiprocess(
                    ssh.reskeletonize_objects_big_ones_thread,
                    multi_params, nb_cpus=1)
            elif qu.__QSUB__:
                # NOTE(review): with the default n_max_co_processes=None this
                # division raises a TypeError -- the QSUB path for big objects
                # seems to require an explicit n_max_co_processes; confirm.
                path_to_out = qu.QSUB_script(multi_params,
                                             "reskeletonize_objects_big_ones",
                                             n_cores=10,
                                             n_max_co_processes=int(n_max_co_processes/10*nb_cpus),
                                             pe=qsub_pe, queue=qsub_queue,
                                             script_folder=script_folder)
            else:
                raise Exception("QSUB not available")
    def export_skeletons(self, obj_types, apply_mapping=True, stride=1000,
                         qsub_pe=None, qsub_queue=None, nb_cpus=1):
        """Exports skeletons for all SSVs and prints how many SSVs had no
        skeleton."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, obj_types, apply_mapping])
        if qsub_pe is None and qsub_queue is None:
            # NOTE(review): the local branch runs
            # reskeletonize_objects_small_ones_thread while the QSUB branch
            # runs an "export_skeletons" script -- looks like a copy-paste
            # slip; confirm the intended worker function.
            results = sm.start_multiprocess(
                ssh.reskeletonize_objects_small_ones_thread,
                multi_params, nb_cpus=nb_cpus)
            no_skel_cnt = np.sum(results)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "export_skeletons",
                                         n_cores=nb_cpus,
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
            out_files = glob.glob(path_to_out + "/*")
            no_skel_cnt = 0
            for out_file in out_files:
                with open(out_file) as f:
                    no_skel_cnt += np.sum(pkl.load(f))
        else:
            raise Exception("QSUB not available")
        print "N no skeletons: %d" % no_skel_cnt
    def associate_objs_with_skel_nodes(self, obj_types, stride=1000,
                                       qsub_pe=None, qsub_queue=None,
                                       nb_cpus=1):
        """Associates the given object types with skeleton nodes for all
        SSVs, in id blocks of size `stride` (locally or via QSUB)."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir, obj_types])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.associate_objs_with_skel_nodes_thread,
                multi_params, nb_cpus=nb_cpus)
            no_skel_cnt = np.sum(results)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "associate_objs_with_skel_nodes",
                                         n_cores=nb_cpus,
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def predict_axoness(self, stride=1000, qsub_pe=None, qsub_queue=None,
                        nb_cpus=1):
        """Runs the axoness prediction over all SSVs, in id blocks of size
        `stride` (locally or via QSUB)."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.predict_axoness_thread,
                multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "predict_axoness",
                                         n_cores=nb_cpus,
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def predict_cell_types(self, stride=1000, qsub_pe=None, qsub_queue=None,
                           nb_cpus=1):
        """Runs the cell type prediction over all SSVs, in id blocks of size
        `stride` (locally or via QSUB)."""
        multi_params = []
        for ssv_id_block in [self.ssv_ids[i:i + stride]
                             for i in
                             xrange(0, len(self.ssv_ids), stride)]:
            multi_params.append([ssv_id_block, self.version, self.version_dict,
                                 self.working_dir])
        if qsub_pe is None and qsub_queue is None:
            results = sm.start_multiprocess(
                ssh.predict_cell_type_thread,
                multi_params, nb_cpus=nb_cpus)
        elif qu.__QSUB__:
            path_to_out = qu.QSUB_script(multi_params,
                                         "predict_cell_type",
                                         n_cores=nb_cpus,
                                         pe=qsub_pe, queue=qsub_queue,
                                         script_folder=script_folder)
        else:
            raise Exception("QSUB not available")
    def save_version_dict(self):
        """Pickles the version dict to disk (no-op when empty)."""
        if len(self.version_dict) > 0:
            write_obj2pkl(self.version_dict_path, self.version_dict)
    def load_version_dict(self):
        """Loads the version dict from disk; asserts the file exists."""
        assert self.version_dict_exists
        self.version_dict = load_pkl2obj(self.version_dict_path)
    def save_mapping_dict(self):
        """Pickles the SSV -> SVs mapping dict to disk (no-op when empty)."""
        if len(self.mapping_dict) > 0:
            write_obj2pkl(self.mapping_dict_path, self.mapping_dict)
    def save_reversed_mapping_dict(self):
        """Pickles the SV -> SSV mapping dict to disk (no-op when empty)."""
        if len(self.reversed_mapping_dict) > 0:
            write_obj2pkl(self.reversed_mapping_dict_path,
                          self.reversed_mapping_dict)
    def load_mapping_dict(self):
        """Loads the SSV -> SVs mapping dict; asserts the file exists."""
        assert self.mapping_dict_exists
        self.mapping_dict = load_pkl2obj(self.mapping_dict_path)
    def load_reversed_mapping_dict(self):
        """Loads the SV -> SSV mapping dict; asserts the file exists."""
        assert self.reversed_mapping_dict_exists
        self.reversed_mapping_dict = load_pkl2obj(self.reversed_mapping_dict_path)
    def save_id_changer(self):
        """Saves the SV id -> SSV id array to disk (no-op when empty)."""
        if len(self._id_changer) > 0:
            np.save(self.id_changer_path, self._id_changer)
    def load_id_changer(self):
        """Loads the SV id -> SSV id array; asserts the file exists."""
        assert self.id_changer_exists
        self._id_changer = np.load(self.id_changer_path)
class SuperSegmentationObject(object):
def __init__(self, ssv_id, version=None, version_dict=None,
working_dir=None, create=True, sv_ids=None, scaling=None,
object_caching=True, voxel_caching=True, mesh_cashing=False,
view_caching=False, config=None, nb_cpus=1):
self.nb_cpus = nb_cpus
self._id = ssv_id
self.attr_dict = dict(mi=[], sj=[], vc=[], sv=[])
self._rep_coord = None
self._size = None
self._bounding_box = None
self._config = config
self._object_caching = object_caching
self._voxel_caching = voxel_caching
self._mesh_caching = mesh_cashing
self._view_caching = view_caching
self._objects = {}
self.skeleton = None
self._voxels = None
self._voxels_xy_downsampled = None
self._voxels_downsampled = None
self._mesh = None
self._edge_graph = None
# init mesh dicts
self._mesh = None
self._mi_mesh = None
self._sj_mesh = None
self._vc_mesh = None
self._views = None
self._dataset = None
self._weighted_graph = None
if sv_ids is not None:
self.attr_dict["sv"] = sv_ids
try:
self._scaling = np.array(scaling)
except:
print "Currently, scaling has to be set in the config"
self._scaling = np.array([1, 1, 1])
if working_dir is None:
try:
self._working_dir = wd
except:
raise Exception("No working directory (wd) specified in config")
else:
self._working_dir = working_dir
if scaling is None:
try:
self._scaling = \
np.array(self.config.entries["Dataset"]["scaling"])
except:
self._scaling = np.array([1, 1, 1])
else:
self._scaling = scaling
if version is None:
try:
self._version = self.config.entries["Versions"][self.type]
except:
raise Exception("unclear value for version")
elif version == "new":
other_datasets = glob.glob(self.working_dir + "/%s_*" % self.type)
max_version = -1
for other_dataset in other_datasets:
other_version = \
int(re.findall("[\d]+",
os.path.basename(other_dataset))[-1])
if max_version < other_version:
max_version = other_version
self._version = max_version + 1
else:
self._version = version
if version_dict is None:
try:
self.version_dict = self.config.entries["Versions"]
except:
raise Exception("No version dict specified in config")
else:
if isinstance(version_dict, dict):
self.version_dict = version_dict
# TODO @Sven: only valid for SSDS!
elif isinstance(version_dict, str) and version_dict == "load":
if self.version_dict_exists:
self.load_version_dict()
else:
raise Exception("No version dict specified in config")
if create and not os.path.exists(self.ssv_dir):
os.makedirs(self.ssv_dir)
def __hash__(self):
return hash((self.id, self.type, frozenset(self.sv_ids)))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.id == other.id and self.type == other.type and \
frozenset(self.sv_ids) == frozenset(other.sv_ids)
def __ne__(self, other):
return not self.__eq__(other)
# IMMEDIATE PARAMETERS
@property
def type(self):
return "ssv"
@property
def id(self):
return self._id
@property
def version(self):
return str(self._version)
@property
def object_caching(self):
return self._object_caching
@property
def voxel_caching(self):
return self._voxel_caching
@property
def mesh_caching(self):
return self._mesh_caching
@property
def view_caching(self):
return self._view_caching
@property
def scaling(self):
return self._scaling
@property
def dataset(self):
if self._dataset is None:
self._dataset = SuperSegmentationDataset(
working_dir=self.working_dir,
version=self.version,
scaling=self.scaling,
version_dict=self.version_dict)
return self._dataset
# PATHS
@property
def working_dir(self):
return self._working_dir
@property
def identifier(self):
return "ssv_%s" % (self.version.lstrip("_"))
@property
def ssds_dir(self):
return "%s/%s/" % (self.working_dir, self.identifier)
@property
def ssv_dir(self):
return "%s/so_storage/%s/" % (self.ssds_dir, subfold_from_ix_SSO(self.id))
@property
def attr_dict_path(self):
return self.ssv_dir + "atrr_dict.pkl"
@property
def attr_dict_path_new(self):
return self.ssv_dir + "attr_dict.pkl"
@property
def skeleton_nml_path(self):
return self.ssv_dir + "skeleton.nml"
@property
def skeleton_kzip_path(self):
return self.ssv_dir + "skeleton.k.zip"
@property
def skeleton_kzip_path_views(self):
return self.ssv_dir + "skeleton_views.k.zip"
@property
def objects_dense_kzip_path(self):
return self.ssv_dir + "objects_overlay.k.zip"
@property
def skeleton_path(self):
return self.ssv_dir + "skeleton.pkl"
@property
def skeleton_path_views(self):
return self.ssv_dir + "skeleton_views.pkl"
@property
def edgelist_path(self):
return self.ssv_dir + "edge_list.bz2"
@property
def view_path(self):
return self.ssv_dir + "views.lz4"
@property
def mesh_dc_path(self):
return self.ssv_dir + "mesh_dc.pkl"
# IDS
@property
def sv_ids(self):
return self.lookup_in_attribute_dict("sv")
@property
def sj_ids(self):
return self.lookup_in_attribute_dict("sj")
@property
def mi_ids(self):
return self.lookup_in_attribute_dict("mi")
@property
def vc_ids(self):
return self.lookup_in_attribute_dict("vc")
@property
def dense_kzip_ids(self):
return dict([("mi", 1), ("vc", 2), ("sj", 3)])
# SEGMENTATIONOBJECTS
@property
def svs(self):
return self.get_seg_objects("sv")
@property
def sjs(self):
return self.get_seg_objects("sj")
@property
def mis(self):
return self.get_seg_objects("mi")
@property
def vcs(self):
return self.get_seg_objects("vc")
# MESHES
@property
def mesh(self):
if self._mesh is None:
if not self.mesh_caching:
return self._load_obj_mesh("sv")
self._mesh = self._load_obj_mesh("sv")
return self._mesh
@property
def sj_mesh(self):
if self._sj_mesh is None:
if not self.mesh_caching:
return self._load_obj_mesh("sj")
self._sj_mesh = self._load_obj_mesh("sj")
return self._sj_mesh
@property
def vc_mesh(self):
if self._vc_mesh is None:
if not self.mesh_caching:
return self._load_obj_mesh("vc")
self._vc_mesh = self._load_obj_mesh("vc")
return self._vc_mesh
@property
def mi_mesh(self):
if self._mi_mesh is None:
if not self.mesh_caching:
return self._load_obj_mesh("mi")
self._mi_mesh = self._load_obj_mesh("mi")
return self._mi_mesh
# PROPERTIES
@property
def cell_type(self):
if self.cell_type_ratios is not None:
return np.argmax(self.cell_type_ratios)
else:
return None
@property
def cell_type_ratios(self):
return self.lookup_in_attribute_dict("cell_type_ratios")
@property
def weighted_graph(self):
if self._weighted_graph is None:
if self.skeleton is None:
self.load_skeleton()
node_scaled = self.skeleton["nodes"] * self.scaling
edge_coords = node_scaled[self.skeleton["edges"]]
weights = np.linalg.norm(edge_coords[:, 0] - edge_coords[:, 1],
axis=1)
self._weighted_graph = nx.Graph()
self._weighted_graph.add_weighted_edges_from(
np.concatenate((self.skeleton["edges"], weights[:, None]),
axis=1))
return self._weighted_graph
@property
def config(self):
if self._config is None:
self._config = parser.Config(self.working_dir)
return self._config
@property
def size(self):
if self._size is None:
self._size = self.lookup_in_attribute_dict("size")
if self._size is None:
self.calculate_size()
return self._size
@property
def bounding_box(self):
if self._bounding_box is None:
self._bounding_box = self.lookup_in_attribute_dict("bounding_box")
if self._bounding_box is None:
self.calculate_bounding_box()
return self._bounding_box
@property
def shape(self):
return self.bounding_box[1] - self.bounding_box[0]
@property
def rep_coord(self):
if self._rep_coord is None:
self._rep_coord = self.lookup_in_attribute_dict("rep_coord")
if self._rep_coord is None:
self._rep_coord = self.svs[0].rep_coord
return self._rep_coord
@property
def attr_dict_exists(self):
return os.path.isfile(self.attr_dict_path)
def mesh_exists(self, obj_type):
mesh_dc = MeshDict(self.mesh_dc_path)
return obj_type in mesh_dc
    @property
    def voxels(self):
        """Dense boolean voxel mask of the whole SSV inside its bounding box.

        Pastes every SV's voxels at its offset relative to the SSV bounding
        box; returns None when the SSV has no SVs. The result is stored on
        the instance only when voxel_caching is enabled.
        """
        if len(self.sv_ids) == 0:
            return None
        if self._voxels is None:
            # allocate the full bounding-box volume once
            voxels = np.zeros(self.bounding_box[1] - self.bounding_box[0],
                              dtype=np.bool)
            for sv in self.svs:
                # prevent each SV from keeping its own voxel cache alive
                sv._voxel_caching = False
                if sv.voxels_exist:
                    print np.sum(sv.voxels), sv.size
                    # SV box relative to the SSV bounding-box origin
                    box = [sv.bounding_box[0] - self.bounding_box[0],
                           sv.bounding_box[1] - self.bounding_box[0]]
                    voxels[box[0][0]: box[1][0],
                           box[0][1]: box[1][1],
                           box[0][2]: box[1][2]][sv.voxels] = True
                else:
                    print "missing voxels from %d" % sv.id
            if self.voxel_caching:
                self._voxels = voxels
            else:
                return voxels
        return self._voxels
@property
def voxels_xy_downsampled(self):
if self._voxels_xy_downsampled is None:
if self.voxel_caching:
self._voxels_xy_downsampled = \
self.load_voxels_downsampled((2, 2, 1))
else:
return self.load_voxels_downsampled((2, 2, 1))
return self._voxels_xy_downsampled
    def load_voxels_downsampled(self, downsampling=(2, 2, 1), nb_threads=10):
        """Build a downsampled boolean voxel mask of the SSV.

        Each SV's voxels are strided by `downsampling` and written into a
        shared output volume by worker threads. Returns None when the SSV
        holds no SVs.

        Parameters
        ----------
        downsampling : tuple of int
            Per-axis stride.
        nb_threads : int
            Number of worker threads; <= 1 runs sequentially.
        """
        def _load_sv_voxels_thread(args):
            # worker: paste one SV's strided voxels into the shared `voxels`
            sv_id = args[0]
            sv = segmentation.SegmentationObject(sv_id,
                                                 obj_type="sv",
                                                 version=self.version_dict[
                                                     "sv"],
                                                 working_dir=self.working_dir,
                                                 config=self.config,
                                                 voxel_caching=False)
            if sv.voxels_exist:
                # SV offset in downsampled coordinates
                box = [np.array(sv.bounding_box[0] - self.bounding_box[0],
                                dtype=np.int)]
                box[0] /= downsampling
                size = np.array(sv.bounding_box[1] -
                                sv.bounding_box[0], dtype=np.float)
                size = np.ceil(size.astype(np.float) /
                               downsampling).astype(np.int)
                box.append(box[0] + size)
                sv_voxels = sv.voxels
                if not isinstance(sv_voxels, int):
                    sv_voxels = sv_voxels[::downsampling[0],
                                          ::downsampling[1],
                                          ::downsampling[2]]
                    voxels[box[0][0]: box[1][0],
                           box[0][1]: box[1][1],
                           box[0][2]: box[1][2]][sv_voxels] = True

        downsampling = np.array(downsampling, dtype=np.int)
        if len(self.sv_ids) == 0:
            return None
        # output volume size in downsampled coordinates
        voxel_box_size = self.bounding_box[1] - self.bounding_box[0]
        voxel_box_size = voxel_box_size.astype(np.float)
        voxel_box_size = np.ceil(voxel_box_size / downsampling).astype(np.int)
        voxels = np.zeros(voxel_box_size, dtype=np.bool)
        multi_params = []
        for sv_id in self.sv_ids:
            multi_params.append([sv_id])
        if nb_threads > 1:
            pool = ThreadPool(nb_threads)
            pool.map(_load_sv_voxels_thread, multi_params)
            pool.close()
            pool.join()
        else:
            # NOTE: relies on Python 2's eager map() for the side effects
            map(_load_sv_voxels_thread, multi_params)
        return voxels
def get_seg_objects(self, obj_type):
if obj_type not in self._objects:
objs = []
for obj_id in self.lookup_in_attribute_dict(obj_type):
objs.append(self.get_seg_obj(obj_type, obj_id))
if self.object_caching:
self._objects[obj_type] = objs
else:
return objs
return self._objects[obj_type]
def get_seg_obj(self, obj_type, obj_id):
return segmentation.SegmentationObject(obj_id=obj_id,
obj_type=obj_type,
version=self.version_dict[
obj_type],
working_dir=self.working_dir,
create=False,
scaling=self.scaling)
def get_seg_dataset(self, obj_type):
return segmentation.SegmentationDataset(obj_type,
version_dict=self.version_dict,
version=self.version_dict[
obj_type],
scaling=self.scaling,
working_dir=self.working_dir)
def load_attr_dict(self):
try:
self.attr_dict = load_pkl2obj(self.attr_dict_path)
return 0
except (IOError, EOFError):
return -1
@property
def edge_graph(self):
if self._edge_graph is None:
self._edge_graph = self.load_graph()
return self._edge_graph
def load_graph(self):
G = nx.read_edgelist(self.edgelist_path, nodetype=int)
new_G = nx.Graph()
for e in G.edges_iter():
new_G.add_edge(self.get_seg_obj("sv", e[0]),
self.get_seg_obj("sv", e[1]))
return new_G
def load_edgelist(self):
g = self.load_graph()
return g.edges()
    def _load_obj_mesh(self, obj_type="sv", rewrite=False):
        """Load (or build and cache) the merged mesh for one object type.

        Temporary ("tmp") versions always rebuild and never write back.

        Returns
        -------
        tuple of np.array
            (indices, vertices). NOTE(review): vertices are cast to int --
            confirm this is intended; float is the usual vertex dtype.
        """
        if not rewrite and self.mesh_exists(obj_type) and not \
                        self.version == "tmp":
            mesh_dc = MeshDict(self.mesh_dc_path)
            ind, vert = mesh_dc[obj_type]
        else:
            ind, vert = merge_someshs(self.get_seg_objects(obj_type),
                                      nb_cpus=self.nb_cpus)
            if not self.version == "tmp":
                # persist the freshly merged mesh for later accesses
                mesh_dc = MeshDict(self.mesh_dc_path, read_only=False)
                mesh_dc[obj_type] = [ind, vert]
                mesh_dc.save2pkl()
        return np.array(ind, dtype=np.int), np.array(vert, dtype=np.int)
def load_svixs(self):
if not os.path.isfile(self.edgelist_path):
warnings.warn("Edge list of SSO %d does not exist. Return empty "
"list.", RuntimeWarning)
return []
edges = self.load_edgelist()
return np.unique(np.concatenate([[a.id, b.id] for a, b in edges]))
def save_attr_dict(self):
try:
orig_dc = load_pkl2obj(self.attr_dict_path)
except IOError:
orig_dc = {}
orig_dc.update(self.attr_dict)
write_obj2pkl(orig_dc, self.attr_dict_path)
    def save_attributes(self, attr_keys, attr_values):
        """Write the given key/value pairs into the on-disk attribute dict.

        Ignores self.attr_dict entirely; single keys/values are wrapped into
        lists automatically.

        Parameters
        ----------
        attr_keys : str or iterable of str
        attr_values : object or iterable of object
        """
        if not hasattr(attr_keys, "__len__"):
            attr_keys = [attr_keys]
        if not hasattr(attr_values, "__len__"):
            attr_values = [attr_values]
        try:
            attr_dict = load_pkl2obj(self.attr_dict_path)
        except IOError, e:
            # a missing file is fine (start a fresh dict); only warn when the
            # load failed due to missing permissions
            if not "[Errno 13] Permission denied" in str(e):
                pass
            else:
                warnings.warn("Could not load SSO attributes to %s due to "
                              "missing permissions." % self.attr_dict_path,
                              RuntimeWarning)
            attr_dict = {}
        for k, v in zip(attr_keys, attr_values):
            attr_dict[k] = v
        try:
            # NOTE(review): argument order (path, obj) differs from
            # save_attr_dict's (obj, path) -- confirm write_obj2pkl's signature
            write_obj2pkl(self.attr_dict_path, attr_dict)
        except IOError, e:
            if not "[Errno 13] Permission denied" in str(e):
                # NOTE(review): this raises a tuple; should be a bare `raise`
                # or `raise IOError(e)`
                raise (IOError, e)
            else:
                warnings.warn("Could not save SSO attributes to %s due to "
                              "missing permissions." % self.attr_dict_path,
                              RuntimeWarning)
def attr_exists(self, attr_key):
return attr_key in self.attr_dict
def lookup_in_attribute_dict(self, attr_key):
if attr_key in self.attr_dict:
return self.attr_dict[attr_key]
elif len(self.attr_dict) <= 4:
if self.load_attr_dict() == -1:
return None
if attr_key in self.attr_dict:
return self.attr_dict[attr_key]
else:
return None
def calculate_size(self):
self._size = 0
for sv in self.svs:
self._size += sv.size
def calculate_bounding_box(self):
self._bounding_box = np.ones((2, 3), dtype=np.int) * np.inf
self._bounding_box[1] = 0
self._size = 0
real_sv_cnt = 0
for sv in self.svs:
if sv.voxels_exist:
real_sv_cnt += 1
sv_bb = sv.bounding_box
sv.clear_cache()
for dim in range(3):
if self._bounding_box[0, dim] > sv_bb[0, dim]:
self._bounding_box[0, dim] = sv_bb[0, dim]
if self._bounding_box[1, dim] < sv_bb[1, dim]:
self._bounding_box[1, dim] = sv_bb[1, dim]
self._size += sv.size
if real_sv_cnt > 0:
self._bounding_box = self._bounding_box.astype(np.int)
else:
self._bounding_box = np.zeros((2, 3), dtype=np.int)
    def calculate_skeleton(self, size_threshold=1e20, kd=None,
                           coord_scaling=(8, 8, 4), plain=False, cleanup=True,
                           nb_threads=1):
        """Reskeletonize the SSV and store nodes/edges/diameters in
        self.skeleton.

        Small objects (bounding-box volume below `size_threshold`) are
        processed from the in-memory downsampled voxel mask; larger ones use
        the chunked path via `kd`. Does nothing when neither path applies.
        """
        if np.product(self.shape) < size_threshold:
            # vx = self.load_voxels_downsampled(coord_scaling)
            # vx = self.voxels[::coord_scaling[0],
            #                  ::coord_scaling[1],
            #                  ::coord_scaling[2]]
            vx = self.load_voxels_downsampled(downsampling=coord_scaling)
            # close small holes; the padding keeps morphology away from the
            # border and is cropped again afterwards
            vx = scipy.ndimage.morphology.binary_closing(
                np.pad(vx, 3, mode="constant", constant_values=0), iterations=3)
            vx = vx[3: -3, 3: -3, 3:-3]
            if plain:
                nodes, edges, diameters = \
                    ssh.reskeletonize_plain(vx, coord_scaling=coord_scaling)
                # plain path returns box-relative nodes; shift to global coords
                nodes = np.array(nodes, dtype=np.int) + self.bounding_box[0]
            else:
                nodes, edges, diameters = \
                    ssh.reskeletonize_chunked(self.id, self.shape,
                                              self.bounding_box[0],
                                              self.scaling,
                                              voxels=vx,
                                              coord_scaling=coord_scaling,
                                              nb_threads=nb_threads)
        elif kd is not None:
            nodes, edges, diameters = \
                ssh.reskeletonize_chunked(self.id, self.shape,
                                          self.bounding_box[0], self.scaling,
                                          kd=kd, coord_scaling=coord_scaling,
                                          nb_threads=nb_threads)
        else:
            return
        nodes = np.array(nodes, dtype=np.int)
        edges = np.array(edges, dtype=np.int)
        diameters = np.array(diameters, dtype=np.float)
        self.skeleton = {}
        self.skeleton["edges"] = edges
        self.skeleton["nodes"] = nodes
        self.skeleton["diameters"] = diameters
        if cleanup:
            # up to two cleanup passes, skipped for near-trivial skeletons
            for i in range(2):
                if len(self.skeleton["edges"]) > 2:
                    self.skeleton = ssh.cleanup_skeleton(self.skeleton,
                                                         coord_scaling)
    def save_skeleton_to_kzip(self, dest_path=None):
        """Export self.skeleton (with optional per-node axoness/cell_type/meta
        data) as a KNOSSOS annotation into a k.zip; best-effort, failures are
        printed rather than raised."""
        try:
            if self.skeleton is None:
                self.load_skeleton()
            a = skeleton.SkeletonAnnotation()
            a.scaling = self.scaling
            a.comment = "skeleton"
            skel_nodes = []
            for i_node in range(len(self.skeleton["nodes"])):
                c = self.skeleton["nodes"][i_node]
                # stored diameters -> node radius
                r = self.skeleton["diameters"][i_node] / 2
                skel_nodes.append(skeleton.SkeletonNode().
                                  from_scratch(a, c[0], c[1], c[2], radius=r))
                if "axoness" in self.skeleton:
                    skel_nodes[-1].data["axoness"] = self.skeleton["axoness"][
                        i_node]
                if "cell_type" in self.skeleton:
                    skel_nodes[-1].data["cell_type"] = \
                        self.skeleton["cell_type"][i_node]
                if "meta" in self.skeleton:
                    skel_nodes[-1].data["meta"] = self.skeleton["meta"][i_node]
                a.addNode(skel_nodes[-1])
            for edge in self.skeleton["edges"]:
                a.addEdge(skel_nodes[edge[0]], skel_nodes[edge[1]])
            if dest_path is None:
                dest_path = self.skeleton_kzip_path
            write_skeleton(dest_path, [a])
        except Exception, e:
            print "[SSO: %d] Could not load/save skeleton:\n%s" % (self.id, e)
    def save_objects_to_kzip_sparse(self, obj_types=("sj", "mi", "vc"),
                                    dest_path=None):
        """Write one annotation per object type with a node per mapped object
        (radius approximated from object size; overlap/size/shape stored in
        node data). Types without mapping ratios are skipped with a notice."""
        annotations = []
        for obj_type in obj_types:
            assert obj_type in self.attr_dict
            map_ratio_key = "mapping_%s_ratios" % obj_type
            if not map_ratio_key in self.attr_dict.keys():
                print "%s not yet mapped. Object nodes are not written to " \
                      "k.zip." % obj_type
                continue
            overlap_ratios = np.array(self.attr_dict[map_ratio_key])
            overlap_ids = np.array(self.attr_dict["mapping_%s_ids" % obj_type])
            a = skeleton.SkeletonAnnotation()
            a.scaling = self.scaling
            a.comment = obj_type
            so_objs = self.get_seg_objects(obj_type)
            for so_obj in so_objs:
                c = so_obj.rep_coord
                # somewhat approximated from sphere volume:
                r = np.power(so_obj.size / 3., 1 / 3.)
                skel_node = skeleton.SkeletonNode(). \
                    from_scratch(a, c[0], c[1], c[2], radius=r)
                # look up this object's aggregated overlap ratio by id
                skel_node.data["overlap"] = \
                    overlap_ratios[overlap_ids == so_obj.id][0]
                skel_node.data["size"] = so_obj.size
                skel_node.data["shape"] = so_obj.shape
                a.addNode(skel_node)
            annotations.append(a)
        if dest_path is None:
            dest_path = self.skeleton_kzip_path
        write_skeleton(dest_path, annotations)
    def save_objects_to_kzip_dense(self, obj_types):
        """Write dense voxel overlays of the given object types into the
        dense-objects k.zip, removing stale outputs first."""
        # [:-6] strips ".k.zip": remove a previously extracted folder and any
        # old archive before rewriting
        if os.path.exists(self.objects_dense_kzip_path[:-6]):
            shutil.rmtree(self.objects_dense_kzip_path[:-6])
        if os.path.exists(self.objects_dense_kzip_path):
            os.remove(self.objects_dense_kzip_path)
        for obj_type in obj_types:
            so_objs = self.get_seg_objects(obj_type)
            for so_obj in so_objs:
                print so_obj.id
                so_obj.save_kzip(path=self.objects_dense_kzip_path,
                                 write_id=self.dense_kzip_ids[obj_type])
def save_skeleton(self, to_kzip=True, to_object=True):
if to_object:
write_obj2pkl(self.skeleton, self.skeleton_path_views)
if to_kzip:
self.save_skeleton_to_kzip()
def load_skeleton(self):
try:
self.skeleton = load_pkl2obj(self.skeleton_path_views)
return True
except:
return False
def aggregate_segmentation_object_mappings(self, obj_types, save=False):
assert isinstance(obj_types, list)
mappings = dict((obj_type, Counter()) for obj_type in obj_types)
for sv in self.svs:
sv.load_attr_dict()
for obj_type in obj_types:
if "mapping_%s_ids" % obj_type in sv.attr_dict:
keys = sv.attr_dict["mapping_%s_ids" % obj_type]
values = sv.attr_dict["mapping_%s_ratios" % obj_type]
mappings[obj_type] += Counter(dict(zip(keys, values)))
for obj_type in obj_types:
if obj_type in mappings:
self.attr_dict["mapping_%s_ids" % obj_type] = \
mappings[obj_type].keys()
self.attr_dict["mapping_%s_ratios" % obj_type] = \
mappings[obj_type].values()
if save:
self.save_attr_dict()
    def apply_mapping_decision(self, obj_type, correct_for_background=True,
                               lower_ratio=None, upper_ratio=None,
                               sizethreshold=None, save=True):
        """Select which candidate objects of `obj_type` belong to this SSV.

        Keeps candidates whose aggregated mapping ratio exceeds `lower_ratio`
        (and does not exceed `upper_ratio`) and whose size is above
        `sizethreshold`; thresholds default to the dataset config. The chosen
        ids are stored under self.attr_dict[obj_type].

        Parameters
        ----------
        obj_type : str
        correct_for_background : bool
            Renormalize each ratio by the object's non-background fraction.
        lower_ratio, upper_ratio : float or None
        sizethreshold : int or None
        save : bool
        """
        assert obj_type in self.version_dict
        # self.load_attr_dict()
        if not "mapping_%s_ratios" % obj_type in self.attr_dict:
            print "No mapping ratios found"
            return
        if not "mapping_%s_ids" % obj_type in self.attr_dict:
            print "no mapping ids found"
            return
        if lower_ratio is None:
            try:
                lower_ratio = self.config.entries["LowerMappingRatios"][
                    obj_type]
            except:
                # NOTE(review): raising a plain string is itself a TypeError
                # at runtime; should be e.g. `raise ValueError(...)`
                raise ("Lower ratio undefined")
        if upper_ratio is None:
            try:
                upper_ratio = self.config.entries["UpperMappingRatios"][
                    obj_type]
            except:
                print "Upper ratio undefined - 1. assumed"
                upper_ratio = 1.
        if sizethreshold is None:
            try:
                sizethreshold = self.config.entries["Sizethresholds"][obj_type]
            except:
                # NOTE(review): same string-raise issue as above
                raise ("Size threshold undefined")
        obj_ratios = np.array(self.attr_dict["mapping_%s_ratios" % obj_type])
        if correct_for_background:
            # divide each ratio by the object's non-background fraction
            for i_so_id in range(
                    len(self.attr_dict["mapping_%s_ids" % obj_type])):
                so_id = self.attr_dict["mapping_%s_ids" % obj_type][i_so_id]
                obj_version = self.config.entries["Versions"][obj_type]
                this_so = SegmentationObject(so_id, obj_type,
                                             version=obj_version,
                                             scaling=self.scaling,
                                             working_dir=self.working_dir)
                this_so.load_attr_dict()
                if 0 in this_so.attr_dict["mapping_ids"]:
                    ratio_0 = this_so.attr_dict["mapping_ratios"][
                        this_so.attr_dict["mapping_ids"] == 0][0]
                    obj_ratios[i_so_id] /= (1 - ratio_0)
        id_mask = obj_ratios > lower_ratio
        if upper_ratio < 1.:
            id_mask[obj_ratios > upper_ratio] = False
        candidate_ids = \
            np.array(self.attr_dict["mapping_%s_ids" % obj_type])[id_mask]
        self.attr_dict[obj_type] = []
        for candidate_id in candidate_ids:
            obj = segmentation.SegmentationObject(candidate_id,
                                                  obj_type=obj_type,
                                                  version=self.version_dict[
                                                      obj_type],
                                                  working_dir=self.working_dir,
                                                  config=self.config)
            if obj.size > sizethreshold:
                self.attr_dict[obj_type].append(candidate_id)
        if save:
            self.save_attr_dict()
def map_cellobjects(self, obj_types=None, save=False):
if obj_types is None:
obj_types = ["mi", "sj", "vc"]
self.aggregate_segmentation_object_mappings(obj_types, save=save)
for obj_type in obj_types:
self.apply_mapping_decision(obj_type, save=save,
correct_for_background=obj_type == "sj")
def clear_cache(self):
self._objects = {}
self._voxels = None
self._voxels_xy_downsampled = None
self._mesh = None
self._views = None
self.skeleton = None
    def copy2dir(self, dest_dir, safe=True):
        """Copy all files of this SSV into `dest_dir` and merge the attribute
        dicts. Currently disabled: the first statement unconditionally raises.
        """
        # NOTE(review): raising a tuple ("to be tested") disables the whole
        # method; everything below is unreachable until this line is removed
        raise ("To be tested.", NotImplementedError)
        # get all files in home directory
        fps = get_filepaths_from_dir(self.ssv_dir, ending="")
        fnames = [os.path.split(fname)[1] for fname in fps]
        # Open the file and raise an exception if it exists
        if not os.path.isdir(dest_dir):
            os.makedirs(dest_dir)
        for i in range(len(fps)):
            src_filename = fps[i]
            dest_filename = dest_dir + "/" + fnames[i]
            try:
                safe_copy(src_filename, dest_filename, safe=safe)
            except Exception:
                print "Skipped", fnames[i]
                pass
        self.load_attr_dict()
        # merge with an attribute dict already present at the destination
        if os.path.isfile(dest_dir + "/atrr_dict.pkl"):
            dest_attr_dc = load_pkl2obj(dest_dir + "/atrr_dict.pkl")
        else:
            dest_attr_dc = {}
        dest_attr_dc.update(self.attr_dict)
        self.attr_dict = dest_attr_dc
        self.save_attr_dict()
def partition_cc(self, max_nb=25):
"""
Splits connected component into subgraphs.
Parameters
----------
sso : SuperSegmentationObject
max_nb : int
Number of SV per CC
Returns
-------
dict
"""
init_g = self.edge_graph
partitions = split_subcc(init_g, max_nb)
return partitions
# -------------------------------------------------------------------- VIEWS
def load_views(self, woglia=True, raw_only=False):
"""
Parameters
----------
woglia : bool
Returns
-------
list of array
Views for each SV in self.svs
"""
params = [[sv, {"woglia": woglia, "raw_only": raw_only}] for sv in self.svs]
# list of arrays
views = sm.start_multiprocess_obj("load_views", params,
nb_cpus=self.nb_cpus)
return views
def view_existence(self, woglia=True):
params = [[sv, {"woglia": woglia}] for sv in self.svs]
so_views_exist = sm.start_multiprocess_obj("views_exist", params,
nb_cpus=self.nb_cpus)
return so_views_exist
    def render_views(self, add_cellobjects=False, random_processing=True,
                     qsub_pe=None, overwrite=False, cellobjects_only=False,
                     woglia=True):
        """Render sampled views for all SVs.

        Large SSVs (> 5000 SVs) are partitioned and processed in batches,
        either via local multiprocessing or QSUB; smaller SSVs are rendered
        directly.
        """
        if len(self.sv_ids) > 5e3:
            part = self.partition_cc()
            # dead branch kept for reference (`if 0:` disables the
            # existing-prediction filtering)
            if 0:  # not overwrite: # check existence of glia preds
                views_exist = np.array(self.view_existence(), dtype=np.int)
                print "Rendering huge SSO. %d/%d views left to process." \
                      % (np.sum(~views_exist), len(self.svs))
                ex_dc = {}
                for ii, k in enumerate(self.svs):
                    ex_dc[k] = views_exist[ii]
                # NOTE: deleting during iteration is safe here only because
                # Python 2's keys() returns a list snapshot
                for k in part.keys():
                    if ex_dc[k]:  # delete SO's with existing pred
                        del part[k]
                        continue
                del ex_dc
            else:
                print "Rendering huge SSO. %d views left to process." \
                      % len(self.svs)
            # replace SegmentationObjects by their ids for serialization
            for k in part.keys():
                val = part[k]
                part[k] = [so.id for so in val]
            params = part.values()
            if random_processing:
                np.random.seed(int(time.time() * 1e4 % 1e6))
                np.random.shuffle(params)
            if qsub_pe is None:
                sm.start_multiprocess(multi_render_sampled_svidlist, params,
                                      nb_cpus=self.nb_cpus, debug=False)
            elif qu.__QSUB__:
                params = chunkify(params, 700)
                params = [[par, {"overwrite": overwrite,
                                 "render_first_only": True,
                                 "cellobjects_only": cellobjects_only}] for par in params]
                qu.QSUB_script(params, "render_views", pe=qsub_pe, queue=None,
                               script_folder=script_folder, n_max_co_processes=100)
            else:
                raise Exception("QSUB not available")
        else:
            render_sampled_sso(self, add_cellobjects=add_cellobjects,
                               verbose=False, overwrite=overwrite,
                               cellobjects_only=cellobjects_only, woglia=woglia)
    def sample_locations(self, force=False, cache=False, verbose=False):
        """Sampling coordinates for every SV (multiprocessed).

        Parameters
        ----------
        force : bool
            force resampling of locations
        cache : bool
            Reuse and/or store the locations in the attribute dict.
        verbose : bool

        Returns
        -------
        list of array
            Sample coordinates for each SV in self.svs.
        """
        if verbose:
            start = time.time()
        if not force and cache:
            if not self.attr_exists("sample_locations"):
                self.load_attr_dict()
            # NOTE(review): both branches below are identical; a KeyError is
            # raised when the key is still missing after the reload
            if self.attr_exists("sample_locations"):
                return self.attr_dict["sample_locations"]
            else:
                return self.attr_dict["sample_locations"]
        params = [[sv, {"force": force}] for sv in self.svs]
        # list of arrays
        locs = sm.start_multiprocess_obj("sample_locations", params,
                                         nb_cpus=self.nb_cpus)
        if cache:
            self.save_attributes(["sample_locations"], [locs])
        if verbose:
            dur = time.time() - start
            print "Sampling locations from %d SVs took %0.2fs. %0.4fs/SV (in" \
                  "cl. read/write)" % (len(self.svs), dur, dur / len(self.svs))
        return locs
# ------------------------------------------------------------------ EXPORTS
def pklskel2kzip(self):
self.load_skeleton()
es = self.skeleton["edges"]
ns = self.skeleton["nodes"]
a = skeleton.SkeletonAnnotation()
a.scaling = self.scaling
a.comment = "skeleton"
for e in es:
n0 = skeleton.SkeletonNode().from_scratch(a, ns[e[0]][0],
ns[e[0]][1], ns[e[0]][2])
n1 = skeleton.SkeletonNode().from_scratch(a, ns[e[1]][0],
ns[e[1]][1], ns[e[1]][2])
a.addNode(n0)
a.addNode(n1)
a.addEdge(n0, n1)
write_skeleton(self.skeleton_kzip_path_views, a)
def write_locations2kzip(self, dest_path=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
loc = np.concatenate(self.sample_locations())
new_anno = coordpath2anno(loc, add_edges=False)
new_anno.setComment("sample_locations")
write_skeleton(dest_path, [new_anno])
def mergelist2kzip(self, dest_path=None):
self.load_attr_dict()
kml = knossos_ml_from_sso(self)
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
write_txt2kzip(dest_path, kml, "mergelist.txt")
def mesh2kzip(self, obj_type="sv", dest_path=None, ext_color=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
if obj_type == "sv":
mesh = self.mesh
color = (130, 130, 130, 160)
elif obj_type == "sj":
mesh = self.sj_mesh
color = (int(0.849 * 255), int(0.138 * 255), int(0.133 * 255), 255)
elif obj_type == "vc":
mesh = self.vc_mesh
color = (int(0.175 * 255), int(0.585 * 255), int(0.301 * 255), 255)
elif obj_type == "mi":
mesh = self.mi_mesh
color = (0, 153, 255, 255)
else:
raise ("Given object type '%s' does not exist." % obj_type,
TypeError)
if ext_color is not None:
if ext_color == 0:
color = None
else:
color = ext_color
write_mesh2kzip(dest_path, mesh[0], mesh[1], color,
ply_fname=obj_type + ".ply")
def meshs2kzip(self, dest_path=None, sv_color=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
for ot in ["sj", "vc", "mi",
"sv"]: # determins rendering order in KNOSSOS
self.mesh2kzip(ot, dest_path=dest_path, ext_color=sv_color if
ot == "sv" else None)
    def export_kzip(self, dest_path=None, sv_color=None):
        """Write a KNOSSOS-loadable k.zip containing the skeleton, sparse
        object nodes, all meshes and the mergelist.

        Color is specified as rgba, 0 to 255.

        Parameters
        ----------
        dest_path : str
        sv_color : 4-tuple of int
            Color applied to the cell ("sv") mesh.
        """
        self.load_attr_dict()
        self.save_skeleton_to_kzip(dest_path=dest_path)
        self.save_objects_to_kzip_sparse(["mi", "sj", "vc"],
                                         dest_path=dest_path)
        self.meshs2kzip(dest_path=dest_path, sv_color=sv_color)
        self.mergelist2kzip(dest_path=dest_path)
def write_svmeshs2kzip(self, dest_path=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
for ii, sv in enumerate(self.svs):
mesh = sv.mesh
write_mesh2kzip(dest_path, mesh[0], mesh[1], None,
ply_fname="sv%d.ply" % ii)
def _svattr2mesh(self, dest_path, attr_key, cmap, normalize_vals=False):
sv_attrs = np.array([sv.lookup_in_attribute_dict(attr_key)
for sv in self.svs])
if normalize_vals:
min_val = sv_attrs.min()
sv_attrs -= min_val
sv_attrs /= sv_attrs.max()
ind, vert, col = merge_someshs(self.svs, color_vals=sv_attrs, cmap=cmap)
write_mesh2kzip(dest_path, ind, vert, col, "%s.ply" % attr_key)
def svprobas2mergelist(self, key="glia_probas", dest_path=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
coords = np.array([sv.rep_coord for sv in self.svs])
sv_comments = ["%s; %s" % (str(np.mean(sv.attr_dict[key], axis=0)),
str(sv.attr_dict[key]).replace('\n', ''))
for sv in self.svs]
kml = knossos_ml_from_svixs([sv.id for sv in self.svs], coords,
comments=sv_comments)
write_txt2kzip(dest_path, kml, "mergelist.txt")
def _pred2mesh(self, pred_coords, preds, ply_fname, dest_path=None,
colors=None, k=1):
mesh = self.mesh
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
col = colorcode_vertices(mesh[1].reshape((-1, 3)), pred_coords,
preds, colors=colors, k=k)
write_mesh2kzip(dest_path, mesh[0], mesh[1], col,
ply_fname=ply_fname)
# --------------------------------------------------------------------- GLIA
def gliaprobas2mesh(self, dest_path=None, pred_key_appendix=""):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
mcmp = sns.diverging_palette(250, 15, s=99, l=60, center="dark",
as_cmap=True)
self._svattr2mesh(dest_path, "glia_probas" + pred_key_appendix,
cmap=mcmp)
def gliapred2mesh(self, dest_path=None, thresh=0.161489,
pred_key_appendix=""):
self.load_attr_dict()
for sv in self.svs:
sv.load_attr_dict()
glia_svs = [sv for sv in self.svs if
sv.glia_pred(thresh, pred_key_appendix) == 1]
nonglia_svs = [sv for sv in self.svs if
sv.glia_pred(thresh, pred_key_appendix) == 0]
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
mesh = merge_someshs(glia_svs)
write_mesh2kzip(dest_path, mesh[0], mesh[1], None,
ply_fname="glia_%0.2f.ply" % thresh)
mesh = merge_someshs(nonglia_svs)
write_mesh2kzip(dest_path, mesh[0], mesh[1], None,
ply_fname="nonglia_%0.2f.ply" % thresh)
def gliapred2mergelist(self, dest_path=None, thresh=0.161489,
pred_key_appendix=""):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
params = [[sv, ] for sv in self.svs]
coords = sm.start_multiprocess_obj("rep_coord", params,
nb_cpus=self.nb_cpus)
coords = np.array(coords)
params = [[sv, {"thresh": thresh, "pred_key_appendix":
pred_key_appendix}] for sv in self.svs]
glia_preds = sm.start_multiprocess_obj("glia_pred", params,
nb_cpus=self.nb_cpus)
glia_preds = np.array(glia_preds)
glia_comments = ["%0.4f" % gp for gp in glia_preds]
kml = knossos_ml_from_svixs([sv.id for sv in self.svs], coords,
comments=glia_comments)
write_txt2kzip(dest_path, kml, "mergelist.txt")
    def gliasplit(self, dest_path=None, recompute=False, thresh=0.161489,
                  write_shortest_paths=False, verbose=False,
                  pred_key_appendix=""):
        """Split the SSV into glia and non-glia connected components and store
        the resulting SV id lists.

        NOTE(review): results are currently written to a hard-coded scratch
        directory (see HACK blocks) instead of the SSV's attribute dict.
        """
        if recompute or not (
                    self.attr_exists("glia_svs") and self.attr_exists(
                    "nonglia_svs")):
            # # HACK
            dest_dir = "/wholebrain/scratch/pschuber/ssv3_splits_v2/%s" % subfold_from_ix_SSO(
                self.id)
            ad = AttributeDict(dest_dir + "attr_dict.pkl", read_only=True)
            if self.id in ad:
                return
            # # HACK END
            if write_shortest_paths:
                shortest_paths_dir = os.path.split(dest_path)[0]
            else:
                shortest_paths_dir = None
            if verbose:
                print "Splitting glia in SSV %d with %d SV's." % \
                      (self.id, len(self.svs))
                start = time.time()
            nonglia_ccs, glia_ccs = split_glia(self, thresh=thresh,
                                               pred_key_appendix=pred_key_appendix,
                                               shortest_paths_dest_dir=shortest_paths_dir)
            # from neuropatch.nets.prediction import get_glia_model
            # m = get_glia_model()
            # self.predict_views_gliaSV(m)
            # del m
            # nonglia_ccs, glia_ccs = split_glia(self, thresh=thresh,
            #                                    shortest_paths_dest_dir=shortest_paths_dir)
            if verbose:
                print "Splitting glia in SSV %d with %d SV's finished after " \
                      "%.4gs." % (self.id, len(self.svs), time.time() - start)
            # keep only the SV ids of each connected component
            non_glia_ccs_ixs = [[so.id for so in nonglia] for nonglia in
                                nonglia_ccs]
            glia_ccs_ixs = [[so.id for so in glia] for glia in
                            glia_ccs]
            # self.attr_dict["glia_svs"] = glia_ccs_ixs
            # self.attr_dict["nonglia_svs"] = non_glia_ccs_ixs
            # self.save_attributes(["glia_svs", "nonglia_svs"],
            #                      [glia_ccs_ixs, non_glia_ccs_ixs])
            # HACK
            if not os.path.isdir(dest_dir):
                os.makedirs(dest_dir)
            ad = AttributeDict(dest_dir + "attr_dict.pkl", read_only=False)
            ad[self.id]["glia_svs"] = glia_ccs_ixs
            ad[self.id]["nonglia_svs"] = non_glia_ccs_ixs
            ad.save2pkl()
            # HACK END
def load_gliasplit_ad(self):
dest_dir = "/wholebrain/scratch/pschuber/ssv3_splits_v2/%s" % subfold_from_ix_SSO(
self.id)
ad = AttributeDict(dest_dir + "attr_dict.pkl", read_only=True)
return ad[self.id]
def gliasplit2mesh(self, dest_path=None):
"""
Parameters
----------
dest_path :
recompute :
thresh :
write_shortest_paths : bool
Write shortest paths between neuron type leaf nodes in SV graph
as k.zip's to dest_path.
Returns
-------
"""
attr_dict = self.load_gliasplit_ad()
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
# write meshes of CC's
glia_ccs = attr_dict["glia_svs"]
for kk, glia in enumerate(glia_ccs):
mesh = merge_someshs([self.get_seg_obj("sv", ix) for ix in
glia])
write_mesh2kzip(dest_path, mesh[0], mesh[1], None,
"glia_cc%d.ply" % kk)
non_glia_ccs = attr_dict["nonglia_svs"]
for kk, nonglia in enumerate(non_glia_ccs):
mesh = merge_someshs([self.get_seg_obj("sv", ix) for ix in
nonglia])
write_mesh2kzip(dest_path, mesh[0], mesh[1], None,
"nonglia_cc%d.ply" % kk)
def write_gliapred_cnn(self, dest_path=None):
if dest_path is None:
dest_path = self.skeleton_kzip_path_views
skel = load_skeleton(self.skeleton_kzip_path_views)[
"sample_locations"]
n_nodes = [n for n in skel.getNodes()]
pred_coords = [n.getCoordinate() * np.array(self.scaling) for n in
n_nodes]
preds = [int(n.data["glia_pred"]) for n in n_nodes]
self._pred2mesh(pred_coords, preds, "gliapred.ply",
dest_path=dest_path,
colors=[[11, 129, 220, 255], [218, 73, 58, 255]])
    def predict_views_gliaSV(self, model, woglia=True, verbose=True,
                             overwrite=False, pred_key_appendix=""):
        """Predict glia probabilities for all SVs lacking one (or all SVs when
        `overwrite`), re-rendering views once on a KeyError."""
        # params = self.svs
        # if check_view_existence:
        #     ex_views = self.view_existence()
        #     if not np.all(ex_views):
        #         self.render_views(add_cellobjects=False)
        existing_preds = sm.start_multiprocess(glia_pred_exists, self.svs,
                                               nb_cpus=self.nb_cpus)
        if overwrite:
            missing_sos = self.svs
        else:
            missing_sos = np.array(self.svs)[~np.array(existing_preds,
                                                       dtype=np.bool)]
        if verbose:
            print "Predicting %d/%d SV's of SSV %d." % (len(missing_sos),
                                                        len(self.svs),
                                                        self.id)
            start = time.time()
        if len(missing_sos) == 0:
            return
        # prediction key encodes the glia-removal state and appendix
        pred_key = "glia_probas"
        if woglia:
            pred_key += "woglia"
        pred_key += pred_key_appendix
        try:
            predict_sos_views(model, missing_sos, pred_key,
                              nb_cpus=self.nb_cpus, verbose=True,
                              woglia=woglia, raw_only=True)
        except KeyError:
            # views missing -> render them and retry once
            self.render_views(add_cellobjects=False)
            predict_sos_views(model, missing_sos, pred_key,
                              nb_cpus=self.nb_cpus, verbose=True,
                              woglia=woglia, raw_only=True)
        if verbose:
            end = time.time()
            print "Prediction of %d SV's took %0.2fs (incl. read/write). " \
                  "%0.4fs/SV" % (len(missing_sos), end - start,
                                 float(end - start) / len(missing_sos))
        # self.save_attributes(["gliaSV_model"], [model._fname])
    def predict_views_glia(self, model, thresh=0.5, dest_path=None,
                           woglia=False):
        """Deprecated glia view prediction; disabled by the unconditional
        raise below (use predict_views_gliaSV / predict_sos_views instead)."""
        # NOTE(review): raising a tuple; everything below is unreachable
        raise (
            NotImplementedError, "Change code to use 'predict_sos_views'.")
        if dest_path is None:
            dest_path = self.skeleton_kzip_path_views
        loc_coords = self.sample_locations()
        views = self.load_views(woglia=woglia)
        assert len(views) == len(loc_coords)
        views = np.concatenate(views)
        loc_coords = np.concatenate(loc_coords)
        # get single connected component in img
        for i in range(len(views)):
            sing_cc = np.concatenate([single_conn_comp_img(views[i, 0, :1]),
                                      single_conn_comp_img(
                                          views[i, 0, 1:])])
            views[i, 0] = sing_cc
        probas = model.predict_proba(views)
        locs = skeleton.SkeletonAnnotation()
        locs.scaling = self.scaling
        locs.comment = "sample_locations"
        for ii, c in enumerate(loc_coords):
            n = skeleton.SkeletonNode().from_scratch(locs,
                                                     c[0] / self.scaling[0],
                                                     c[1] / self.scaling[1],
                                                     c[2] / self.scaling[2])
            n.data["glia_proba"] = probas[ii][1]
            n.data["glia_pred"] = int(probas[ii][1] > thresh)
            locs.addNode(n)
        write_skeleton(dest_path, [locs])
        self.save_attributes(["glia_model"], [model._fname])
# ------------------------------------------------------------------ AXONESS
    def write_axpred_rfc(self):
        """Export RFC axoness predictions to a mesh; labels 0 and 1 are
        swapped before export. Returns False when no axoness is stored."""
        if self.load_skeleton():
            if not "axoness" in self.skeleton:
                return False
            axoness = self.skeleton["axoness"].copy()
            # swap labels 0 <-> 1 (masks are taken from the original array)
            axoness[self.skeleton["axoness"] == 1] = 0
            axoness[self.skeleton["axoness"] == 0] = 1
            print np.unique(axoness, return_counts=True)
            self._axonesspred2mesh(self.skeleton["nodes"] * self.scaling,
                                   axoness)
    def write_axpred_cnn(self, dest_path=None, k=1, pred_key_appendix=""):
        """Collect per-SV CNN axoness predictions plus their sample locations
        and write them as a colored mesh (k-NN mapping)."""
        if dest_path is None:
            dest_path = self.skeleton_kzip_path_views
        preds = np.array(sm.start_multiprocess_obj("axoness_preds",
                                                   [[sv, {"pred_key_appendix": pred_key_appendix}]
                                                    for sv in self.svs], nb_cpus=self.nb_cpus))
        preds = np.concatenate(preds)
        print "Collected axoness:", Counter(preds).most_common()
        locs = np.array(sm.start_multiprocess_obj("sample_locations",
                                                  [[sv, ] for sv in self.svs], nb_cpus=self.nb_cpus))
        print "Collected locations."
        pred_coords = np.concatenate(locs)
        # one (x, y, z) coordinate per prediction
        assert pred_coords.ndim == 2
        assert pred_coords.shape[1] == 3
        self._pred2mesh(pred_coords, preds, "axoness.ply", dest_path=dest_path,
                        k=k)
    def associate_objs_with_skel_nodes(self, obj_types=("sj", "vc", "mi"),
                                       downsampling=(8, 8, 4)):
        """For every skeleton node, find which segmentation objects of each
        requested type lie within 500 (scaled units, presumably nm -- TODO
        confirm) and store the per-node object-index lists in
        ``self.skeleton["assoc_<obj_type>"]``.

        Parameters
        ----------
        obj_types : tuple of str
            Segmentation object types to associate (e.g. synaptic junctions,
            vesicle clouds, mitochondria).
        downsampling : tuple of int
            Per-axis voxel downsampling passed to each object's voxel loader.
        """
        self.load_skeleton()
        for obj_type in obj_types:
            voxels = []
            # Cumulative voxel counts; entry i is the start offset of object i
            # in the concatenated voxel array (used to map voxel index ->
            # object index below).
            voxel_ids = [0]
            for obj in self.get_seg_objects(obj_type):
                vl = obj.load_voxel_list_downsampled_adapt(downsampling)
                if len(vl) == 0:
                    continue
                if len(voxels) == 0:
                    voxels = vl
                else:
                    voxels = np.concatenate((voxels, vl))
                voxel_ids.append(voxel_ids[-1] + len(vl))
            if len(voxels) == 0:
                # No voxels for this type: every node gets an empty list.
                self.skeleton["assoc_%s" % obj_type] = [[]] * len(
                    self.skeleton["nodes"])
                continue
            voxel_ids = np.array(voxel_ids)
            kdtree = scipy.spatial.cKDTree(voxels * self.scaling)
            balls = kdtree.query_ball_point(self.skeleton["nodes"] *
                                            self.scaling, 500)
            nodes_objs = []
            for i_node in range(len(self.skeleton["nodes"])):
                # Count how many cumulative offsets are <= each hit voxel
                # index; subtracting 1 yields the owning object's index.
                nodes_objs.append(list(np.unique(
                    np.sum(voxel_ids[:, None] <= np.array(balls[i_node]),
                           axis=0) - 1)))
            self.skeleton["assoc_%s" % obj_type] = nodes_objs
        self.save_skeleton(to_kzip=False, to_object=True)
        # self.save_objects_to_kzip_sparse(obj_types=obj_types)
    def extract_ax_features(self, feature_context_nm=8000, max_diameter=250,
                            obj_types=("sj", "mi", "vc"), downsample_to=None):
        """Extract per-node morphology/object features for axoness (and cell
        type) classification.

        For each (possibly downsampled) skeleton node, all nodes within
        ``feature_context_nm`` traversal distance are collected and the
        feature vector is built from: mean/std of node diameters, a
        10-bin diameter histogram, mean node degree, and per object type
        either ``[0, 0, 0]`` (none nearby) or [count, mean size, std size].

        Parameters
        ----------
        feature_context_nm : int
            Traversal radius (nm) defining each node's neighborhood.
        max_diameter : int
            Upper bound of the diameter histogram range.
        obj_types : tuple of str
            Object types read from ``self.skeleton["assoc_<type>"]`` (must
            have been filled by ``associate_objs_with_skel_nodes``).
        downsample_to : int or None
            If set, approximately this many nodes are sampled via striding.

        Returns
        -------
        list of np.array
            One feature vector per sampled node.
        """
        # NOTE(review): relies on .degree() iteration order matching node
        # index order (Python 2 / old networkx behavior) -- confirm.
        node_degrees = np.array(self.weighted_graph.degree().values(),
                                dtype=np.int)
        sizes = {}
        for obj_type in obj_types:
            objs = self.get_seg_objects(obj_type)
            sizes[obj_type] = np.array([obj.size for obj in objs],
                                       dtype=np.int)
        if downsample_to is not None:
            if downsample_to > len(self.skeleton["nodes"]):
                downsample_by = 1
            else:
                downsample_by = int(len(self.skeleton["nodes"]) /
                                    float(downsample_to))
        else:
            downsample_by = 1
        features = []
        for i_node in range(len(self.skeleton["nodes"][::downsample_by])):
            this_i_node = i_node * downsample_by
            this_features = []
            # All nodes reachable within feature_context_nm along the graph.
            paths = nx.single_source_dijkstra_path(self.weighted_graph,
                                                   this_i_node,
                                                   feature_context_nm)
            neighs = np.array(paths.keys(), dtype=np.int)
            neigh_diameters = self.skeleton["diameters"][neighs]
            this_features.append(np.mean(neigh_diameters))
            this_features.append(np.std(neigh_diameters))
            # normed=True is deprecated in modern numpy (use density=True).
            this_features += list(np.histogram(neigh_diameters,
                                               bins=10,
                                               range=(0, max_diameter),
                                               normed=True)[0])
            this_features.append(np.mean(node_degrees[neighs]))
            for obj_type in obj_types:
                neigh_objs = np.array(self.skeleton["assoc_%s" % obj_type])[
                    neighs]
                # Flatten the per-node object-index lists.
                neigh_objs = [item for sublist in neigh_objs for item in
                              sublist]
                neigh_objs = np.unique(np.array(neigh_objs))
                if len(neigh_objs) == 0:
                    # Placeholder triple keeps the feature vector length fixed.
                    this_features += [0, 0, 0]
                    continue
                this_features.append(len(neigh_objs))
                obj_sizes = sizes[obj_type][neigh_objs]
                this_features.append(np.mean(obj_sizes))
                this_features.append(np.std(obj_sizes))
            features.append(np.array(this_features))
        return features
def predict_axoness(self, ssd_version="axgt", clf_name="rfc",
feature_context_nm=5000):
sc = sbc.SkelClassifier(working_dir=self.working_dir,
ssd_version=ssd_version,
create=False)
# if feature_context_nm is None:
# if np.linalg.norm(self.shape * self.scaling) > 24000:
# radius = 12000
# else:
# radius = nx.diameter(self.weighted_graph) / 2
#
# if radius > 12000:
# radius = 12000
# elif radius < 2000:
# radius = 2000
#
# avail_fc = sc.avail_feature_contexts(clf_name)
# feature_context_nm = avail_fc[np.argmin(np.abs(avail_fc - radius))]
features = self.extract_ax_features(feature_context_nm=
feature_context_nm)
clf = sc.load_classifier(clf_name, feature_context_nm)
probas = clf.predict_proba(features)
pred = []
class_weights = np.array([1, 1, 1])
for i_node in range(len(self.skeleton["nodes"])):
paths = nx.single_source_dijkstra_path(self.weighted_graph, i_node,
10000)
neighs = np.array(paths.keys(), dtype=np.int)
pred.append(
np.argmax(np.sum(probas[neighs], axis=0) * class_weights))
# pred = np.argmax(probas, axis=1)
self.skeleton["axoness"] = np.array(pred, dtype=np.int)
self.save_skeleton(to_object=True, to_kzip=True)
try:
self.save_objects_to_kzip_sparse()
except:
pass
def axoness_for_coords(self, coords, radius_nm=4000):
coords = np.array(coords)
self.load_skeleton()
kdtree = scipy.spatial.cKDTree(self.skeleton["nodes"] * self.scaling)
close_node_ids = kdtree.query_ball_point(coords * self.scaling,
radius_nm)
axoness_pred = []
for i_coord in range(len(coords)):
cls, cnts = np.unique(
self.skeleton["axoness"][close_node_ids[i_coord]],
return_counts=True)
if len(cls) > 0:
axoness_pred.append(cls[np.argmax(cnts)])
else:
axoness_pred.append(-1)
return np.array(axoness_pred)
    def cnn_axoness_2_skel(self, dest_path=None, pred_key_appendix=""):
        """Write per-sample-location CNN axoness probabilities as skeleton
        nodes and transfer the predictions to the existing skeleton via
        nearest-view lookup plus a majority vote.

        Parameters
        ----------
        dest_path : str or None
            Destination k.zip; defaults to ``self.skeleton_kzip_path_views``.
        pred_key_appendix : str
            Suffix selecting which stored probabilities each SV returns.
        """
        if dest_path is None:
            dest_path = self.skeleton_kzip_path_views
        probas = np.array(sm.start_multiprocess_obj("axoness_probas",
                                                    [[sv, {"pred_key_appendix": pred_key_appendix}] for sv in self.svs], nb_cpus=self.nb_cpus))
        probas = np.concatenate(probas)
        loc_coords = np.array(sm.start_multiprocess_obj("sample_locations",
                                                        [[sv, ] for sv in self.svs], nb_cpus=self.nb_cpus))
        loc_coords = np.concatenate(loc_coords)
        # One probability triple (dendrite/axon/soma) per sample location.
        assert len(loc_coords) == len(probas)
        locs = skeleton.SkeletonAnnotation()
        locs.scaling = self.scaling
        locs.comment = "sample_locations"
        for ii, c in enumerate(loc_coords):
            # Node coordinates are stored in voxel space (divide by scaling).
            n = skeleton.SkeletonNode().from_scratch(locs,
                                                     c[0] / self.scaling[0],
                                                     c[1] / self.scaling[1],
                                                     c[2] / self.scaling[2])
            n.data["den_proba"] = probas[ii][0]
            n.data["ax_proba"] = probas[ii][1]
            n.data["soma_proba"] = probas[ii][2]
            n.data["axoness_pred"] = np.argmax(probas[ii])
            n.setComment("axoness_pred: %d" % np.argmax(probas[ii]))
            locs.addNode(n)
        write_skeleton(dest_path, [locs])
        try:
            # Transfer the location-wise predictions onto the skeleton: each
            # skeleton node gets the prediction of its nearest view location.
            if not os.path.isfile(self.skeleton_kzip_path_views):
                skel = load_skeleton(self.skeleton_kzip_path)["skeleton"]
            else:
                skel = load_skeleton(self.skeleton_kzip_path_views)["skeleton"]
            skel_nodes = [n for n in skel.getNodes()]
            skel_coords = [n.getCoordinate() * np.array(self.scaling) for n in
                           skel_nodes]
            tree = spatial.cKDTree(loc_coords)
            dist, nn_ixs = tree.query(skel_coords, k=1)
            for i in range(len(nn_ixs)):
                skel_nodes[i].data["nearest_views"] = nn_ixs[i]
                skel_nodes[i].data["nearest_views_dist"] = dist[i]
                # comp_window is defined elsewhere in this module
                # (presumably the rendering window size -- TODO confirm).
                if np.max(dist[i]) > comp_window:
                    warnings.warn("High distance between skeleton node and view:"
                                  " %0.0f" % np.max(dist[i]), RuntimeWarning)
            for n in skel.getNodes():
                n_ixs = n.data["nearest_views"]
                n.data["axoness_pred"] = np.argmax(probas[n_ixs])
            # Smooth labels along the skeleton within a 30 um window.
            ssh.majority_vote(skel, "axoness", 30000)
            skel.comment = "majority_vote"
            write_skeleton(dest_path, [skel])
        except KeyError as e:
            print e
# --------------------------------------------------------------- CELL TYPES
def predict_cell_type(self, ssd_version="ctgt", clf_name="rfc",
feature_context_nm=25000):
sc = sbc.SkelClassifier(working_dir=self.working_dir,
ssd_version=ssd_version,
create=False)
# if feature_context_nm is None:
# if np.linalg.norm(self.shape * self.scaling) > 24000:
# radius = 12000
# else:
# radius = nx.diameter(self.weighted_graph) / 2
#
# if radius > 12000:
# radius = 12000
# elif radius < 2000:
# radius = 2000
#
# avail_fc = sc.avail_feature_contexts(clf_name)
# feature_context_nm = avail_fc[np.argmin(np.abs(avail_fc - radius))]
features = self.extract_ax_features(feature_context_nm=
feature_context_nm,
downsample_to=200)
clf = sc.load_classifier(clf_name, feature_context_nm)
probs = clf.predict_proba(features)
ratios = np.sum(probs, axis=0)
ratios /= np.sum(ratios)
self.attr_dict["cell_type_ratios"] = ratios
self.save_attr_dict()
def get_pca_view_hists(self, t_net, pca):
views = np.concatenate(self.load_views())
latent = t_net.predict_proba(views2tripletinput(views))
latent = pca.transform(latent)
hist0 = np.histogram(latent[:, 0], bins=50, range=[-2, 2], normed=True)
hist1 = np.histogram(latent[:, 1], bins=50, range=[-3.2, 3], normed=True)
hist2 = np.histogram(latent[:, 2], bins=50, range=[-3.5, 3.5], normed=True)
return np.array([hist0, hist1, hist2])
def save_view_pca_proj(self, t_net, pca, dest_dir, ls=20, s=6.0, special_points=(),
special_markers=(), special_kwargs=()):
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
views = np.concatenate(self.load_views())
latent = t_net.predict_proba(views2tripletinput(views))
latent = pca.transform(latent)
col = (np.array(latent) - latent.min(axis=0)) / (latent.max(axis=0)-latent.min(axis=0))
col = np.concatenate([col, np.ones_like(col)[:, :1]], axis=1)
for ii, (a, b) in enumerate([[0, 1], [0, 2], [1, 2]]):
fig, ax = plt.subplots()
plt.scatter(latent[:, a], latent[:, b], c=col, s=s, lw=0.5, marker="o",
edgecolors=col)
if len(special_points) >= 0:
for kk, sp in enumerate(special_points):
if len(special_markers) == 0:
sm = "x"
else:
sm = special_markers[kk]
if len(special_kwargs) == 0:
plt.scatter(sp[None, a], sp[None, b], s=75.0, lw=2.3,
marker=sm, edgecolor="0.3", facecolor="none")
else:
plt.scatter(sp[None, a], sp[None, b], **special_kwargs)
fig.patch.set_facecolor('white')
ax.tick_params(axis='x', which='major', labelsize=ls, direction='out',
length=4, width=3, right="off", top="off", pad=10)
ax.tick_params(axis='y', which='major', labelsize=ls, direction='out',
length=4, width=3, right="off", top="off", pad=10)
ax.tick_params(axis='x', which='minor', labelsize=ls, direction='out',
length=4, width=3, right="off", top="off", pad=10)
ax.tick_params(axis='y', which='minor', labelsize=ls, direction='out',
length=4, width=3, right="off", top="off", pad=10)
plt.xlabel(r"$Z_%d$" % (a+1), fontsize=ls)
plt.ylabel(r"$Z_%d$" % (b+1), fontsize=ls)
ax.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax.yaxis.set_major_locator(ticker.MultipleLocator(2))
plt.tight_layout()
plt.savefig(dest_dir+"/%d_pca_%d%d.png" % (self.id, a+1, b+1), dpi=400)
plt.close()
    def gen_skel_from_sample_locs(self, dest_path=None, pred_key_appendix=""):
        """Build a skeleton (MST over view sample locations) with smoothed
        CNN axoness labels and pickle it to ``self.skeleton_path_views``.

        Smoothing happens in two stages: (1) per node, average the argmax
        labels over a 30 um traversal neighborhood with a soma-biased rule;
        (2) per soma-free connected branch, take the majority label.
        Falls back to the raw per-location argmax for 2-SV objects whose
        graph construction raises a "null graph" error.

        Parameters
        ----------
        dest_path : str or None
            Destination k.zip; defaults to ``self.skeleton_kzip_path_views``
            (currently only used by the commented-out k.zip export).
        pred_key_appendix : str
            Suffix selecting which stored probabilities each SV returns.
        """
        try:
            # Skip work if the skeleton pickle already exists.
            if os.path.isfile(self.skeleton_path_views):
                return
            if dest_path is None:
                dest_path = self.skeleton_kzip_path_views
            locs = np.concatenate(self.sample_locations())
            edge_list = create_mst_skeleton(locs)
            self.skeleton = {}
            # Nodes are stored in voxel space; locs are physical coordinates.
            self.skeleton["nodes"] = locs / np.array(self.scaling)
            self.skeleton["edges"] = edge_list
            self.skeleton["diameters"] = np.ones(len(locs))
            ax_probas = np.array(sm.start_multiprocess_obj("axoness_probas",
                                                           [[sv, {"pred_key_appendix": pred_key_appendix}] for sv in self.svs], nb_cpus=self.nb_cpus))
            ax_probas = np.concatenate(ax_probas)
            # first stage averaging
            curr_ax_preds = np.argmax(ax_probas, axis=1)
            ax_preds = np.zeros((len(locs)), dtype=np.int)
            for i_node in range(len(self.skeleton["nodes"])):
                paths = nx.single_source_dijkstra_path(self.weighted_graph, i_node,
                                                       30000)
                neighs = np.array(paths.keys(), dtype=np.int)
                cnt = Counter(curr_ax_preds[neighs])
                # Fraction of each label (0: dendrite, 1: axon, 2: soma --
                # presumably; confirm label encoding) in the neighborhood.
                loc_average = np.zeros((3, ))
                for k, v in cnt.items():
                    loc_average[k] = v
                loc_average /= float(len(neighs))
                # Soma wins with a low local threshold if the node itself is
                # soma, or with an overwhelming neighborhood majority.
                if (curr_ax_preds[i_node] == 2 and loc_average[2] >= 0.20) or (loc_average[2] >= 0.98):
                    ax_preds[i_node] = 2
                else:
                    ax_preds[i_node] = np.argmax(loc_average[:2])
            # second stage averaging, majority vote on every branch
            curr_ax_preds = np.array(ax_preds, dtype=np.int)
            edge_coords = locs[self.skeleton["edges"]]
            edge_ax = curr_ax_preds[self.skeleton["edges"]]
            # Drop edges touching soma nodes so connected components below
            # correspond to soma-free branches.
            edges = []
            for i in range(len(edge_coords)):
                if 2 in edge_ax[i]:
                    continue
                edges.append(self.skeleton["edges"][i])
            edges = np.array(edges)
            g = nx.Graph()
            g.add_edges_from(edges)
            ccs = nx.connected_components(g)
            for cc in ccs:
                curr_ixs = np.array(list(cc), dtype=np.int)
                cnt = Counter(ax_preds[curr_ixs])
                loc_average = np.zeros((3, ))
                for k, v in cnt.items():
                    loc_average[k] = v
                # Majority label of the whole branch.
                curr_ax_preds[curr_ixs] = np.argmax(loc_average)
            self.skeleton["axoness"] = curr_ax_preds
            # self.save_skeleton_to_kzip(dest_path=dest_path)
            write_obj2pkl(self.skeleton_path_views, self.skeleton)
        except Exception as e:
            if "null graph" in str(e) and len(self.sv_ids) == 2:
                # Two-SV objects can yield an empty weighted graph; use the
                # raw per-location predictions and a single connecting edge.
                print "Null graph error with 2 nodes, falling back to " \
                      "original classification and one edge."
                locs = np.concatenate(self.sample_locations())
                self.skeleton = {}
                self.skeleton["nodes"] = locs / np.array(self.scaling)
                self.skeleton["edges"] = np.array([[0, 1]])
                self.skeleton["diameters"] = np.ones(len(locs))
                ax_probas = np.array(sm.start_multiprocess_obj("axoness_probas",
                                                               [[sv, {
                                                                   "pred_key_appendix": pred_key_appendix}]
                                                                for sv in
                                                                self.svs],
                                                               nb_cpus=self.nb_cpus))
                ax_probas = np.concatenate(ax_probas)
                # first stage averaging
                curr_ax_preds = np.argmax(ax_probas, axis=1)
                self.skeleton["axoness"] = curr_ax_preds
                write_obj2pkl(self.skeleton_path_views, self.skeleton)
            else:
                print "Error %s occured with SSO %d (%d SVs)." % (e, self.id, len(self.sv_ids))
    def predict_celltype_cnn(self, model):
        """Predict this object's cell type with a CNN model (thin delegate
        to ``ssh.predict_sso_celltype``)."""
        ssh.predict_sso_celltype(self, model)
# ------------------------------------------------------------------------------
# SO rendering code
def render_sampled_sos_cc(sos, ws=(256, 128), verbose=False, woglia=True,
                          render_first_only=False, add_cellobjects=True,
                          overwrite=False, cellobjects_only=False):
    """
    Renders for each SV views at sampled locations (number is dependent on
    SV mesh size with scaling fact) from combined mesh of all SV.
    ----------------------------------------------------------------------
    USED FOR GLIA DETECTION
    ----------------------------------------------------------------------
    Parameters
    ----------
    sos : list of SegmentationObject
    ws : tuple
        Rendering window size.
    verbose : bool
    woglia : bool
        without glia components
    render_first_only : bool
        Only render (and sample locations of) the first SV; used to skip
        work when its views already exist.
    add_cellobjects : bool
        Map cell objects onto the temporary SSO before rendering.
    overwrite : bool
        Re-render even if views already exist.
    cellobjects_only : bool
        Forwarded to rendering/saving; render only the cell objects.
    """
    # initilaize temporary SSO
    if not overwrite:
        if render_first_only:
            if sos[0].views_exist:
                sys.stdout.write("\r%d" % sos[0].id)
                sys.stdout.flush()
                return
        else:
            if np.all([sv.views_exist for sv in sos]):
                return
    # Random throwaway ID; sys.maxint is Python-2-only.
    sso = SuperSegmentationObject(np.random.randint(0, sys.maxint),
                                  create=False,
                                  working_dir="/wholebrain/scratch/areaxfs/",
                                  version="tmp", scaling=(10, 10, 20))
    sso._objects["sv"] = sos
    if render_first_only:
        coords = [sos[0].sample_locations()]
    else:
        coords = sso.sample_locations(cache=False)
    if add_cellobjects:
        sso.map_cellobjects()
    # Offsets slicing the flat view array back into per-SV chunks.
    part_views = np.cumsum([0] + [len(c) for c in coords])
    views = render_sso_coords(sso, flatten_list(coords), add_cellobjects=add_cellobjects,
                              ws=ws, verbose=verbose, cellobjects_only=cellobjects_only)
    for i in range(len(coords)):
        v = views[part_views[i]:part_views[i+1]]
        # All-zero or all-saturated views indicate a rendering failure.
        if np.sum(v) == 0 or np.sum(v) == np.prod(v.shape):
            warnings.warn("Empty views detected after rendering.",
                          RuntimeWarning)
        sv_obj = sos[i]
        sv_obj.save_views(views=v, woglia=woglia, cellobjects_only=cellobjects_only)
        print sv_obj.segobj_dir
def render_so(so, ws=(256, 128), add_cellobjects=True, verbose=False):
    """
    Render super voxel views at locations sampled from the SV itself.
    Does not write views to so.views_path.
    Parameters
    ----------
    so : SegmentationObject
        super voxel
    ws : tuple of int
        Rendering windows size
    add_cellobjects : bool
        Map cell objects onto the temporary SSO before rendering.
    verbose : bool
    Returns
    -------
    np.array
        views
    """
    # initilaize temporary SSO for cellobject mapping purposes
    # (random throwaway ID; sys.maxint is Python-2-only)
    sso = SuperSegmentationObject(np.random.randint(0, sys.maxint),
                                  create=False,
                                  working_dir="/wholebrain/scratch/areaxfs/",
                                  version="tmp", scaling=(10, 10, 20))
    sso._objects["sv"] = [so]
    coords = sso.sample_locations(cache=False)[0]
    if add_cellobjects:
        sso.map_cellobjects()
    views = render_sso_coords(sso, coords, ws=ws, add_cellobjects=add_cellobjects,
                              verbose=verbose)
    return views
def glia_pred_exists(so):
    """Return True if the SegmentationObject already carries stored glia
    probabilities (i.e. "glia_probas" is present in its attribute dict)."""
    so.load_attr_dict()
    has_pred = "glia_probas" in so.attr_dict
    return has_pred
def views2tripletinput(views):
    """Convert raw views into triplet-network input: keep only the first
    view along axis 2 and pad the remaining two slots with ones.

    Returns a float32 array with axis 2 of size 3 * (sliced size 1).
    """
    first_view = views[:, :, :1]  # use first view only
    padding = np.ones_like(first_view)
    triplet = np.concatenate([first_view, padding, padding], axis=2)
    return triplet.astype(np.float32)
|
StructuralNeurobiologyLab/SyConnFS
|
syconnfs/representations/super_segmentation.py
|
Python
|
gpl-2.0
| 101,748
|
[
"NEURON"
] |
a34552a7685e47be2ecba3f2074e4f6ec76274e6bf399403a44ccc45ae48a5b6
|
# -*- coding: utf-8 -*-
import json
import logging
import sys
import networkx as nx
from networkx.algorithms import weakly_connected_components
from collections import defaultdict
from functools import partial
from itertools import chain
from math import sqrt
from numpy import subtract
from numpy.linalg import norm
from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union
from django.db import connection
from django.http import HttpRequest, JsonResponse
from catmaid.models import Relation, UserRole
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import get_relation_to_id_map, get_request_list
from catmaid.control.link import KNOWN_LINK_PAIRS
from catmaid.control.review import get_treenodes_to_reviews
from catmaid.control.tree_util import simplify, find_root, reroot, partition, \
spanning_tree, cable_length
from catmaid.control.synapseclustering import tree_max_density
def split_by_confidence_and_add_edges(confidence_threshold, digraphs, rows) -> Dict:
    """ digraphs is a dictionary of skeleton IDs as keys and DiGraph instances
    as values, where the DiGraph does not have any edges yet.
    rows are treenode tuples (id, parent_id, confidence, skeleton_id, ...).
    WARNING: side effect on contents of digraphs: will add the edges.
    Returns a dict of skeleton ID vs list of graphs (one per weakly
    connected component after splitting at low-confidence edges).
    """
    if confidence_threshold == 0:
        # No splitting: every child-parent pair becomes one edge.
        for row in rows:
            if row[1]:
                digraphs[row[3]].add_edge(row[1], row[0])
        return {skid: [digraph] for skid, digraph in digraphs.items()}
    # Otherwise skip (and thereby split at) edges below the threshold;
    # remember which skeletons lost at least one edge.
    low_conf_skeletons = set()
    for row in rows:
        if row[2] < confidence_threshold:
            low_conf_skeletons.add(row[3])
        elif row[1]:
            digraphs[row[3]].add_edge(row[1], row[0])
    arbors = {}
    for skid, digraph in digraphs.items():
        if skid in low_conf_skeletons:
            arbors[skid] = [digraph.subgraph(c).copy()
                            for c in weakly_connected_components(digraph)]
        else:
            arbors[skid] = [digraph]
    return arbors
def split_by_synapse_domain(bandwidth, locations, arbors, treenode_connector, minis) -> Tuple[Dict, Any]:
    """ locations: dictionary of treenode ID vs tuple with x,y,z
    arbors: dictionary of skeleton ID vs list of DiGraph (that were, or not, split by confidence)
    treenode_connector: dictionary of treenode ID vs list of tuples of connector_id, string of 'presynaptic_to' or 'postsynaptic_to'
    minis: dictionary of skeleton ID vs list of minified graphs (mutated: one mini appended per processed graph)
    Returns (arbors2, minis), where arbors2 maps skeleton ID to the possibly
    further split list of graphs (one per synapse domain).
    """
    arbors2:Dict = {}
    # Some arbors will be split further
    for skeleton_id, graphs in arbors.items():
        subdomains:List = []
        arbors2[skeleton_id] = subdomains
        for graph in graphs:
            treenode_ids = []
            connector_ids = []
            relation_ids = []
            # BUGFIX: dict.has_key was removed in Python 3; use membership.
            for treenode_id in (tn for tn in graph.nodes
                                if tn in treenode_connector):
                for c in treenode_connector.get(treenode_id):
                    connector_id, relation = c
                    treenode_ids.append(treenode_id)
                    connector_ids.append(connector_id)
                    relation_ids.append(relation)
            if not connector_ids:
                # No synapses on this graph: keep it whole.
                subdomains.append(graph)
                continue
            # Weight each edge with the Euclidean distance between its nodes.
            for parent_id, treenode_id in graph.edges:
                loc0 = locations[treenode_id]
                loc1 = locations[parent_id]
                graph[parent_id][treenode_id]['weight'] = norm(subtract(loc0, loc1))
            # Invoke Casey's magic
            max_density = tree_max_density(graph.to_undirected(), treenode_ids,
                                           connector_ids, relation_ids, [bandwidth])
            # BUGFIX: dict views are not iterators in Python 3; wrap in iter().
            synapse_group = next(iter(max_density.values()))
            # The list of nodes of each synapse_group contains only nodes that have connectors
            # A local_max is the skeleton node most central to a synapse_group
            anchors = {}
            for domain in synapse_group.values():
                g = nx.DiGraph()
                g.add_nodes_from(domain.node_ids)  # bogus graph, containing treenodes that point to connectors
                subdomains.append(g)
                anchors[domain.local_max] = g
            # Define edges between domains: create a simplified graph
            mini = simplify(graph, anchors.keys())
            # Replace each node by the corresponding graph, or a graph of a single node
            for node in mini.nodes:
                g = anchors.get(node)
                if not g:
                    # A branch node that was not an anchor, i.e. did not represent a synapse group
                    g = nx.Graph()
                    g.add_node(node, **{'branch': True})
                    subdomains.append(g)
                # Associate the Graph with treenodes that have connectors
                # with the node in the minified tree
                mini.nodes[node]['g'] = g
            # Put the mini into a map of skeleton_id and list of minis,
            # to be used later for defining intra-neuron edges in the circuit graph
            minis[skeleton_id].append(mini)
    return arbors2, minis
def _skeleton_graph(project_id, skeleton_ids, confidence_threshold, bandwidth,
                    expand, compute_risk, cable_spread, path_confluence,
                    pre_rel='presynaptic_to', post_rel='postsynaptic_to') -> nx.DiGraph:
    """ Assumes all skeleton_ids belong to project_id.

    Builds and returns the "circuit" DiGraph: one node per arbor (a DiGraph
    derived from a skeleton, possibly split at low-confidence edges and, if
    `expand` and `bandwidth` are given, by synapse domains); edges carry the
    synapse count 'c' and, if `compute_risk`, a 'risk' estimate.
    """
    # int() on every ID guards the string-formatted SQL against injection.
    skeletons_string = ",".join(str(int(x)) for x in skeleton_ids)
    cursor = connection.cursor()
    # Fetch all treenodes of all skeletons
    cursor.execute('''
    SELECT id, parent_id, confidence, skeleton_id,
           location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skeletons_string)
    rows = tuple(cursor.fetchall())
    # Each skeleton is represented with a DiGraph
    arbors:Union[DefaultDict[Any, nx.DiGraph], Dict[Any, nx.DiGraph]] = defaultdict(nx.DiGraph)
    # Get reviewers for the requested skeletons
    reviews = get_treenodes_to_reviews(skeleton_ids=skeleton_ids)
    # Create a DiGraph for every skeleton
    for row in rows:
        arbors[row[3]].add_node(row[0], **{'reviewer_ids': reviews.get(row[0], [])})
    # Dictionary of skeleton IDs vs list of DiGraph instances
    arbors = split_by_confidence_and_add_edges(confidence_threshold, arbors, rows)
    # Fetch all synapses
    relations = get_relation_to_id_map(project_id, cursor=cursor)
    cursor.execute('''
    SELECT connector_id, relation_id, treenode_id, skeleton_id
    FROM treenode_connector
    WHERE skeleton_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (skeletons_string, relations[pre_rel], relations[post_rel]))
    connectors:DefaultDict = defaultdict(partial(defaultdict, list))
    skeleton_synapses:DefaultDict = defaultdict(partial(defaultdict, list))
    for row in cursor.fetchall():
        connectors[row[0]][row[1]].append((row[2], row[3]))
        skeleton_synapses[row[3]][row[1]].append(row[2])
    # Cluster by synapses
    minis:DefaultDict[Any, List] = defaultdict(list) # skeleton_id vs list of minified graphs
    locations = None
    whole_arbors = arbors
    if expand and bandwidth > 0:
        locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
        treenode_connector:DefaultDict[Any, List] = defaultdict(list)
        for connector_id, pp in connectors.items():
            for treenode_id in chain.from_iterable(pp[relations[pre_rel]]):
                treenode_connector[treenode_id].append((connector_id, pre_rel))
            for treenode_id in chain.from_iterable(pp[relations[post_rel]]):
                treenode_connector[treenode_id].append((connector_id, post_rel))
        arbors_to_expand = {skid: ls for skid, ls in arbors.items() if skid in expand}
        expanded_arbors, minis = split_by_synapse_domain(bandwidth, locations, arbors_to_expand, treenode_connector, minis)
        arbors.update(expanded_arbors)
    # Obtain neuron names
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance ci,
         class_instance_class_instance cici
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = %s
    ''' % (skeletons_string, relations['model_of']))
    names = dict(cursor.fetchall())
    # A DiGraph representing the connections between the arbors (every node is an arbor)
    circuit = nx.DiGraph()
    for skid, digraphs in arbors.items():
        base_label = names[skid]
        tag = len(digraphs) > 1
        i = 0
        for g in digraphs:
            if g.number_of_nodes() == 0:
                continue
            if tag:
                label = "%s [%s]" % (base_label, i+1)
            else:
                label = base_label
            circuit.add_node(g, **{
                'id': "%s_%s" % (skid, i+1),
                'label': label,
                'skeleton_id': skid,
                'node_count': len(g),
                'node_reviewed_count': sum(1 for v in g.nodes.values() if 0 != len(v.get('reviewer_ids', []))), # TODO when bandwidth > 0, not all nodes are included. They will be included when the bandwidth is computed with an O(n) algorithm rather than the current O(n^2)
                'branch': False,
            })
            i += 1
    # Define edges between arbors, with number of synapses as an edge property
    for c in connectors.values():
        for pre_treenode, pre_skeleton in c[relations[pre_rel]]:
            for pre_arbor in arbors.get(pre_skeleton, ()):
                if pre_treenode in pre_arbor:
                    # Found the DiGraph representing an arbor derived from the skeleton to which the presynaptic treenode belongs.
                    for post_treenode, post_skeleton in c[relations[post_rel]]:
                        for post_arbor in arbors.get(post_skeleton, ()):
                            if post_treenode in post_arbor:
                                # Found the DiGraph representing an arbor derived from the skeleton to which the postsynaptic treenode belongs.
                                edge_props = circuit.get_edge_data(pre_arbor, post_arbor)
                                if edge_props:
                                    edge_props['c'] += 1
                                    edge_props['pre_treenodes'].append(pre_treenode)
                                    edge_props['post_treenodes'].append(post_treenode)
                                else:
                                    circuit.add_edge(pre_arbor, post_arbor, **{
                                        'c': 1,
                                        'pre_treenodes': [pre_treenode],
                                        'post_treenodes': [post_treenode],
                                        'arrow': 'triangle',
                                        'directed': True,
                                    })
                                break
                    break
    if compute_risk and bandwidth <= 0:
        # Compute synapse risk:
        # Compute synapse centrality of every node in every arbor that has synapses
        for skeleton_id, arbors in whole_arbors.items():
            synapses = skeleton_synapses[skeleton_id]
            pre = synapses[relations[pre_rel]]
            post = synapses[relations[post_rel]]
            for arbor in arbors:
                # The subset of synapses that belong to the fraction of the original arbor
                pre_sub = tuple(treenodeID for treenodeID in pre if treenodeID in arbor)
                post_sub = tuple(treenodeID for treenodeID in post if treenodeID in arbor)
                totalInputs = len(pre_sub)
                totalOutputs = len(post_sub)
                tc = {treenodeID: Counts() for treenodeID in arbor}
                for treenodeID in pre_sub:
                    tc[treenodeID].outputs += 1
                for treenodeID in post_sub:
                    tc[treenodeID].inputs += 1
                # Update the nPossibleIOPaths field in the Counts instance of each treenode
                _node_centrality_by_synapse(arbor, tc, totalOutputs, totalInputs)
                arbor.treenode_synapse_counts = tc
        if not locations:
            locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
        # Estimate the risk factor of the edge between two arbors,
        # as a function of the number of synapses and their location within the arbor.
        # Algorithm by Casey Schneider-Mizell
        # Implemented by Albert Cardona
        for pre_arbor, post_arbor, edge_props in circuit.edges(data=True):
            if pre_arbor == post_arbor:
                # Signal autapse
                edge_props['risk'] = -2
                continue
            try:
                spanning = spanning_tree(post_arbor, edge_props['post_treenodes'])
                #for arbor in whole_arbors[circuit[post_arbor]['skeleton_id']]:
                #    if post_arbor == arbor:
                #        tc = arbor.treenode_synapse_counts
                tc = post_arbor.treenode_synapse_counts
                count = spanning.number_of_nodes()
                if count < 3:
                    median_synapse_centrality = sum(tc[treenodeID].synapse_centrality for treenodeID in spanning.nodes) / count
                else:
                    # BUGFIX: integer floor division; `count / 2` is a float
                    # in Python 3 and raised TypeError as a list index
                    # (previously masked by the broad except -> risk -3).
                    median_synapse_centrality = sorted(tc[treenodeID].synapse_centrality for treenodeID in spanning.nodes)[count // 2]
                cable = cable_length(spanning, locations)
                if -1 == median_synapse_centrality:
                    # Signal not computable
                    edge_props['risk'] = -1
                else:
                    edge_props['risk'] = 1.0 / sqrt(pow(cable / cable_spread, 2) + pow(median_synapse_centrality / path_confluence, 2)) # NOTE: should subtract 1 from median_synapse_centrality, but not doing it here to avoid potential divisions by zero
            except Exception as e:
                logging.getLogger(__name__).error(e)
                # Signal error when computing
                edge_props['risk'] = -3
    if expand and bandwidth > 0:
        # Add edges between circuit nodes that represent different domains of the same neuron
        for skeleton_id, list_mini in minis.items():
            for mini in list_mini:
                for node in mini.nodes:
                    g = mini.nodes[node]['g']
                    # BUGFIX: NodeDataView is iterable but not an iterator;
                    # next() needs iter() in Python 3.
                    if 1 == len(g) and next(iter(g.nodes(data=True)))[1].get('branch'):
                        # A branch node that was preserved in the minified arbor
                        circuit.add_node(g, **{
                            'id': '%s-%s' % (skeleton_id, node),
                            'skeleton_id': skeleton_id,
                            'label': "", # "%s [%s]" % (names[skeleton_id], node),
                            'node_count': 1,
                            'branch': True,
                        })
                for node1, node2 in mini.edges:
                    g1 = mini.nodes[node1]['g']
                    g2 = mini.nodes[node2]['g']
                    circuit.add_edge(g1, g2, **{
                        'c': 10,
                        'arrow': 'none',
                        'directed': False
                    })
    return circuit
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def skeleton_graph(request:HttpRequest, project_id=None) -> JsonResponse:
    """Build the arbor-level circuit graph for the requested skeletons and
    return it as JSON ({'nodes': [...], 'edges': [...]}), either directly
    or keyed by link type when 'link_types' is supplied in the POST data.
    """
    project_id = int(project_id)
    post = request.POST
    skeleton_ids = set(int(v) for k, v in post.items()
                       if k.startswith('skeleton_list['))
    confidence_threshold = int(post.get('confidence_threshold', 0))
    bandwidth = float(post.get('bandwidth', 0)) # in nanometers
    cable_spread = float(post.get('cable_spread', 2500)) # in nanometers
    path_confluence = int(post.get('path_confluence', 10)) # a count
    compute_risk = int(post.get('risk', 0)) == 1
    expand = set(int(v) for k, v in post.items() if k.startswith('expand['))
    link_types = get_request_list(post, 'link_types', None)
    by_link_type = bool(link_types)
    if not by_link_type:
        # Default to plain synaptic connectors when no types were requested.
        link_types = ['synaptic-connector']
    result = {} # type: ignore
    for link_type in link_types:
        pair = KNOWN_LINK_PAIRS.get(link_type)
        if not pair:
            raise ValueError(f"Unknown link type: {link_type}")
        circuit = _skeleton_graph(project_id, skeleton_ids,
                                  confidence_threshold, bandwidth, expand,
                                  compute_risk, cable_spread, path_confluence,
                                  pair['source'], pair['target'])
        package:Dict[str, Any] = {
            'nodes': [{'data': props} for props in circuit.nodes.values()],
            'edges': [],
        }
        # Serialize each circuit edge with its endpoint node ids.
        for g1, g2, props in circuit.edges(data=True):
            source_id = circuit.nodes[g1]['id']
            target_id = circuit.nodes[g2]['id']
            edge_data = {'id': '%s_%s' % (source_id, target_id),
                         'source': source_id,
                         'target': target_id,
                         'weight': props['c'],
                         'label': str(props['c']) if props['directed'] else None,
                         'directed': props['directed'],
                         'arrow': props['arrow']}
            if compute_risk:
                edge_data['risk'] = props.get('risk')
            package['edges'].append({'data': edge_data})
        if by_link_type:
            result[link_type] = package
        else:
            result = package
    return JsonResponse(result, safe=False)
class Counts():
    """Per-treenode synapse bookkeeping for the synapse-centrality
    computation: raw input/output synapse counts, running totals seen
    during tree traversal, and the derived centrality values."""
    def __init__(self):
        # Raw synapse counts at this node.
        self.inputs = self.outputs = 0
        # Running totals accumulated while walking partitions of the tree.
        self.seenInputs = self.seenOutputs = 0
        # Derived measures, filled in by _node_centrality_by_synapse.
        self.nPossibleIOPaths = 0
        self.synapse_centrality = 0
def _node_centrality_by_synapse_db(skeleton_id:Union[int,str]) -> Dict:
    """ Compute the synapse centrality of every node in a tree.
    Return the dictionary of node ID keys and Counts values.
    This function is meant for TESTING. """
    cursor = connection.cursor()
    # BUGFIX: the parameter must be a 1-tuple -- `(skeleton_id)` is just a
    # parenthesized scalar, which DB-API drivers reject as a parameter
    # sequence.
    # NOTE(review): the join condition `t.skeleton_id = tc.skeleton_id`
    # pairs every treenode with every connector row of the skeleton --
    # confirm it should not be `t.id = tc.treenode_id`.
    cursor.execute('''
    SELECT t.id, t.parent_id, r.relation_name
    FROM treenode t LEFT OUTER JOIN (treenode_connector tc INNER JOIN relation r ON tc.relation_id = r.id) ON t.skeleton_id = tc.skeleton_id
    WHERE t.skeleton_id = %s
    ''', (skeleton_id,))
    nodes:Dict = {} # node ID vs Counts
    tree = nx.DiGraph()
    root = None
    totalInputs = 0
    totalOutputs = 0
    for row in cursor.fetchall():
        counts = nodes.get(row[0])
        if not counts:
            counts = Counts()
            nodes[row[0]] = counts
        if row[2]:
            if 'presynaptic_to' == row[2]:
                counts.outputs += 1
                totalOutputs += 1
            elif 'postsynaptic_to' == row[2]:
                counts.inputs += 1
                totalInputs += 1
        if row[1]:
            # Edge from child to parent; the root row has no parent.
            tree.add_edge(row[0], row[1])
        else:
            root = row[0]
    _node_centrality_by_synapse(tree, nodes, totalOutputs, totalInputs)
    return nodes
def _node_centrality_by_synapse(tree, nodes:Dict, totalOutputs:int, totalInputs:int) -> None:
    """ tree: a DiGraph
    nodes: a dictionary of treenode ID vs Counts instance
    totalOutputs: the total number of output synapses of the tree
    totalInputs: the total number of input synapses of the tree
    Returns nothing, the results are an update to the Counts instance of each treenode entry in nodes, namely the nPossibleIOPaths and synapse_centrality. """
    # 1. Ensure the root is an end by checking that it has only one child; otherwise reroot at the first end node found
    if 0 == totalOutputs:
        # Not computable: mark every node with the sentinel -1.
        for counts in nodes.values():
            counts.synapse_centrality = -1
        return
    if len(list(tree.successors(find_root(tree)))) > 1:
        # Reroot at the first end node found
        # (copy first so the caller's tree is not mutated).
        tree = tree.copy()
        endNode = next(nodeID for nodeID in nodes.keys() if not list(tree.successors(nodeID)))
        reroot(tree, endNode)
    # 2. Partition into sequences, sorted from small to large
    sequences = sorted(partition(tree), key=len)
    # 3. Traverse all partitions counting synapses seen
    for seq in sequences:
        # Each seq runs from an end node towards the root or a branch node
        seenI = 0
        seenO = 0
        for nodeID in seq:
            counts = nodes[nodeID]
            # Accumulate this node's own synapses plus whatever was already
            # seen below it (seenInputs/seenOutputs from earlier partitions).
            seenI += counts.inputs + counts.seenInputs
            seenO += counts.outputs + counts.seenOutputs
            counts.seenInputs = seenI
            counts.seenOutputs = seenO
            # Paths through this node: inputs below to outputs above, plus
            # outputs below to inputs above.
            counts.nPossibleIOPaths = counts.seenInputs * (totalOutputs - counts.seenOutputs) + counts.seenOutputs * (totalInputs - counts.seenInputs)
            counts.synapse_centrality = counts.nPossibleIOPaths / float(totalOutputs)
|
catmaid/CATMAID
|
django/applications/catmaid/control/graph.py
|
Python
|
gpl-3.0
| 20,999
|
[
"NEURON"
] |
d665373f18f4a998c92a0b8b98ba09f0434838a7d1b668b5c44254e40cb6d4e4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),

    # Django Admin
    url(r'^admin/', include(admin.site.urls)),

    # User management
    url(r'^users/', include("icanhabit.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),

    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # FIX: string dotted-path view arguments (e.g. 'django.views.defaults.bad_request')
    # were deprecated in Django 1.8 and removed in 1.10 — pass the callables instead.
    from django.views import defaults as default_views
    urlpatterns += [
        url(r'^400/$', default_views.bad_request),
        url(r'^403/$', default_views.permission_denied),
        url(r'^404/$', default_views.page_not_found),
        url(r'^500/$', default_views.server_error),
    ]
|
Nurdok/icanhabit
|
config/urls.py
|
Python
|
mit
| 1,231
|
[
"VisIt"
] |
cf08f6e68c3f3b38d259932835d0379fb3ff7393d27e230c073eae3ddd660515
|
"""
Macro library containning diffractometer related macros for the macros
server Tango device server as part of the Sardana project.
"""
# TODO: use taurus instead of PyTango API e.g. read_attribute,
# write_attribute. This module is full of PyTango centric calls.
# TODO: use explicit getters to obtain Sardana elements
# (controller - getController, pseudomotor - getPseudoMotor, ...) instead of
# using getDevice. However this getter seems to accept only the elements names
# and not the full names.
__all__ = ["addreflexion", "affine", "br", "ca", "caa", "ci", "computeub",
"freeze", "getmode", "hklscan", "hscan", "kscan", "latticecal",
"loadcrystal", "lscan", "newcrystal", "or0", "or1", "orswap",
"pa", "savecrystal", "setaz", "setlat", "setmode", "setor0",
"setor1", "setorn", "th2th", "ubr", "wh"]
import time
import math
import os
import re
import numpy as np
from sardana.macroserver.macro import *
from sardana.macroserver.macros.scan import aNscan
from sardana.macroserver.msexception import UnknownEnv
from taurus.core.util.log import Logger
# Module-level logger; emit an early notice that these macros are experimental.
logger = Logger.getLogger("MacroManager")
logger.info("Diffractometer macros are at early stage. They can slightly change. Macro luppsi is not tested.")
class _diffrac:
"""Internal class used as a base class for the diffractometer macros"""
env = ('DiffracDevice',)
def prepare(self):
dev_name = self.getEnv('DiffracDevice')
self.diffrac = self.getDevice(dev_name)
try:
dev_name = self.getEnv('Psi')
self.psidevice = self.getDevice(dev_name)
except:
pass
motorlist = self.diffrac.motorlist
pseudo_motor_names = []
for motor in self.diffrac.hklpseudomotorlist:
pseudo_motor_names.append(motor.split(' ')[0])
self.h_device = self.getDevice(pseudo_motor_names[0])
self.k_device = self.getDevice(pseudo_motor_names[1])
self.l_device = self.getDevice(pseudo_motor_names[2])
motor_list = self.diffrac.motorlist
self.nb_motors = len(motor_list)
try:
self.angle_names = self.diffrac.motorroles
except: # Only for compatibility
self.angle_names = []
if self.nb_motors == 4:
self.angle_names.append("omega")
self.angle_names.append("chi")
self.angle_names.append("phi")
self.angle_names.append("theta")
elif self.nb_motors == 6:
self.angle_names.append("mu")
self.angle_names.append("omega")
self.angle_names.append("chi")
self.angle_names.append("phi")
self.angle_names.append("gamma")
self.angle_names.append("delta")
if self.nb_motors == 4:
self.labelmotor = {'Omega': "omega",
'Chi': "chi", 'Phi': "phi", 'Theta': "theta"}
elif self.nb_motors == 6:
self.labelmotor = {'Mu': "mu", 'Theta': "omega", 'Chi': "chi",
'Phi': "phi", 'Gamma': "gamma", 'Delta': "delta"}
prop = self.diffrac.get_property(['DiffractometerType'])
for v in prop['DiffractometerType']:
self.type = v
self.angle_device_names = {}
i = 0
for motor in motor_list:
self.angle_device_names[self.angle_names[i]] = motor.split(' ')[0]
i = i + 1
# TODO: it should not be necessary to implement on_stop methods in the
# macros in order to stop the moveables. Macro API should provide this kind
# of emergency stop (if the moveables are correctly reserved with the
# getMotion method) in case of aborting a macro.
def on_stop(self):
for angle in self.angle_names:
angle_dev = self.getDevice(self.angle_device_names[angle])
angle_dev.Stop()
def check_collinearity(self, h0, k0, l0, h1, k1, l1):
print h0
cpx = k0 * l1 - l0 * k1
cpy = l0 * h1 - h0 * l1
cpz = h0 * k1 - k0 * h1
cp_square = math.sqrt(cpx * cpx + cpy * cpy + cpz * cpz)
collinearity = False
if cp_square < 0.01:
collinearity = True
return collinearity
def get_hkl_ref0(self):
reflections = []
try:
reflections = self.diffrac.reflectionlist
except:
pass
hkl = []
if reflections != None:
for i in range(1, 4):
hkl.append(reflections[0][i])
return hkl
def get_hkl_ref1(self):
reflections = []
try:
reflections = self.diffrac.reflectionlist
except:
pass
hkl = []
if reflections != None:
if len(reflections) > 1:
for i in range(1, 4):
hkl.append(reflections[1][i])
return hkl
def fl(self, ch,
regx=re.compile(
'(?<![\d.])'
'(?![1-9]\d*(?![\d.])|\d*\.\d*\.)'
'0*(?!(?<=0)\.)'
'([\d.]+?)'
'\.?0*'
'(?![\d.])'
),
repl=lambda mat: mat.group(mat.lastindex)
if mat.lastindex != 3
else '0' + mat.group(3)):
mat = regx.search(ch)
if mat:
return regx.sub(repl, ch)
class br(Macro, _diffrac):
    """Move the diffractometer to the reciprocal space coordinates given by
    H, K and L.
    If a fourth parameter is given, the combination of angles to be set is
    the corresponding to the given index. The index of the
    angles combinations are then changed."""

    param_def = [
        ['H', Type.String, None, "H value"],
        ['K', Type.String, None, "K value"],
        ['L', Type.String, None, "L value"],
        ['AnglesIndex', Type.Integer, -1, "Angles index"],
        ['FlagNotBlocking', Type.Integer, 0,
         "If 1 not block. Return without finish movement"],
        ['FlagPrinting', Type.Integer, 0,
         "If 1 printing. Used by ubr"]
    ]

    def prepare(self, H, K, L, AnglesIndex, FlagNotBlocking, FlagPrinting):
        # Resolve diffractometer and pseudo-motor devices.
        _diffrac.prepare(self)

    def run(self, H, K, L, AnglesIndex, FlagNotBlocking, FlagPrinting):
        # Positions of H/K/L inside the 'Q' environment vector.
        h_idx = 0; k_idx = 1; l_idx = 2
        # Use the requested trajectory, or the server-selected one by default.
        if AnglesIndex != -1:
            sel_tr = AnglesIndex
        else:
            sel_tr = self.diffrac.selectedtrajectory
        # Literal "H"/"K"/"L" arguments mean: reuse the value stored in Q.
        hkl_labels = ["H", "K", "L"]
        if H in hkl_labels or K in hkl_labels or L in hkl_labels:
            try:
                q_vector = self.getEnv('Q')
            except UnknownEnv:
                self.error("Environment Q not defined. Run wh to define it")
                return
            try:
                if H in hkl_labels:
                    H = float(q_vector[h_idx])
                if K in hkl_labels:
                    K = float(q_vector[k_idx])
                if L in hkl_labels:
                    L = float(q_vector[l_idx])
            except:
                self.error("Wrong format of Q vector")
                return
        # Ask the server to simulate trajectories for the target hkl.
        hkl_values = [float(H), float(K), float(L)]
        self.diffrac.write_attribute("computetrajectoriessim", hkl_values)
        angles_list = self.diffrac.trajectorylist[sel_tr]
        if FlagNotBlocking == 0:
            # Blocking move: build and execute a (u)mv macro over all angles.
            cmd = "mv"
            for name, angle in zip(self.angle_names, angles_list):
                cmd = cmd + " " + str(self.angle_device_names[name])
                cmd = cmd + " " + str(angle)
            if FlagPrinting == 1:
                cmd = "u" + cmd
            self.execMacro(cmd)
        else:
            # Non-blocking: write target positions directly and return.
            for name, angle in zip(self.angle_names, angles_list):
                angle_dev = self.getObj(self.angle_device_names[name])
                angle_dev.write_attribute("Position", angle)
        # Remember the commanded hkl (plus wavelength) in the Q environment.
        self.setEnv('Q', [hkl_values[h_idx], hkl_values[k_idx],
                          hkl_values[l_idx], self.diffrac.WaveLength])
class ubr(Macro, _diffrac):
    """Updating move of the diffractometer to the reciprocal space
    coordinates given by H, K and L (prints positions while moving)."""

    param_def = [
        ["hh", Type.String, "Not set", "H position"],
        ["kk", Type.String, "Not set", "K position"],
        ["ll", Type.String, "Not set", "L position"],
        ['AnglesIndex', Type.Integer, -1, "Angles index"]
    ]

    def prepare(self, hh, kk, ll, AnglesIndex):
        _diffrac.prepare(self)

    def run(self, hh, kk, ll, AnglesIndex):
        # L still at its sentinel default means not all three values were given.
        if ll == "Not set":
            self.output("usage: ubr H K L [Trajectory]")
            return
        # Delegate to br in blocking (0) + printing (1) mode.
        self.execMacro("br", hh, kk, ll, AnglesIndex, 0, 1)
class _ca(Macro, _diffrac):
    """Calculate motor positions for given H K L according to the current
    operation mode, for all trajectories or for the first one"""

    param_def = [
        ['H', Type.Float, None, "H value for the azimutal vector"],
        ['K', Type.Float, None, "K value for the azimutal vector"],
        ['L', Type.Float, None, "L value for the azimutal vector"],
        ['Trajectory', Type.Float, -1, "If -1, all trajectories"],
    ]

    def prepare(self, H, K, L, Trajectory):
        _diffrac.prepare(self)

    def run(self, H, K, L, Trajectory):
        # Simulate trajectories for the requested hkl (no motor motion).
        hkl_values = [H, K, L]
        self.diffrac.write_attribute("computetrajectoriessim", hkl_values)
        # -1 selects the full trajectory list, otherwise just the given index.
        if Trajectory == -1:
            start_range = 0
            end_range = len(self.diffrac.trajectorylist)
        else:
            start_range = Trajectory
            end_range = Trajectory + 1
        for i in range(int(start_range), int(end_range)):
            angles_list = self.diffrac.trajectorylist[i]
            self.output("")
            self.output("Trajectory %2d " % i)
            # Current (not simulated) hkl pseudo-motor positions.
            self.output("H K L = %9.5f %9.5f %9.5f " %
                        (self.h_device.position, self.k_device.position,
                         self.l_device.position))
            # Psi device is optional; reading it may fail.
            try:
                self.output("Azimuth (Psi) = %7.5f" %
                            (self.psidevice.Position))
            except:
                self.warning(
                    "Not able to read psi. Check if environment Psi is defined")
            self.output("Wavelength = %7.5f" % (self.diffrac.WaveLength))
            self.output("")
            # Format each simulated angle for tabular display.
            str_pos = {}
            j = 0
            for name in self.angle_names:
                str_pos[name] = "%7.5f" % angles_list[j]
                j = j + 1
            self.output("%10s %11s %12s %11s %10s %11s" %
                        ("Delta", "Theta", "Chi", "Phi", "Mu", "Gamma"))
            self.output("%10s %11s %12s %11s %10s %11s" %
                        (str_pos[self.labelmotor["Delta"]],
                         str_pos[self.labelmotor["Theta"]],
                         str_pos[self.labelmotor["Chi"]],
                         str_pos[self.labelmotor["Phi"]],
                         str_pos[self.labelmotor["Mu"]],
                         str_pos[self.labelmotor["Gamma"]]))
class ca(Macro, _diffrac):
    """Calculate motor positions for given H K L according to the current
    operation mode (trajectory 0)."""

    param_def = [
        ['H', Type.Float, None, "H value for the azimutal vector"],
        ['K', Type.Float, None, "K value for the azimutal vector"],
        ['L', Type.Float, None, "L value for the azimutal vector"],
    ]

    def prepare(self, H, K, L):
        _diffrac.prepare(self)

    def run(self, H, K, L):
        # Delegate to _ca restricted to the first trajectory (index 0).
        # FIX: removed an unused local list of the hkl values.
        self.execMacro("_ca", H, K, L, 0)
class caa(Macro, _diffrac):
    """Calculate motor positions for given H K L according to the current
    operation mode (all trajectories)"""

    param_def = [
        ['H', Type.Float, None, "H value for the azimutal vector"],
        ['K', Type.Float, None, "K value for the azimutal vector"],
        ['L', Type.Float, None, "L value for the azimutal vector"],
    ]

    def prepare(self, H, K, L):
        _diffrac.prepare(self)

    def run(self, H, K, L):
        # Delegate to _ca with its default Trajectory=-1 (all trajectories).
        # FIX: removed an unused local list of the hkl values.
        self.execMacro("_ca", H, K, L)
class ci(Macro, _diffrac):
    """ Calculate hkl for given angle values """

    param_def = [
        ['mu', Type.Float, None, "Mu value"],
        ['theta', Type.Float, None, "Theta value"],
        ['chi', Type.Float, None, "Chi value"],
        ['phi', Type.Float, None, "Phi value"],
        ['gamma', Type.Float, -999, "Gamma value"],
        ['delta', Type.Float, -999, "Delta value"],
    ]

    def prepare(self, mu, theta, chi, phi, gamma, delta):
        _diffrac.prepare(self)

    def run(self, mu, theta, chi, phi, gamma, delta):
        # For a six-circle geometry all six angles are mandatory; the
        # sentinel -999 default flags that gamma/delta were not supplied.
        if delta == -999 and self.nb_motors == 6:
            # FIX: corrected grammar of the user-facing error message.
            self.error("Six angle values are needed as argument")
            return
        # Ask the server for the inverse computation and print the result.
        angles = [mu, theta, chi, phi, gamma, delta]
        self.diffrac.write_attribute("computehkl", angles)
        hkl_values = self.diffrac.computehkl
        self.output("h %f k %f l %f" %
                    (hkl_values[0], hkl_values[1], hkl_values[2]))
class pa(Macro, _diffrac):
    """Prints information about the active diffractometer."""

    # Ordinal suffixes for numbering reflections (1st, 2nd, 3rd, then th).
    suffix = ("st", "nd", "rd", "th")

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        # Map the DiffractometerType code to a human-readable geometry name.
        str_type = "Eulerian 6C"
        if self.type == 'E4CV':
            str_type = "Eulerian 4C Vertical"
        elif self.type == 'E4CH':
            str_type = "Eulerian 4C Horizontal"
        elif self.type == 'K6C':
            str_type = "Kappa 6C"
        elif self.type == 'K4CV':
            str_type = "Kappa 4C Vertical"
        self.output("%s Geometry (%s), %s" %
                    (str_type, self.type, self.diffrac.enginemode))
        #self.output("Sector %s" % "[ToDo]")
        self.output("")
        # List all stored orientation reflections.
        reflections = self.diffrac.reflectionlist
        nb_ref = 0
        if reflections != None:
            for ref in reflections:
                if nb_ref < len(self.suffix):
                    sf = self.suffix[nb_ref]
                else:
                    sf = self.suffix[3]
                self.output(" %d%s Reflection (index %d): " %
                            (nb_ref + 1, sf, ref[0]))
                #self.output("  Affinement, Relevance : %d %d" % (ref[4], ref[5]))
                # Reflection rows longer than 10 entries carry six angle
                # columns (6-circle); shorter rows carry four (4-circle).
                if len(ref) > 10:
                    self.output(" %s %s %s %s %s %s: %s %s %s %s %s %s" % (self.angle_names[5], self.angle_names[1], self.angle_names[2], self.angle_names[3], self.angle_names[4], self.angle_names[0], _diffrac.fl(
                        self, str(ref[11])), _diffrac.fl(self, str(ref[7])), _diffrac.fl(self, str(ref[8])), _diffrac.fl(self, str(ref[9])), _diffrac.fl(self, str(ref[10])), _diffrac.fl(self, str(ref[6]))))
                else:
                    self.output(" %s %s %s %s: %s %s %s %s" % (self.angle_names[0], self.angle_names[1], self.angle_names[2], self.angle_names[
                        3], _diffrac.fl(self, str(ref[6])), _diffrac.fl(self, str(ref[7])), _diffrac.fl(self, str(ref[8])), _diffrac.fl(self, str(ref[9]))))
                nb_ref = nb_ref + 1
                # Columns 1..3 of a reflection row are its h, k, l values.
                self.output(" %33s %s %s %s" % ("H K L =", _diffrac.fl(self, str(
                    ref[1])), _diffrac.fl(self, str(ref[2])), _diffrac.fl(self, str(ref[3]))))
                self.output("")
        # self.output("")
        self.output("  Lattice Constants (lengths / angles):")
        self.output("%32s = %s %s %s / %s %s %s" % ("real space", self.diffrac.a,
                                                    self.diffrac.b, self.diffrac.c, _diffrac.fl(
                                                        self, str(self.diffrac.alpha)),
                                                    _diffrac.fl(self, str(self.diffrac.beta)), _diffrac.fl(self, str(self.diffrac.gamma))))
        self.output("")
        self.output("  Azimuthal reference:")
        self.output("%34s %s %s %s " %
                    ("H K L =", _diffrac.fl(self, str(self.diffrac.psirefh)), _diffrac.fl(self, str(self.diffrac.psirefk)), _diffrac.fl(self, str(self.diffrac.psirefl))))
        self.output("")
        self.output("  Lambda = %s" % (self.diffrac.WaveLength))
        # UB orientation matrix, printed row by row.
        lst = self.diffrac.ubmatrix
        self.output("  UB-Matrix")
        self.output("  %15g %15g %15g" % (lst[0][0], lst[0][1], lst[0][2]))
        self.output("  %15g %15g %15g" % (lst[1][0], lst[1][1], lst[1][2]))
        self.output("  %15g %15g %15g" % (lst[2][0], lst[2][1], lst[2][2]))
class wh(Macro, _diffrac):
    """Show principal axes and reciprocal space positions.
    Prints the current reciprocal space coordinates (H K L) and the user
    positions of the principal motors. Depending on the diffractometer geometry,
    other parameters such as the angles of incidence and reflection (ALPHA and
    BETA) and the incident wavelength (LAMBDA) may be displayed."""

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        self.output("")
        self.output("Engine: %s" % self.diffrac.engine)
        self.output("")
        self.output("Mode: %s" % self.diffrac.enginemode)
        self.output("")
        self.output("%s %s %3s %9.5f %9.5f %9.5f " %
                    ("H", "K", "L = ", self.h_device.position, self.k_device.position, self.l_device.position))
        # psirefh == -999 means no azimuthal reference vector is set.
        if self.diffrac.psirefh == -999:
            self.output("")
        else:
            self.output("%8s %9.5f %9.5f %9.5f " %
                        ("Ref   = ", self.diffrac.psirefh, self.diffrac.psirefk, self.diffrac.psirefl))
            # Cross-check the reference vector stored in the current engine
            # against the one stored in the 'psi' engine, restoring the
            # original engine/mode afterwards.
            psirefh_in = self.diffrac.psirefh
            psirefk_in = self.diffrac.psirefk
            psirefl_in = self.diffrac.psirefl
            engine_restore = self.diffrac.engine
            mode_restore = self.diffrac.enginemode
            self.diffrac.write_attribute("engine", "psi")
            psirefh_psi = self.diffrac.psirefh
            psirefk_psi = self.diffrac.psirefk
            psirefl_psi = self.diffrac.psirefl
            self.diffrac.write_attribute("engine", engine_restore)
            self.diffrac.write_attribute("enginemode", mode_restore)
            if psirefh_in != psirefh_psi or psirefk_in != psirefk_psi or psirefl_in != psirefl_psi:
                self.warning(
                    "Psiref vector missmatch. Calculated value corresponds to:")
                self.warning("%8s %9.5f %9.5f %9.5f " %
                             ("Ref   = ", psirefh_psi, psirefk_psi, psirefl_psi))
                self.warning("Use setaz for setting it consistently")
        # The Psi pseudo device is optional; reading it may fail.
        try:
            self.output("%s %7.5f" % (
                "Azimuth (Psi - calculated) = ", self.psidevice.Position))
        except:
            self.warning(
                "Not able to read psi. Check if environment Psi is defined")
        # If the current mode exposes a 'psi' parameter, print its set value.
        parameter_names = self.diffrac.modeparametersnames
        if parameter_names != None:
            i = 0
            for par in parameter_names:
                if par == "psi":
                    parameter_values = self.diffrac.modeparametersvalues
                    self.info("%s %7.5f" %
                              ("Azimuth (Psi - set) = ", parameter_values[i]))
                i = i + 1
        self.output("%s %7.5f" % ("Wavelength = ", self.diffrac.WaveLength))
        self.output("")
        # Read and format the position of each labelled physical motor.
        str_pos1 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Delta"]]).Position
        str_pos2 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Theta"]]).Position
        str_pos3 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Chi"]]).Position
        str_pos4 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Phi"]]).Position
        str_pos5 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Mu"]]).Position
        str_pos6 = "%7.5f" % self.getDevice(
            self.angle_device_names[self.labelmotor["Gamma"]]).Position
        self.output("%10s %11s %12s %11s %10s %11s" %
                    ("Delta", "Theta", "Chi", "Phi", "Mu", "Gamma"))
        self.output("%10s %11s %12s %11s %10s %11s" %
                    (str_pos1, str_pos2, str_pos3, str_pos4, str_pos5, str_pos6))
        # Store the current hkl + wavelength in the Q environment (used by br).
        self.setEnv('Q', [self.h_device.position, self.k_device.position,
                          self.l_device.position, self.diffrac.WaveLength])
class freeze(Macro, _diffrac):
    """ Set psi value for psi constant modes """

    param_def = [
        ['parameter', Type.String, None, "Parameter to freeze"],
        ['value', Type.Float, None, "Value to be frozen"]
    ]

    def prepare(self, parameter, value):
        _diffrac.prepare(self)

    def run(self, parameter, value):
        if parameter == "psi":
            engine_restore = self.diffrac.engine
            mode_restore = self.diffrac.enginemode
            if mode_restore != "psi_constant_vertical" and mode_restore != "psi_constant_horizontal":
                self.warning(
                    "Psi frozen to set value. But current mode is not set to psi_constant_vertical or psi_constant_horizontal ")
            # Write the frozen psi (parameter slot 3) in BOTH psi-constant
            # modes, then restore the original engine and mode.
            self.diffrac.write_attribute("engine", "hkl")
            self.diffrac.write_attribute("enginemode", "psi_constant_vertical")
            parameter_values = self.diffrac.modeparametersvalues
            parameter_values[3] = value
            self.diffrac.write_attribute(
                "modeparametersvalues", parameter_values)
            self.diffrac.write_attribute(
                "enginemode", "psi_constant_horizontal")
            parameter_values = self.diffrac.modeparametersvalues
            parameter_values[3] = value
            self.diffrac.write_attribute(
                "modeparametersvalues", parameter_values)
            self.diffrac.write_attribute("engine", engine_restore)
            self.diffrac.write_attribute("enginemode", mode_restore)
        else:
            self.error("Only implemented for parameter psi. Nothing done")
class setmode(iMacro, _diffrac):
    """Set operation mode."""

    param_def = [
        ['new_mode', Type.Integer, -1, "Mode to be set"]
    ]

    def prepare(self, new_mode):
        _diffrac.prepare(self)

    def run(self, new_mode):
        modes = self.diffrac.enginemodelist
        # new_mode == -1: interactive selection from the list of modes.
        if new_mode == -1:
            self.output("Available modes:")
            imode = 1
            old_mode = self.diffrac.read_attribute("enginemode").value
            for mode in modes:
                # Pre-select the current mode as the default answer.
                if mode == old_mode:
                    def_mode = imode
                self.output(" %d -> %s " % (imode, mode))
                imode = imode + 1
            # NOTE(review): this second read is redundant (old_mode unused
            # afterwards); kept as-is.
            old_mode = self.diffrac.read_attribute("enginemode").value
            self.output("")
            a = self.input("Your choice?", default_value=def_mode)
            # Validate the answer: keep it only if it is a listed index.
            imode = 1
            for mode in modes:
                if imode == int(a):
                    def_mode = imode
                imode = imode + 1
            self.diffrac.write_attribute("enginemode", modes[def_mode - 1])
            self.output("")
            self.output("Now using %s mode" % modes[def_mode - 1])
            return
        # Non-interactive: 1-based index given on the command line.
        if new_mode > len(modes):
            self.output(
                "Wrong index mode -> only from 1 to %d allowed:" % len(modes))
            imode = 1
            for mode in modes:
                self.output(" %d -> %s " % (imode, mode))
                imode = imode + 1
            return
        else:
            self.diffrac.write_attribute("enginemode", modes[new_mode - 1])
            self.output("Now using %s mode" % modes[new_mode - 1])
        # Persist the selection in the crystal file.
        self.execMacro('savecrystal')
class getmode(Macro, _diffrac):
    """Print the currently selected engine operation mode."""

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        current_mode = self.diffrac.enginemode
        self.output(current_mode)
class setlat(iMacro, _diffrac):
    """Set the crystal lattice parameters a, b, c, alpha, beta and gamma
    for the currently active diffraction pseudo motor controller."""

    param_def = [
        ['a', Type.Float, -999, "Lattice 'a' parameter"],
        ['b', Type.Float, -999, "Lattice 'b' parameter"],
        ['c', Type.Float, -999, "Lattice 'c' parameter"],
        ['alpha', Type.Float, -999, "Lattice 'alpha' parameter"],
        ['beta', Type.Float, -999, "Lattice 'beta' parameter"],
        ['gamma', Type.Float, -999, "Lattice 'gamma' parameter"]
    ]

    def prepare(self, a, b, c, alpha, beta, gamma):
        _diffrac.prepare(self)

    def run(self, a, b, c, alpha, beta, gamma):
        # gamma still at its sentinel default means the parameters were not
        # all given on the command line -> prompt interactively, defaulting
        # each value to the one currently set on the server.
        if gamma == -999:
            a = self.diffrac.a
            b = self.diffrac.b
            c = self.diffrac.c
            alpha = self.diffrac.alpha
            beta = self.diffrac.beta
            gamma = self.diffrac.gamma
            self.output("")
            self.output("Enter real space lattice parameters:")
            a = self.input(" Lattice a?", default_value=a,
                           data_type=Type.String)
            b = self.input(" Lattice b?", default_value=b,
                           data_type=Type.String)
            c = self.input(" Lattice c?", default_value=c,
                           data_type=Type.String)
            alpha = self.input(" Lattice alpha?",
                               default_value=alpha, data_type=Type.String)
            beta = self.input(" Lattice beta?",
                              default_value=beta, data_type=Type.String)
            gamma = self.input(" Lattice gamma?",
                               default_value=gamma, data_type=Type.String)
            self.output("")
            # Interactive answers are strings; convert before writing.
            self.diffrac.write_attribute("a", float(a))
            self.diffrac.write_attribute("b", float(b))
            self.diffrac.write_attribute("c", float(c))
            self.diffrac.write_attribute("alpha", float(alpha))
            self.diffrac.write_attribute("beta", float(beta))
            self.diffrac.write_attribute("gamma", float(gamma))
        else:
            self.diffrac.write_attribute("a", a)
            self.diffrac.write_attribute("b", b)
            self.diffrac.write_attribute("c", c)
            self.diffrac.write_attribute("alpha", alpha)
            self.diffrac.write_attribute("beta", beta)
            self.diffrac.write_attribute("gamma", gamma)
        # Recompute the UB matrix with the new lattice.
        self.execMacro('computeub')
class or0(Macro, _diffrac):
    """Set the primary orientation reflection to the given H K L."""

    param_def = [
        ['H', Type.Float, None, "H value"],
        ['K', Type.Float, None, "K value"],
        ['L', Type.Float, None, "L value"],
    ]

    def prepare(self, H, K, L):
        _diffrac.prepare(self)

    def run(self, H, K, L):
        # Refuse orientations parallel to the secondary reflection.
        secondary = _diffrac.get_hkl_ref1(self)
        if len(secondary) > 1:
            parallel = _diffrac.check_collinearity(
                self, H, K, L, secondary[0], secondary[1], secondary[2])
            if parallel:
                self.warning(
                    "Can not orient: or0 %9.5f %9.5f %9.5f are parallel to or1" % (H, K, L))
                return
        # Substitute reflection index 0 and refresh the UB matrix.
        self.diffrac.write_attribute("SubstituteReflection", [0, H, K, L])
        self.execMacro('computeub')
class or1(Macro, _diffrac):
    """Set the secondary orientation reflection to the given H K L."""

    param_def = [
        ['H', Type.Float, None, "H value"],
        ['K', Type.Float, None, "K value"],
        ['L', Type.Float, None, "L value"],
    ]

    def prepare(self, H, K, L):
        _diffrac.prepare(self)

    def run(self, H, K, L):
        # Refuse orientations parallel to the primary reflection.
        primary = _diffrac.get_hkl_ref0(self)
        if len(primary) > 1:
            parallel = _diffrac.check_collinearity(
                self, primary[0], primary[1], primary[2], H, K, L)
            if parallel:
                self.warning(
                    "Can not orient: or0 is parallel to or1 %9.5f %9.5f %9.5f" % (H, K, L))
                return
        # Substitute reflection index 1 and refresh the UB matrix.
        self.diffrac.write_attribute("SubstituteReflection", [1, H, K, L])
        self.execMacro('computeub')
class setor0(Macro, _diffrac):
    """Set the primary orientation reflection, choosing hkl and angle values."""

    param_def = [
        ['H', Type.Float, -999, "H value"],
        ['K', Type.Float, -999, "K value"],
        ['L', Type.Float, -999, "L value"],
        ['mu', Type.Float, -999, "Mu value"],
        ['theta', Type.Float, -999, "Theta value"],
        ['chi', Type.Float, -999, "Chi value"],
        ['phi', Type.Float, -999, "Phi value"],
        ['gamma', Type.Float, -999, "Gamma value"],
        ['delta', Type.Float, -999, "Delta value"],
    ]

    def prepare(self, H, K, L, mu, theta, chi, phi, gamma, delta):
        _diffrac.prepare(self)

    def run(self, H, K, L, mu, theta, chi, phi, gamma, delta):
        # Delegate to setorn with reflection index 0.
        macro_obj, _ = self.createMacro(
            "setorn", 0, H, K, L, mu, theta, chi, phi, gamma, delta)
        self.runMacro(macro_obj)
class setor1(Macro, _diffrac):
    """Set the secondary orientation reflection, choosing hkl and angle values."""

    param_def = [
        ['H', Type.Float, -999, "H value"],
        ['K', Type.Float, -999, "K value"],
        ['L', Type.Float, -999, "L value"],
        ['mu', Type.Float, -999, "Mu value"],
        ['theta', Type.Float, -999, "Theta value"],
        ['chi', Type.Float, -999, "Chi value"],
        ['phi', Type.Float, -999, "Phi value"],
        ['gamma', Type.Float, -999, "Gamma value"],
        ['delta', Type.Float, -999, "Delta value"],
    ]

    def prepare(self, H, K, L, mu, theta, chi, phi, gamma, delta):
        _diffrac.prepare(self)

    def run(self, H, K, L, mu, theta, chi, phi, gamma, delta):
        # Delegate to setorn with reflection index 1.
        macro_obj, _ = self.createMacro(
            "setorn", 1, H, K, L, mu, theta, chi, phi, gamma, delta)
        self.runMacro(macro_obj)
class setorn(iMacro, _diffrac):
    """Set orientation reflection indicated by the index."""

    param_def = [
        ['ref_id', Type.Integer, None, "reflection index (starting at 0)"],
        ['H', Type.Float, -999, "H value"],
        ['K', Type.Float, -999, "K value"],
        ['L', Type.Float, -999, "L value"],
        ['mu', Type.Float, -999, "Mu value"],
        ['theta', Type.Float, -999, "Theta value"],
        ['chi', Type.Float, -999, "Chi value"],
        ['phi', Type.Float, -999, "Phi value"],
        ['gamma', Type.Float, -999, "Gamma value"],
        ['delta', Type.Float, -999, "Delta value"],
    ]

    def prepare(self, ref_id, H, K, L, mu, theta, chi, phi, gamma, delta):
        _diffrac.prepare(self)

    def run(self, ref_id, H, K, L, mu, theta, chi, phi, gamma, delta):
        # delta at its sentinel default means not all values were supplied
        # on the command line -> prompt interactively, defaulting to the
        # stored reflection (or zeros when there is none).
        if delta == -999:
            reflections = []
            try:
                reflections = self.diffrac.reflectionlist
            except:
                pass
            tmp_ref = {}
            hkl_names = ["h", "k", "l"]
            if reflections != None:
                if len(reflections) > ref_id:
                    # Columns 1..3 are h,k,l; columns 6..11 are the angles.
                    for i in range(1, 4):
                        tmp_ref[hkl_names[i - 1]] = reflections[ref_id][i]
                    for i in range(6, 12):
                        tmp_ref[self.angle_names[i - 6]
                                ] = reflections[ref_id][i]
                else:
                    for i in range(0, 3):
                        tmp_ref[hkl_names[i]] = 0
                    for i in range(0, 6):
                        tmp_ref[self.angle_names[i]] = 0
            else:
                for i in range(0, 3):
                    tmp_ref[hkl_names[i]] = 0
                for i in range(0, 6):
                    tmp_ref[self.angle_names[i]] = 0
            self.output("")
            if ref_id == 0:
                ref_txt = "primary-reflection"
            elif ref_id == 1:
                ref_txt = "secondary-reflection"
            else:
                ref_txt = "reflection " + str(ref_id)
            self.output("Enter %s angles" % ref_txt)
            delta = float(self.input(" Delta?", default_value=tmp_ref[
                          "delta"], data_type=Type.String))
            theta = float(self.input(" Theta? ", default_value=tmp_ref[
                          "omega"], data_type=Type.String))
            chi = float(self.input(" Chi?", default_value=tmp_ref[
                        "chi"], data_type=Type.String))
            phi = float(self.input(" Phi?", default_value=tmp_ref[
                        "phi"], data_type=Type.String))
            gamma = float(self.input(" Gamma?", default_value=tmp_ref[
                          "gamma"], data_type=Type.String))
            mu = float(self.input(" Mu?", default_value=tmp_ref[
                       "mu"], data_type=Type.String))
            self.output("")
            self.output("Enter %s HKL coordinates" % ref_txt)
            H = float(self.input(" H?", default_value=tmp_ref[
                      "h"], data_type=Type.String))
            K = float(self.input(" K?", default_value=tmp_ref[
                      "k"], data_type=Type.String))
            L = float(self.input(" L?", default_value=tmp_ref[
                      "l"], data_type=Type.String))
            self.output("")
        # Check collinearity against the *other* orientation reflection.
        if ref_id == 0:
            hkl_ref = _diffrac.get_hkl_ref1(self)
        if ref_id == 1:
            hkl_ref = _diffrac.get_hkl_ref0(self)
        if ref_id < 2:
            if len(hkl_ref) > 1:
                check = _diffrac.check_collinearity(
                    self, hkl_ref[0], hkl_ref[1], hkl_ref[2], H, K, L)
                if check:
                    self.warning(
                        "Can not orient: ref0 is parallel to ref1 %9.5f %9.5f %9.5f" % (H, K, L))
                    return
        # Set reflection
        values = [ref_id, H, K, L]
        self.diffrac.write_attribute("SubstituteReflection", values)
        # Adjust angles: send [ref_id, angle0..angleN] in angle_names order.
        self.angle_values = {"mu": mu, "omega": theta,
                             "chi": chi, "phi": phi, "gamma": gamma, "delta": delta}
        values = []
        values.append(ref_id)
        for angle_name in self.angle_names:
            values.append(self.angle_values[angle_name])
        self.diffrac.write_attribute("AdjustAnglesToReflection", values)
        # Recompute UB with the updated reflection.
        self.execMacro('computeub')
class setaz(iMacro, _diffrac):
    """ Set hkl values of the psi reference vector"""

    param_def = [
        ['PsiH', Type.Float, -999, "H value of psi reference vector"],
        ['PsiK', Type.Float, -999, "K value of psi reference vector"],
        ['PsiL', Type.Float, -999, "L value of psi reference vector"],
    ]

    def prepare(self, PsiH, PsiK, PsiL):
        _diffrac.prepare(self)

    def run(self, PsiH, PsiK, PsiL):
        # Remember engine/mode so they can be restored at the end.
        engine_restore = self.diffrac.engine
        mode_restore = self.diffrac.enginemode
        # PsiL at its sentinel default -> prompt interactively, defaulting
        # to the values currently set in the psi_constant_vertical mode.
        if PsiL == -999:
            self.diffrac.write_attribute("engine", "hkl")
            self.diffrac.write_attribute("enginemode", "psi_constant_vertical")
            azh = self.diffrac.read_attribute("psirefh").value
            azk = self.diffrac.read_attribute("psirefk").value
            azl = self.diffrac.read_attribute("psirefl").value
            self.output("")
            self.output("Enter azimuthal reference H K L:")
            a1 = self.input(" Azimuthal H?", default_value=azh,
                            data_type=Type.String)
            a2 = self.input(" Azimuthal K?", default_value=azk,
                            data_type=Type.String)
            a3 = self.input(" Azimuthal L?", default_value=azl,
                            data_type=Type.String)
            PsiH = float(a1)
            PsiK = float(a2)
            PsiL = float(a3)
        # Write the reference vector consistently in both psi-constant
        # modes of the hkl engine and in the psi engine.
        self.diffrac.write_attribute("engine", "hkl")
        self.diffrac.write_attribute("enginemode", "psi_constant_vertical")
        self.diffrac.write_attribute("psirefh", PsiH)
        self.diffrac.write_attribute("psirefk", PsiK)
        self.diffrac.write_attribute("psirefl", PsiL)
        self.diffrac.write_attribute("enginemode", "psi_constant_horizontal")
        self.diffrac.write_attribute("psirefh", PsiH)
        self.diffrac.write_attribute("psirefk", PsiK)
        self.diffrac.write_attribute("psirefl", PsiL)
        self.diffrac.write_attribute("engine", "psi")
        self.diffrac.write_attribute("psirefh", PsiH)
        self.diffrac.write_attribute("psirefk", PsiK)
        self.diffrac.write_attribute("psirefl", PsiL)
        # Restore the original engine/mode and persist the crystal file.
        self.diffrac.write_attribute("engine", engine_restore)
        self.diffrac.write_attribute("enginemode", mode_restore)
        self.execMacro('savecrystal')
class computeub(Macro, _diffrac):
    """Compute the UB matrix from orientation reflections 0 and 1."""

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        reflections = self.diffrac.reflectionlist
        # Guard clauses: need at least two stored reflections.
        if reflections == None:
            self.warning("UB can not be computed. No reflection")
            return
        if len(reflections) < 2:
            self.warning("UB can not be computed. Only one reflection")
            return
        self.output("Computing UB with reflections 0 and 1")
        self.diffrac.write_attribute("ComputeUB", [0, 1])
        self.execMacro('savecrystal')
class addreflection(Macro, _diffrac):
    """Append a reflection at the bottom of the reflections list."""

    param_def = [
        ['H', Type.Float, None, "H value"],
        ['K', Type.Float, None, "K value"],
        ['L', Type.Float, None, "L value"],
        ['affinement', Type.Float, -999., "Affinement"]
    ]

    def prepare(self, H, K, L, affinement):
        _diffrac.prepare(self)

    def run(self, H, K, L, affinement):
        reflection_data = [H, K, L]
        # A non-default affinement is forwarded as optional fourth element.
        if affinement != -999.:
            reflection_data.append(affinement)
        self.diffrac.write_attribute("AddReflection", reflection_data)
class affine(Macro, _diffrac):
    """Affine current crystal.
    Fine tuning of lattice parameters and UB matrix based on the current
    crystal reflections. Reflections with affinement set to 0 are not
    used. A new crystal with the post fix (affine) is created and set as
    current crystal."""

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        # The written value is irrelevant; the write itself triggers the
        # affinement on the server side.
        self.diffrac.write_attribute("AffineCrystal", 0)
class orswap(Macro, _diffrac):
    """Swap the primary and secondary orientation reflections."""

    def prepare(self):
        _diffrac.prepare(self)

    def run(self):
        # The written value is irrelevant; the write triggers the swap.
        self.diffrac.write_attribute("SwapReflections01", 0)
        self.output("Orientation vectors swapped.")
        self.execMacro('computeub')
class newcrystal(iMacro, _diffrac):
    """ Create a new crystal (if it does not exist) and select it. """

    param_def = [
        ['crystal_name', Type.String, "", 'Name of the crystal to add and select']
    ]

    def prepare(self, crystal_name):
        _diffrac.prepare(self)

    def run(self, crystal_name):
        crystal_list = self.diffrac.crystallist
        to_add = 1
        i = 1
        # Empty name -> interactive selection from the existing crystals.
        if crystal_name == "":
            crystal_name = self.diffrac.crystal
            self.output("Available crystals:")
            for crystal in crystal_list:
                self.output("(%s) %s" % (i, crystal))
                # Remember the display name of the currently set crystal.
                # NOTE(review): iselname is only bound when the current
                # crystal appears in crystallist — verify this always holds.
                if crystal_name == crystal:
                    iselname = crystal
                i = i + 1
            a = self.input("New crystal?", default_value=iselname,
                           data_type=Type.String)
            # The answer may be a 1-based index into the printed list;
            # translate it back to a crystal name when it is numeric.
            try:
                a1 = int(a)
                i = 1
                for crystal in crystal_list:
                    if a1 == i:
                        a = crystal
                    i = i + 1
                # Out-of-range index falls back to the current crystal.
                if a1 > i - 1:
                    a = iselname
            except:
                pass
            if a != iselname:
                crystal_name = a
            else:
                crystal_name = iselname
        # Only add the crystal if it is not already known.
        for crystal in crystal_list:
            if crystal_name == crystal:
                to_add = 0
        if to_add:
            self.diffrac.write_attribute("addcrystal", crystal_name)
        self.diffrac.write_attribute("crystal", crystal_name)
        self.output("")
        self.output("Crystal selected: %s " % crystal_name)
        # A freshly added crystal also needs its lattice parameters.
        if to_add:
            a = self.input(" Lattice a?", default_value=5.43,
                           data_type=Type.String)
            b = self.input(" Lattice b?", default_value=5.43,
                           data_type=Type.String)
            c = self.input(" Lattice c?", default_value=5.43,
                           data_type=Type.String)
            alpha = self.input(" Lattice alpha?",
                               default_value=90, data_type=Type.String)
            beta = self.input(" Lattice beta?",
                              default_value=90, data_type=Type.String)
            gamma = self.input(" Lattice gamma?",
                               default_value=90, data_type=Type.String)
            self.output("")
            self.diffrac.write_attribute("a", float(a))
            self.diffrac.write_attribute("b", float(b))
            self.diffrac.write_attribute("c", float(c))
            self.diffrac.write_attribute("alpha", float(alpha))
            self.diffrac.write_attribute("beta", float(beta))
            self.diffrac.write_attribute("gamma", float(gamma))
class hscan(aNscan, Macro, _diffrac):
    """Scan the reciprocal-space h axis."""

    param_def = [
        ['start_pos', Type.Float, None, 'Scan start position'],
        ['final_pos', Type.Float, None, 'Scan final position'],
        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],
        ['integ_time', Type.Float, None, 'Integration time'],
    ]

    def prepare(self, start_pos, final_pos, nr_interv, integ_time):
        # Diffractometer bookkeeping first, then hand off to the generic
        # N-dimensional scan preparation with the h pseudo motor.
        _diffrac.prepare(self)
        motors = [self.h_device]
        starts = [start_pos]
        finals = [final_pos]
        aNscan._prepare(self, motors, starts, finals, nr_interv, integ_time)
class kscan(aNscan, Macro, _diffrac):
    """Scan the reciprocal-space k axis."""

    param_def = [
        ['start_pos', Type.Float, None, 'Scan start position'],
        ['final_pos', Type.Float, None, 'Scan final position'],
        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],
        ['integ_time', Type.Float, None, 'Integration time'],
    ]

    def prepare(self, start_pos, final_pos, nr_interv, integ_time):
        # Diffractometer bookkeeping first, then hand off to the generic
        # N-dimensional scan preparation with the k pseudo motor.
        _diffrac.prepare(self)
        motors = [self.k_device]
        starts = [start_pos]
        finals = [final_pos]
        aNscan._prepare(self, motors, starts, finals, nr_interv, integ_time)
class lscan(aNscan, Macro, _diffrac):
    """Scan the reciprocal-space l axis."""

    param_def = [
        ['start_pos', Type.Float, None, 'Scan start position'],
        ['final_pos', Type.Float, None, 'Scan final position'],
        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],
        ['integ_time', Type.Float, None, 'Integration time'],
    ]

    def prepare(self, start_pos, final_pos, nr_interv, integ_time):
        # Diffractometer bookkeeping first, then hand off to the generic
        # N-dimensional scan preparation with the l pseudo motor.
        _diffrac.prepare(self)
        motors = [self.l_device]
        starts = [start_pos]
        finals = [final_pos]
        aNscan._prepare(self, motors, starts, finals, nr_interv, integ_time)
class hklscan(aNscan, Macro, _diffrac):
    """Simultaneous scan of the h, k and l reciprocal-space axes."""

    param_def = [
        ['h_start_pos', Type.Float, None, 'Scan h start position'],
        ['h_final_pos', Type.Float, None, 'Scan h final position'],
        ['k_start_pos', Type.Float, None, 'Scan k start position'],
        ['k_final_pos', Type.Float, None, 'Scan k final position'],
        ['l_start_pos', Type.Float, None, 'Scan l start position'],
        ['l_final_pos', Type.Float, None, 'Scan l final position'],
        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],
        ['integ_time', Type.Float, None, 'Integration time'],
    ]

    def prepare(self, h_start_pos, h_final_pos, k_start_pos, k_final_pos, l_start_pos, l_final_pos, nr_interv, integ_time):
        # Diffractometer bookkeeping first, then generic scan preparation
        # over the three reciprocal-space pseudo motors in lockstep.
        _diffrac.prepare(self)
        motors = [self.h_device, self.k_device, self.l_device]
        starts = [h_start_pos, k_start_pos, l_start_pos]
        finals = [h_final_pos, k_final_pos, l_final_pos]
        aNscan._prepare(self, motors, starts, finals, nr_interv, integ_time)
class th2th(Macro):
    """th2th - scan:

    Relative scan around current position in del and th with d_th=2*d_delta
    """

    param_def = [
        ['rel_start_pos', Type.Float, -999, 'Scan start position'],
        ['rel_final_pos', Type.Float, -999, 'Scan final position'],
        ['nr_interv', Type.Integer, -999, 'Number of scan intervals'],
        ['integ_time', Type.Float, -999, 'Integration time']
    ]

    def run(self, rel_start_pos, rel_final_pos, nr_interv, integ_time):
        """Run a coupled d2scan on the 'del' and 'th' motors, moving th by
        half of the del step (classic theta/2-theta coupling).

        All parameters default to -999; when integ_time still carries that
        sentinel the user gave no arguments and only a usage message is
        printed.
        """
        if integ_time != -999:
            motor_del = self.getObj("del")
            motor_th = self.getObj("th")
            # Removed unused getPosition() reads and the unused binding of
            # the scan object present in the original code.
            self.d2scan(motor_del, rel_start_pos, rel_final_pos,
                        motor_th, rel_start_pos / 2, rel_final_pos / 2,
                        nr_interv, integ_time)
        else:
            self.output(
                "Usage: th2th tth_start_rel tth_stop_rel intervals time")
# Scan-point counter shared between luppsi and its pre-move hook.
count_scan = 1


class HookPars:
    """Plain attribute container used to pass parameters into scan hooks."""
    pass


def hook_pre_move(self, hook_pars):
    """Pre-move hook for psi scans.

    Re-freezes the psi pseudo angle at the next scan point and recomputes
    the UB matrix for the (h, k, l) reflection stored in `hook_pars`.

    :param self: the running macro (provides execMacro)
    :param hook_pars: HookPars with psi_save, angle_start, angle_interv,
                      h, k and l attributes
    """
    global count_scan
    # The original code had a stray second '+' (a no-op unary plus, almost
    # certainly a typo); the computed value is unchanged here.
    self.execMacro('freeze', 'psi',
                   hook_pars.psi_save + hook_pars.angle_start +
                   (count_scan - 1) * hook_pars.angle_interv)
    self.execMacro('ubr', hook_pars.h, hook_pars.k, hook_pars.l)
    count_scan = count_scan + 1
class luppsi(Macro, _diffrac):
    """psi scan:
    Relative scan psi angle
    [TODO] Still not tested
    """
    param_def = [
        ['rel_start_angle', Type.Float, -999, 'Relative start scan angle'],
        ['rel_final_angle', Type.Float, -999, 'Relative final scan angle'],
        ['nr_interv', Type.Integer, -999, 'Number of scan intervals'],
        ['integ_time', Type.Float, -999, 'Integration time']
    ]
    # NOTE(review): the prepare() parameter names do not match param_def
    # (they look copied from another macro); the arity matches, so the
    # macro still works -- confirm and rename.
    def prepare(self, H, K, L, AnglesIndex):
        _diffrac.prepare(self)
    def run(self, rel_start_angle, rel_final_angle, nr_interv, integ_time):
        # count_scan is shared with hook_pre_move(), which steps psi
        # between the scan points; it must be reset before every scan.
        global count_scan
        count_scan = 1
        # integ_time keeps its -999 sentinel when the user gave no
        # arguments; in that case only the usage message is printed.
        if ((integ_time != -999)):
            # psi is only defined in hkl mode with the
            # psi_constant_vertical engine mode.
            self.diffrac.write_attribute("engine", "hkl")
            self.diffrac.write_attribute("enginemode", "psi_constant_vertical")
            # Remember the current reflection so it can be restored by the
            # hook and after the scan.
            h = self.h_device.position
            k = self.k_device.position
            l = self.l_device.position
            psi_positions = []  # NOTE(review): assigned but never used
            try:
                psi_save = self.psidevice.Position
            except:
                self.error(
                    "Not able to read psi. Check if environment Psi is defined")
                return
            angle_interv = abs(rel_final_angle - rel_start_angle) / nr_interv
            # Construct scan macro
            self.output(self.psidevice.alias())
            psi_motor = self.getMotor(self.psidevice.alias())
            self.output(psi_motor)
            macro, pars = self.createMacro('dscan %s %f %f %d %f ' %
                                           (self.psidevice.alias(), rel_start_angle, rel_final_angle, nr_interv, integ_time))
            # Parameters for scan hook function
            hook_pars = HookPars()
            hook_pars.psi_save = psi_save
            hook_pars.angle_interv = angle_interv
            hook_pars.angle_start = rel_start_angle
            hook_pars.h = h
            hook_pars.k = k
            hook_pars.l = l
            # Re-freeze psi and recompute UB before every move of the scan.
            f = lambda: hook_pre_move(self, hook_pars)
            macro.hooks = [
                (f, ["pre-move"]),
            ]
            # Start the scan
            self.runMacro(macro)
            # Return to start position
            self.info("Return to start position " + str(psi_save))
            self.execMacro('freeze', 'psi', psi_save)
            self.execMacro('ubr', h, k, l)
            self.psidevice.write_attribute("Position", psi_save)
        else:
            self.output(
                "Usage: luppsi rel_startangle rel_stopangle n_intervals time")
class savecrystal(Macro, _diffrac):
    """
    Save crystal information to file
    """
    def prepare(self):
        # Set up the diffractometer device proxies.
        _diffrac.prepare(self)
    def run(self):
        self.info("Be aware: changes in crystal file format are still possible.")
        # Writing SaveCrystal triggers the device to dump the current
        # crystal information to its crystal file.
        self.diffrac.write_attribute("SaveCrystal", 1)
class loadcrystal(iMacro, _diffrac):
    """
    Load crystal information from file
    """

    def prepare(self):
        # Set up the diffractometer device proxies.
        _diffrac.prepare(self)

    def run(self):
        """List the ``*.txt`` crystal files in ``~/crystals/`` (or in a
        directory typed by the user if that one is missing), prompt for a
        1-based choice and load the selected file into the diffractometer.
        """
        self.info("Be aware: changes in crystal file format are still possible.")
        active_dir = ""
        try:
            files = os.listdir(os.path.expanduser('~') + '/crystals/')
            active_dir = os.path.expanduser('~') + '/crystals/'
        except OSError:
            self.output(
                "Directory for loading files %s/crystals does not exist" % os.path.expanduser('~'))
            newdir = self.input("Type new directory")
            try:
                files = os.listdir(newdir)
                active_dir = newdir
            except OSError:
                self.output("New directory %s not found" % newdir)
                return
        # filter() is lazy in Python 3, so len() on it raised TypeError and
        # iterating it twice yielded nothing -- materialize a list instead.
        res = [name for name in files if name.endswith('.txt')]
        if len(res) == 0:
            self.output("No crystals available in set directory. Nothing done")
            return
        # Show the candidates without their .txt extension, 1-based.
        for i, filename in enumerate(res, 1):
            self.output("(%s) %s" % (i, filename.split('.')[0]))
        a0 = self.input("Your choice? ")
        try:
            choice = int(a0)
        except ValueError:
            # Anything that is not an integer index is rejected, matching
            # the original behavior.
            if a0 != "":
                self.output("Wrong input!")
            else:
                self.output("An input file has to be given. Nothing done")
            return
        if not 1 <= choice <= len(res):
            # Previously an out-of-range index still attempted the device
            # write with an unbound file name; now we stop cleanly.
            self.output("Input out of range!")
            return
        selected = res[choice - 1]
        self.output("")
        self.output("File to load %s" % (active_dir + selected))
        self.diffrac.write_attribute("loadcrystal", active_dir + selected)
        self.diffrac.read_attribute("loadcrystal")
class latticecal(iMacro, _diffrac):
    """
    Calibrate lattice parameters a, b or c to current 2theta value
    """

    param_def = [
        ["parameter", Type.String, "", "Parameter"],
    ]

    # Maps each lattice parameter to the attribute name of the
    # reciprocal-space pseudo motor used for its calibration.
    _device_attr = {"a": "h_device", "b": "k_device", "c": "l_device"}

    def prepare(self, parameter):
        # Set up the diffractometer device proxies.
        _diffrac.prepare(self)

    def run(self, parameter):
        """Rescale lattice parameter ``parameter`` so that the current
        h (for a), k (for b) or l (for c) position becomes its nearest
        integer value, then recompute the UB matrix.

        Called without argument it prints a usage message.  The three
        copy-pasted a/b/c branches of the original are folded into one.
        """
        if parameter == "":
            self.output(
                "Calibration of lattice parameters a, b or c to current 2theta value")
            self.output("usage: latticecal parameter")
            return
        if parameter not in ("a", "b", "c"):
            self.output("Lattice parameter a, b or c")
            return
        old_value = getattr(self.diffrac, parameter)
        self.output("Old lattice parameter %s = %s" %
                    (parameter, old_value))
        # Current reciprocal coordinate along the matching axis; scale the
        # lattice parameter so this position becomes the nearest integer.
        position = getattr(self, self._device_attr[parameter]).position
        new_value = round(position) / position * old_value
        self.output("New lattice parameter %s = %s" %
                    (parameter, new_value))
        self.diffrac.write_attribute(parameter, new_value)
        self.execMacro('computeub')
class _blockprintmove(Macro, _diffrac):
    """This macro is internal and reserved to the hkl infrastucture

    Blocks until all diffractometer angle motors have stopped moving,
    optionally printing the current (h, k, l) position once per second
    while waiting.
    """
    param_def = [
        ['flagprint', Type.Integer, 0, '1 for printing']
    ]
    def prepare(self, flagprint):
        # Set up the diffractometer device proxies / angle bookkeeping.
        _diffrac.prepare(self)
    def run(self, flagprint):
        moving = 1
        tmp_dev = {}
        for angle in self.angle_names:
            tmp_dev[angle] = self.getDevice(self.angle_device_names[angle])
        # Poll all angle devices until none of them reports state 6.
        while(moving):
            moving = 0
            for angle in self.angle_names:
                # NOTE(review): 6 is assumed to be Tango DevState.MOVING --
                # confirm against the PyTango state enum.
                if tmp_dev[angle].state() == 6:
                    moving = 1
            if flagprint == 1:
                self.outputBlock(" %7.5f %7.5f %7.5f" % (
                    self.h_device.position, self.k_device.position, self.l_device.position))
                self.flushOutput()
            # checkPoint() allows the macro to be aborted between polls.
            self.checkPoint()
            time.sleep(1.0)
        # Print the final (h, k, l) position once movement has finished.
        if flagprint == 1:
            self.outputBlock(" %7.5f %7.5f %7.5f" % (
                self.h_device.position, self.k_device.position, self.l_device.position))
            self.flushOutput()
class _diff_scan(Macro):
    """Performs a scan keeping the data for further analysis/moves.
    This macro is internal and reserved to the hkl infrastucture.
    """
    param_def = [
        ['motor', Type.Motor, None, 'Motor to move'],
        ['start_pos', Type.Float, None, 'Scan start position'],
        ['final_pos', Type.Float, None, 'Scan final position'],
        ['nr_interv', Type.Integer, None, 'Number of scan intervals'],
        ['integ_time', Type.Float, None, 'Integration time'],
        ['channel', Type.ExpChannel, None, 'Channel to analize']
    ]
    def run(self, motor, start_pos, final_pos, nr_interv, integ_time, channel):
        """Run an ascan of `motor` and report the motor position at which
        `channel` recorded its maximum value."""
        ascan, pars= self.createMacro("ascan",motor, start_pos, final_pos, nr_interv, integ_time)
        self.runMacro(ascan)
        channel_fullname = channel.getFullName()
        motor_name = motor.getName()
        # Collect channel readings and motor positions from the scan data.
        arr_data = []
        arr_motpos = []
        for record in ascan.data.records:
            record_data = record.data
            arr_data.append(record_data[channel_fullname])
            arr_motpos.append(record_data[motor_name])
        # Find motor position corresponding to the maximum of channel values
        idx_max = np.argmax(arr_data)
        pos_max = arr_motpos[idx_max]
        self.output("Position to move")
        self.output(pos_max)
|
sagiss/sardana
|
src/sardana/macroserver/macros/hkl.py
|
Python
|
lgpl-3.0
| 54,709
|
[
"CRYSTAL"
] |
319e0a05e7815cec6eb2d4be42a382a012502f74025850523d587bf8249ee55a
|
r"""
Gradient analyses (:mod:`skbio.stats.gradient`)
===============================================
.. currentmodule:: skbio.stats.gradient
This module provides functionality for performing gradient analyses.
The algorithms included in this module mainly allow performing analysis of
volatility on time series data, but they can be applied to any data that
contains a gradient.
Classes
-------
.. autosummary::
:toctree: generated/
GradientANOVA
AverageGradientANOVA
TrajectoryGradientANOVA
FirstDifferenceGradientANOVA
WindowDifferenceGradientANOVA
GroupResults
CategoryResults
GradientANOVAResults
Examples
--------
Assume we have the following coordinates:
>>> import numpy as np
>>> import pandas as pd
>>> from skbio.stats.gradient import AverageGradientANOVA
>>> coord_data = {'PC.354': np.array([0.2761, -0.0341, 0.0633, 0.1004]),
... 'PC.355': np.array([0.2364, 0.2186, -0.0301, -0.0225]),
... 'PC.356': np.array([0.2208, 0.0874, -0.3519, -0.0031]),
... 'PC.607': np.array([-0.1055, -0.4140, -0.15, -0.116]),
... 'PC.634': np.array([-0.3716, 0.1154, 0.0721, 0.0898])}
>>> coords = pd.DataFrame.from_dict(coord_data, orient='index')
the following metadata map:
>>> metadata_map = {'PC.354': {'Treatment': 'Control', 'Weight': '60'},
... 'PC.355': {'Treatment': 'Control', 'Weight': '55'},
... 'PC.356': {'Treatment': 'Control', 'Weight': '50'},
... 'PC.607': {'Treatment': 'Fast', 'Weight': '65'},
... 'PC.634': {'Treatment': 'Fast', 'Weight': '68'}}
>>> metadata_map = pd.DataFrame.from_dict(metadata_map, orient='index')
and the following array with the proportion explained of each coord:
>>> prop_expl = np.array([25.6216, 15.7715, 14.1215, 11.6913, 9.8304])
Then to compute the average trajectory of this data:
>>> av = AverageGradientANOVA(coords, prop_expl, metadata_map,
... trajectory_categories=['Treatment'],
... sort_category='Weight')
>>> trajectory_results = av.get_trajectories()
Check the algorithm used to compute the trajectory_results:
>>> print(trajectory_results.algorithm)
avg
Check if we weighted the data or not:
>>> print(trajectory_results.weighted)
False
Check the results of one of the categories:
>>> print(trajectory_results.categories[0].category)
Treatment
>>> print(trajectory_results.categories[0].probability)
0.0118478282382
Check the results of one group of one of the categories:
>>> print(trajectory_results.categories[0].groups[0].name)
Control
>>> print(trajectory_results.categories[0].groups[0].trajectory)
[ 3.52199973 2.29597001 3.20309816]
>>> print(trajectory_results.categories[0].groups[0].info)
{'avg': 3.007022633956606}
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from copy import deepcopy
from collections import defaultdict
from numbers import Integral
import numpy as np
from natsort import realsorted
from scipy.stats import f_oneway
from skbio.util._decorator import experimental
def _weight_by_vector(trajectories, w_vector):
r"""weights the values of `trajectories` given a weighting vector
`w_vector`.
Each value in `trajectories` will be weighted by the 'rate of change'
to 'optimal rate of change' ratio. The 'rate of change' of a vector
measures how each point in the vector changes with respect to its
predecessor point. The 'optimal rate of change' is the rate of change
in which each point in the vector performs the same change than its
predecessor, meaning that when calling this function over evenly spaced
`w_vector` values, no change will be reflected on the output.
Parameters
----------
trajectories: pandas.DataFrame
Values to weight
w_vector: pandas.Series
Values used to weight `trajectories`
Returns
-------
pandas.DataFrame
A weighted version of `trajectories`.
Raises
------
ValueError
If `trajectories` and `w_vector` don't have equal lengths
If `w_vector` is not a gradient
TypeError
If `trajectories` and `w_vector` are not iterables
"""
try:
if len(trajectories) != len(w_vector):
raise ValueError("trajectories (%d) & w_vector (%d) must be equal "
"lengths" % (len(trajectories), len(w_vector)))
except TypeError:
raise TypeError("trajectories and w_vector must be iterables")
# check no repeated values are passed in the weighting vector
if len(set(w_vector)) != len(w_vector):
raise ValueError("The weighting vector must be a gradient")
# no need to weight in case of a one element vector
if len(w_vector) == 1:
return trajectories
# Cast to float so divisions have a floating point resolution
total_length = float(max(w_vector) - min(w_vector))
# Reflects the expected gradient between subsequent values in w_vector
# the first value isn't weighted so subtract one from the number of
# elements
optimal_gradient = total_length/(len(w_vector)-1)
# for all elements apply the weighting function
for i, idx in enumerate(trajectories.index):
# Skipping the first element is it doesn't need to be weighted
if i != 0:
trajectories.ix[idx] = (trajectories.ix[idx] * optimal_gradient /
(np.abs((w_vector[i] - w_vector[i-1]))))
return trajectories
def _ANOVA_trajectories(category, res_by_group):
    r"""Run a one-way ANOVA over the group trajectories in `res_by_group`.

    If ANOVA cannot be run in the current category (because either there is
    only one group in category or there is a group with only one member)
    the result CategoryResults instance has `probability` and `groups` set
    to None and message is set to a string explaining why ANOVA was not run

    Parameters
    ----------
    category : str
        The name of the metadata category under test
    res_by_group : list of GroupResults
        The trajectory results of every group within `category`

    Returns
    -------
    CategoryResults
        An instance of CategoryResults holding the results of the trajectory
        analysis applied on `category`
    """
    # If there is only one group under category we cannot run ANOVA
    if len(res_by_group) == 1:
        return CategoryResults(category, None, None,
                               'Only one value in the group.')

    # Check if groups can be tested using ANOVA. ANOVA testing requires
    # all elements to have at least size greater to one.
    values = [res.trajectory.astype(float) for res in res_by_group]
    # any() short-circuits over a generator; no need for the intermediate
    # list the original built.
    if any(len(value) == 1 for value in values):
        return CategoryResults(category, None, None,
                               'This group can not be used. All groups '
                               'should have more than 1 element.')

    # We are ok to run ANOVA
    _, p_val = f_oneway(*values)
    return CategoryResults(category, p_val, res_by_group, None)
class GroupResults:
    """Store the trajectory results of a group of a metadata category

    Attributes
    ----------
    name : str
        The name of the group within the metadata category
    trajectory : array like
        The result trajectory in an 1-D numpy array
    mean : float
        The mean of the trajectory
    info : dict
        Any extra information computed by the trajectory algorithm. Depends on
        the algorithm
    message : str
        A message with information of the execution of the algorithm
    """
    @experimental(as_of="0.4.0")
    def __init__(self, name, trajectory, mean, info, message):
        self.name = name
        self.trajectory = trajectory
        self.mean = mean
        self.info = info
        self.message = message
    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results for a category group to files
        in text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectory analysis data to. Must have a
            `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectories trajectory values. Must have
            a `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        """
        out_f.write('For group "%s", the group means is: %f\n'
                    % (self.name, self.mean))
        raw_f.write('For group "%s":\n' % self.name)
        # message is None when the algorithm ran without incident.
        if self.message:
            out_f.write('%s\n' % self.message)
            raw_f.write('%s\n' % self.message)
        # Sort the info items so the output is deterministic.
        out_f.write('The info is: %s\n'
                    % sorted(((k, v) for k, v in self.info.items())))
        raw_f.write('The trajectory is:\n[%s]\n'
                    % ", ".join(map(str, self.trajectory)))
class CategoryResults:
    """Store the trajectory results of a metadata category

    Attributes
    ----------
    category : str
        The name of the category
    probability : float
        The ANOVA probability that the category groups are independent
    groups : list of GroupResults
        The trajectory results for each group in the category
    message : str
        A message with information of the execution of the algorithm
    """
    @experimental(as_of="0.4.0")
    def __init__(self, category, probability, groups, message):
        self.category = category
        self.probability = probability
        self.groups = groups
        self.message = message
    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results for a category to files in
        text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectory analysis data to. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectory raw values. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        """
        # A None probability means ANOVA could not be run; `message` then
        # explains why and `groups` is None as well.
        if self.probability is None:
            out_f.write('Grouped by "%s": %s\n'
                        % (self.category, self.message))
        else:
            out_f.write('Grouped by "%s", probability: %f\n'
                        % (self.category, self.probability))
            raw_f.write('Grouped by "%s"\n' % self.category)
            for group in self.groups:
                group.to_files(out_f, raw_f)
class GradientANOVAResults:
    """Store the trajectory results

    Attributes
    ----------
    algorithm : str
        The algorithm used to compute trajectories
    weighted : bool
        If true, a weighting vector was used
    categories : list of CategoryResults
        The trajectory results for each metadata category
    """
    @experimental(as_of="0.4.0")
    def __init__(self, algorithm, weighted, categories):
        self.algorithm = algorithm
        self.weighted = weighted
        self.categories = categories
    @experimental(as_of="0.4.0")
    def to_files(self, out_f, raw_f):
        r"""Save the trajectory analysis results to files in text format.

        Parameters
        ----------
        out_f : file-like object
            File-like object to write trajectories analysis data to. Must have
            a `write` method. It is the caller's responsibility to close
            `out_f` when done (if necessary)
        raw_f : file-like object
            File-like object to write trajectories raw values. Must have a
            `write` method. It is the caller's responsibility to close `out_f`
            when done (if necessary)
        """
        # Header lines go to both the analysis and the raw output.
        out_f.write('Trajectory algorithm: %s\n' % self.algorithm)
        raw_f.write('Trajectory algorithm: %s\n' % self.algorithm)
        if self.weighted:
            out_f.write('** This output is weighted **\n')
            raw_f.write('** This output is weighted **\n')
        out_f.write('\n')
        raw_f.write('\n')
        # Delegate per-category formatting, separating categories with a
        # blank line in both files.
        for cat_results in self.categories:
            cat_results.to_files(out_f, raw_f)
            out_f.write('\n')
            raw_f.write('\n')
class GradientANOVA:
    r"""Base class for the Trajectory algorithms

    Parameters
    ----------
    coords : pandas.DataFrame
        The coordinates for each sample id
    prop_expl : array like
        The numpy 1-D array with the proportion explained by each axis in
        coords
    metadata_map : pandas.DataFrame
        The metadata map, indexed by sample ids and columns are metadata
        categories
    trajectory_categories : list of str, optional
        A list of metadata categories to use to create the trajectories. If
        None is passed, the trajectories for all metadata categories are
        computed. Default: None, compute all of them
    sort_category : str, optional
        The metadata category to use to sort the trajectories. Default: None
    axes : int, optional
        The number of axes to account while doing the trajectory specific
        calculations. Pass 0 to compute all of them. Default: 3
    weighted : bool, optional
        If true, the output is weighted by the space between samples in the
        `sort_category` column

    Raises
    ------
    ValueError
        If any category of `trajectory_categories` is not present in
        `metadata_map`
        If `sort_category` is not present in `metadata_map`
        If `axes` is not between 0 and the maximum number of axes available
        If `weighted` is True and no `sort_category` is provided
        If `weighted` is True and the values under `sort_category` are not
        numerical
        If `coords` and `metadata_map` does not have samples in common
    """

    # Should be defined by the derived classes
    _alg_name = None

    @experimental(as_of="0.4.0")
    def __init__(self, coords, prop_expl, metadata_map,
                 trajectory_categories=None, sort_category=None, axes=3,
                 weighted=False):
        if not trajectory_categories:
            # If trajectory_categories is not provided, use all the categories
            # present in the metadata map
            trajectory_categories = metadata_map.keys()
        else:
            # Check that trajectory_categories are in metadata_map
            for category in trajectory_categories:
                if category not in metadata_map:
                    raise ValueError("Category %s not present in metadata."
                                     % category)

        # Check that sort_categories is in metadata_map
        if sort_category and sort_category not in metadata_map:
            raise ValueError("Sort category %s not present in metadata."
                             % sort_category)

        if axes == 0:
            # If axes == 0, we should compute the trajectories for all axes
            axes = len(prop_expl)
        elif axes > len(prop_expl) or axes < 0:
            # Axes should be 0 <= axes <= len(prop_expl)
            raise ValueError("axes should be between 0 and the max number of "
                             "axes available (%d), found: %d "
                             % (len(prop_expl), axes))

        # Restrict coordinates to those axes that we actually need to compute.
        # The removed DataFrame.ix indexer (`coords.ix[:, :axes-1]`) is
        # replaced by a positional iloc slice keeping the first `axes`
        # columns -- equivalent for the usual 0-based integer column labels
        # of an ordination result.
        self._coords = coords.iloc[:, :axes]
        self._prop_expl = prop_expl[:axes]
        self._metadata_map = metadata_map
        self._weighted = weighted

        # Remove any samples from coords not present in mapping file
        # and remove any samples from metadata_map not present in coords
        self._normalize_samples()

        # Create groups
        self._make_groups(trajectory_categories, sort_category)

        # Compute the weighting_vector
        self._weighting_vector = None
        if weighted:
            if not sort_category:
                raise ValueError("You should provide a sort category if you "
                                 "want to weight the trajectories")
            try:
                self._weighting_vector = \
                    self._metadata_map[sort_category].astype(np.float64)
            except ValueError:
                raise ValueError("The sorting category must be numeric")

        # Initialize the message buffer
        self._message_buffer = []

    @experimental(as_of="0.4.0")
    def get_trajectories(self):
        r"""Compute the trajectories for each group in each category and run
        ANOVA over the results to test group independence.

        Returns
        -------
        GradientANOVAResults
            An instance of GradientANOVAResults holding the results.
        """
        result = GradientANOVAResults(self._alg_name, self._weighted, [])
        # Loop through all the categories that we should compute
        # the trajectories
        for cat, cat_groups in self._groups.items():
            # Loop through all the category values present in the current
            # category and compute the trajectory for each of them
            res_by_group = []
            for group in sorted(cat_groups, key=lambda k: str(k)):
                res_by_group.append(
                    self._get_group_trajectories(group, cat_groups[group]))
            result.categories.append(_ANOVA_trajectories(cat, res_by_group))
        return result

    def _normalize_samples(self):
        r"""Ensures that `self._coords` and `self._metadata_map` have the same
        sample ids

        Raises
        ------
        ValueError
            If `coords` and `metadata_map` does not have samples in common
        """
        # Figure out the sample ids in common
        coords_sample_ids = set(self._coords.index)
        mm_sample_ids = set(self._metadata_map.index)
        sample_ids = coords_sample_ids.intersection(mm_sample_ids)

        # Check if they actually have sample ids in common
        if not sample_ids:
            raise ValueError("Coordinates and metadata map had no samples "
                             "in common")

        # Need to take a subset of coords.  .loc replaces the removed .ix
        # indexer; it needs a list, not a set, for label selection.
        if coords_sample_ids != sample_ids:
            self._coords = self._coords.loc[list(sample_ids)]
        # Need to take a subset of metadata_map
        if mm_sample_ids != sample_ids:
            self._metadata_map = self._metadata_map.loc[list(sample_ids)]

    def _make_groups(self, trajectory_categories, sort_category):
        r"""Groups the sample ids in `self._metadata_map` by the values in
        `trajectory_categories`

        Creates `self._groups`, a dictionary keyed by category and values are
        dictionaries in which the keys represent the group name within the
        category and values are ordered lists of sample ids

        If `sort_category` is not None, the sample ids are sorted based on the
        values under this category in the metadata map. Otherwise, they are
        sorted using the sample id.

        Parameters
        ----------
        trajectory_categories : list of str
            A list of metadata categories to use to create the groups.
            Default: None, compute all of them
        sort_category : str or None
            The category from self._metadata_map to use to sort groups
        """
        # If sort_category is provided, we used the value of such category to
        # sort. Otherwise, we use the sample id.
        if sort_category:
            def sort_val(sid):
                return self._metadata_map[sort_category][sid]
        else:
            def sort_val(sid):
                return sid

        self._groups = defaultdict(dict)
        for cat in trajectory_categories:
            # Group samples by category
            gb = self._metadata_map.groupby(cat)
            for g, df in gb:
                self._groups[cat][g] = realsorted(df.index, key=sort_val)

    def _get_group_trajectories(self, group_name, sids):
        r"""Compute the trajectory results for `group_name` containing the
        samples `sids`.

        Weights the data if `self._weighted` is True and ``len(sids) > 1``

        Parameters
        ----------
        group_name : str
            The name of the group
        sids : list of str
            The sample ids in the group

        Returns
        -------
        GroupResults
            The trajectory results for the given group

        Raises
        ------
        RuntimeError
            If sids is an empty list
        """
        # We multiply the coord values with the prop_expl.  .loc replaces
        # the removed .ix indexer for label-based row selection.
        trajectories = self._coords.loc[sids] * self._prop_expl

        if trajectories.empty:
            # Raising a RuntimeError since in a usual execution this should
            # never happen. The only way this can happen is if the user
            # directly calls this method, which shouldn't be done
            # (that's why the method is private)
            raise RuntimeError("No samples to process, an empty list cannot "
                               "be processed")

        # The weighting can only be done over trajectories with a length
        # greater than 1
        if self._weighted and len(sids) > 1:
            trajectories_copy = deepcopy(trajectories)
            try:
                trajectories = _weight_by_vector(trajectories_copy,
                                                 self._weighting_vector[sids])
            except (FloatingPointError, ValueError):
                self._message_buffer.append("Could not weight group, no "
                                            "gradient in the the "
                                            "weighting vector.\n")
                trajectories = trajectories_copy

        return self._compute_trajectories_results(group_name,
                                                  trajectories.loc[sids])

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectories computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Raises
        ------
        NotImplementedError
            This is the base class
        """
        raise NotImplementedError("No algorithm is implemented on the base "
                                  "class.")
class AverageGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS average algorithm

    For each group in a category, it computes the average point among the
    samples in such group and then computes the norm of each sample from the
    averaged one.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'avg'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the average
            trajectories method
        """
        center = np.average(trajectories, axis=0)
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(center)])
            calc = {'avg': trajectory[0]}
        else:
            # Series.get_values() was removed from pandas; to_numpy() is
            # the supported replacement.
            trajectory = np.array([np.linalg.norm(row.to_numpy() - center)
                                   for _, row in trajectories.iterrows()])
            calc = {'avg': np.average(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class TrajectoryGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the RMS trajectory algorithm

    For each group in a category, each component of the result trajectory is
    computed as taking the sorted list of samples in the group and taking the
    norm of the coordinates of the 2nd sample minus 1st sample, 3rd sample
    minus 2nd sample and so on.

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'trajectory'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the trajectory
            method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'2-norm': trajectory[0]}
        else:
            # Loop through all the rows in trajectories and create '2-norm'
            # by taking the norm of the 2nd row - 1st row, 3rd row - 2nd
            # row...  iloc/to_numpy replace the removed .ix indexer and
            # Series.get_values().
            trajectory = \
                np.array([np.linalg.norm(trajectories.iloc[i+1].to_numpy() -
                                         trajectories.iloc[i].to_numpy())
                          for i in range(len(trajectories) - 1)])
            calc = {'2-norm': np.linalg.norm(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class FirstDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the first difference algorithm

    It calculates the norm for all the time-points and then calculates the
    first difference for each resulting point

    See Also
    --------
    GradientANOVA
    """

    _alg_name = 'diff'

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the first difference
            method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Norm of the difference between the two sample *rows*.  The
            # previous code used `trajectories[1] - trajectories[0]`, which
            # on a DataFrame selects columns 1 and 0 rather than the two
            # samples; iloc selects the intended rows.
            trajectory = np.array(
                [np.linalg.norm(trajectories.iloc[1].to_numpy() -
                                trajectories.iloc[0].to_numpy())])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # iloc/to_numpy replace the removed .ix indexer and
            # Series.get_values().
            vec_norm = \
                np.array([np.linalg.norm(trajectories.iloc[i+1].to_numpy() -
                                         trajectories.iloc[i].to_numpy())
                          for i in range(len(trajectories) - 1)])
            trajectory = np.diff(vec_norm)
            calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}

        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
class WindowDifferenceGradientANOVA(GradientANOVA):
    r"""Perform trajectory analysis using the modified first difference
    algorithm

    It calculates the norm for all the time-points and subtracts the mean of
    the next number of elements specified in `window_size` and the current
    element.

    Parameters
    ----------
    coords : pandas.DataFrame
        The coordinates for each sample id
    prop_expl : array like
        The numpy 1-D array with the proportion explained by each axis in
        coords
    metadata_map : pandas.DataFrame
        The metadata map, indexed by sample ids and columns are metadata
        categories
    window_size : int or long
        The window size to use while computing the differences

    Raises
    ------
    ValueError
        If the window_size is not a positive integer

    See Also
    --------
    GradientANOVA
    """
    _alg_name = 'wdiff'

    @experimental(as_of="0.4.0")
    def __init__(self, coords, prop_expl, metadata_map, window_size, **kwargs):
        super(WindowDifferenceGradientANOVA, self).__init__(coords, prop_expl,
                                                            metadata_map,
                                                            **kwargs)
        if not isinstance(window_size, Integral) or window_size < 1:
            raise ValueError("The window_size must be a positive integer")
        self._window_size = window_size

    def _compute_trajectories_results(self, group_name, trajectories):
        r"""Do the actual trajectory computation over trajectories

        If the first difference cannot be calculated of the provided window
        size, no difference is applied and a message is added to the results.

        Parameters
        ----------
        group_name : str
            The name of the group
        trajectories : pandas.DataFrame
            The sorted trajectories for each sample in the group

        Returns
        -------
        GroupResults
            The trajectory results for `group_name` using the windowed
            difference method
        """
        if len(trajectories) == 1:
            trajectory = np.array([np.linalg.norm(trajectories)])
            # Store the scalar norm (trajectory[0]), not the whole array,
            # consistent with FirstDifferenceGradientANOVA
            calc = {'mean': trajectory[0], 'std': 0}
        elif len(trajectories) == 2:
            # Use .iloc for positional *row* access; plain [] indexing on a
            # DataFrame selects columns instead of rows
            trajectory = np.array([np.linalg.norm(trajectories.iloc[1] -
                                                  trajectories.iloc[0])])
            calc = {'mean': trajectory[0], 'std': 0}
        else:
            # .iloc/.values replace the removed pandas DataFrame.ix /
            # Series.get_values() APIs
            vec_norm = \
                np.array([np.linalg.norm(trajectories.iloc[i+1].values -
                                         trajectories.iloc[i].values)
                          for i in range(len(trajectories) - 1)])
            # Windowed first differences won't be computable on every group,
            # especially given the variation of size that a trajectory tends
            # to have
            if len(vec_norm) <= self._window_size:
                trajectory = vec_norm
                self._message_buffer.append("Cannot calculate the first "
                                            "difference with a window of size "
                                            "(%d)." % self._window_size)
            else:
                # Replicate the last element as many times as required
                for idx in range(0, self._window_size):
                    vec_norm = np.append(vec_norm, vec_norm[-1:], axis=0)
                trajectory = []
                for idx in range(0, len(vec_norm) - self._window_size):
                    # Mean has to be over axis 0 so it handles arrays of
                    # arrays
                    element = np.mean(vec_norm[(idx + 1):
                                               (idx + 1 + self._window_size)],
                                      axis=0)
                    trajectory.append(element - vec_norm[idx])
                trajectory = np.array(trajectory)
                calc = {'mean': np.mean(trajectory), 'std': np.std(trajectory)}
        msg = ''.join(self._message_buffer) if self._message_buffer else None
        # Reset the message buffer
        self._message_buffer = []
        return GroupResults(group_name, trajectory, np.mean(trajectory),
                            calc, msg)
|
anderspitman/scikit-bio
|
skbio/stats/gradient.py
|
Python
|
bsd-3-clause
| 32,196
|
[
"scikit-bio"
] |
85859aeb986956c54af387bad56f11b96ef006f6a875a969863ed9a753269104
|
##
# Copyright 2009-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Ferret, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: George Fanourgakis (The Cyprus Institute)
"""
import os,re,fileinput,sys
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_Ferret(ConfigureMake):
    """Support for building/installing Ferret."""

    def configure_step(self):
        """Configure Ferret build.

        Ferret has no configure script; instead, its make include files are
        patched in place with the toolchain compilers/flags and the install
        prefixes of the loaded dependency modules.
        """
        buildtype = "x86_64-linux"
        # Ferret sources live in a FERRET subdirectory of the unpacked tarball
        try:
            os.chdir('FERRET')
        # 'except ... as' is valid on both Python 2.6+ and Python 3,
        # unlike the legacy 'except OSError, err' comma syntax
        except OSError as err:
            raise EasyBuildError("Failed to change to FERRET dir: %s", err)

        # All required dependencies must be provided via loaded modules
        deps = ['HDF5', 'netCDF', 'Java']
        for name in deps:
            if not get_software_root(name):
                raise EasyBuildError("%s module not loaded?", name)

        # Point site_specific.mk at the build type, install dir and the
        # dependency install prefixes (fileinput with inplace=1 rewrites the
        # file; stdout is redirected into it)
        fn = "site_specific.mk"
        for line in fileinput.input(fn, inplace=1, backup='.orig'):
            line = re.sub(r"^BUILDTYPE\s*=.*", "BUILDTYPE = %s" % buildtype, line)
            line = re.sub(r"^INSTALL_FER_DIR =.*", "INSTALL_FER_DIR = %s" % self.installdir, line)
            for name in deps:
                line = re.sub(r"^(%s.*DIR\s*)=.*" % name.upper(), r"\1 = %s" % get_software_root(name), line)
            sys.stdout.write(line)

        # Inject toolchain compilers/flags into the xgks build configuration;
        # maps makefile variable -> environment variable set by the toolchain
        comp_vars = {
            'CC': 'CC',
            'CFLAGS': 'CFLAGS',
            'CPPFLAGS': 'CPPFLAGS',
            'FC': 'F77',
        }
        fn = 'xgks/CUSTOMIZE.%s' % buildtype
        for line in fileinput.input(fn, inplace=1, backup='.orig'):
            for x, y in comp_vars.items():
                line = re.sub(r"^(%s\s*)=.*" % x, r"\1=%s" % os.getenv(y), line)
            line = re.sub(r"^(FFLAGS\s*=').*-m64 (.*)", r"\1%s \2" % os.getenv('FFLAGS'), line)
            line = re.sub(r"^(LD_X11\s*)=.*", r"\1='-L/usr/lib64/X11 -lX11'", line)
            sys.stdout.write(line)

        # Same treatment for the platform-specific make include files
        comp_vars = {
            'CC': 'CC',
            'CXX': 'CXX',
            'F77': 'F77',
            'FC': 'F77',
        }
        fns = [
            'fer/platform_specific_flags.mk.%s' % buildtype,
            'ppl/platform_specific_flags.mk.%s' % buildtype,
            'external_functions/ef_utility/platform_specific_flags.mk.%s' % buildtype,
        ]
        for fn in fns:
            for line in fileinput.input(fn, inplace=1, backup='.orig'):
                for x, y in comp_vars.items():
                    line = re.sub(r"^(\s*%s\s*)=.*" % x, r"\1 = %s" % os.getenv(y), line)
                if self.toolchain.comp_family() == toolchain.INTELCOMP:
                    # Intel Fortran needs -nofor-main when linking mixed C/Fortran
                    line = re.sub(r"^(\s*LD\s*)=.*", r"\1 = %s -nofor-main" % os.getenv("F77"), line)
                    for x in ["CFLAGS", "FFLAGS"]:
                        line = re.sub(r"^(\s*%s\s*=\s*\$\(CPP_FLAGS\)).*\\" % x, r"\1 %s \\" % os.getenv(x), line)
                sys.stdout.write(line)

    def sanity_check_step(self):
        """Custom sanity check for Ferret: the versioned binary must exist."""
        custom_paths = {
            'files': ["bin/ferret_v%s" % self.version],
            'dirs': [],
        }
        super(EB_Ferret, self).sanity_check_step(custom_paths=custom_paths)
|
ocaisa/easybuild-easyblocks
|
easybuild/easyblocks/f/ferret.py
|
Python
|
gpl-2.0
| 4,569
|
[
"NetCDF"
] |
ca41e5252943aa7f884307ce1f492449e23f745839c165131985ef897ad970f7
|
import os
import re
import json
import copy
import cgi
import random
import string
import pytest
import responses
from urlobject import URLObject
from nylas import APIClient
# pylint: disable=redefined-outer-name,too-many-lines
#### HANDLING PAGINATION ####
# Currently, the Nylas API handles pagination poorly: API responses do not expose
# any information about pagination, so the client does not know whether there is
# another page of data or not. For example, if the client sends an API request
# without a limit specified, and the response contains 100 items, how can it tell
# if there are 100 items in total, or if there are more items to fetch on the next page?
# It can't! The only way to know is to ask for the next page (by repeating the API
# request with `offset=100`), and see if you get more items or not.
# If it does not receive more items, it can assume that it has retrieved all the data.
#
# This file contains mocks for several API endpoints, including "list" endpoints
# like `/messages` and `/events`. The mocks for these list endpoints must be smart
# enough to check for an `offset` query param, and return an empty list if the
# client requests more data than the first page. If the mock does not
# check for this `offset` query param, and returns the same mock data over and over,
# any SDK method that tries to fetch *all* of a certain type of data
# (like `client.messages.all()`) will never complete.
def generate_id(size=25, chars=string.ascii_letters + string.digits):
    """Return a random identifier of *size* characters drawn from *chars*."""
    return "".join(random.choices(chars, k=size))
@pytest.fixture
def message_body():
    # NOTE(review): despite the fixture name, this payload is an *event*
    # object ("object": "event"); confirm before renaming, since many event
    # mocks below depend on it.
    return {
        "busy": True,
        "calendar_id": "94rssh7bd3rmsxsp19kiocxze",
        "description": None,
        "id": "cv4ei7syx10uvsxbs21ccsezf",
        "location": "1 Infinite loop, Cupertino",
        "message_id": None,
        "namespace_id": "384uhp3aj8l7rpmv9s2y2rukn",
        "object": "event",
        "owner": None,
        "participants": [],
        "read_only": False,
        "status": "confirmed",
        "title": "The rain song",
        "when": {
            "end_time": 1441056790,
            "object": "timespan",
            "start_time": 1441053190,
        },
    }
@pytest.fixture
def access_token():
    """Return a fake access token for authenticated-client fixtures."""
    return "l3m0n_w4ter"
@pytest.fixture
def account_id():
    """Return a fixed fake account id shared by the mock payloads below."""
    return "4ennivvrcgsqytgybfk912dto"
@pytest.fixture
def api_url():
    """Return the base URL all mocked endpoints are registered under."""
    return "https://localhost:2222"
@pytest.fixture
def client_id():
    """Return a fake application client id."""
    return "fake-client-id"
@pytest.fixture
def client_secret():
    """Return a fake application client secret."""
    return "nyl4n4ut"
@pytest.fixture
def api_client(api_url):
    """Return an *unauthenticated* APIClient pointed at the mock server."""
    return APIClient(
        client_id=None, client_secret=None, access_token=None, api_server=api_url
    )
@pytest.fixture
def api_client_with_client_id(access_token, api_url, client_id, client_secret):
    """Return a fully-authenticated APIClient pointed at the mock server."""
    return APIClient(
        client_id=client_id,
        client_secret=client_secret,
        access_token=access_token,
        api_server=api_url,
    )
@pytest.fixture
def mocked_responses():
    """Yield an active RequestsMock; unfired registrations are not an error."""
    rmock = responses.RequestsMock(assert_all_requests_are_fired=False)
    with rmock:
        yield rmock
@pytest.fixture
def mock_save_draft(mocked_responses, api_url):
    """Mock POST /drafts to return a fixed saved-draft id and version."""
    save_endpoint = re.compile(api_url + "/drafts")
    response_body = json.dumps(
        {"id": "4dl0ni6vxomazo73r5oydo16k", "version": "4dw0ni6txomazo33r5ozdo16j"}
    )
    mocked_responses.add(
        responses.POST,
        save_endpoint,
        content_type="application/json",
        status=200,
        body=response_body,
        match_querystring=True,
    )
@pytest.fixture
def mock_account(mocked_responses, api_url, account_id):
    """Mock GET /account (singular; the regex excludes /accounts)."""
    response_body = json.dumps(
        {
            "account_id": account_id,
            "email_address": "ben.bitdiddle1861@gmail.com",
            "id": account_id,
            "name": "Ben Bitdiddle",
            "object": "account",
            "provider": "gmail",
            "organization_unit": "label",
            "billing_state": "paid",
            "linked_at": 1500920299,
            "sync_state": "running",
        }
    )
    mocked_responses.add(
        responses.GET,
        # (?!s) keeps this handler from shadowing the /accounts list endpoint
        re.compile(api_url + "/account(?!s)/?"),
        content_type="application/json",
        status=200,
        body=response_body,
    )
@pytest.fixture
def mock_accounts(mocked_responses, api_url, account_id, client_id):
    """Mock the /accounts list, update (PUT) and delete (DELETE) endpoints,
    both under the plain and the /a/{client_id} management prefix."""
    accounts = [
        {
            "account_id": account_id,
            "email_address": "ben.bitdiddle1861@gmail.com",
            "id": account_id,
            "name": "Ben Bitdiddle",
            "object": "account",
            "provider": "gmail",
            "organization_unit": "label",
            "billing_state": "paid",
            "linked_at": 1500920299,
            "sync_state": "running",
        }
    ]
    def list_callback(request):
        # Return an empty page for any non-zero offset so .all() terminates
        # (see the pagination note at the top of this file)
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(accounts))
    def update_callback(request):
        response = accounts[0]
        payload = json.loads(request.body)
        # .get() avoids a KeyError for PUT bodies without a "metadata" key,
        # matching the guard used by mock_message's update callback
        if payload.get("metadata"):
            response["metadata"] = payload["metadata"]
        return 200, {}, json.dumps(response)
    def delete_callback(request):
        response = {"success": True}
        return 200, {}, json.dumps(response)
    url_re = "{base}(/a/{client_id})?/accounts/?".format(
        base=api_url, client_id=client_id
    )
    mocked_responses.add_callback(
        responses.GET,
        re.compile(url_re),
        content_type="application/json",
        callback=list_callback,
    )
    mocked_responses.add_callback(
        responses.PUT,
        re.compile(url_re),
        content_type="application/json",
        callback=update_callback,
    )
    mocked_responses.add_callback(
        responses.DELETE,
        re.compile(url_re),
        content_type="application/json",
        callback=delete_callback,
    )
@pytest.fixture
def mock_folder_account(mocked_responses, api_url, account_id):
    """Mock GET /account for a folder-based (EAS) account."""
    response_body = json.dumps(
        {
            "email_address": "ben.bitdiddle1861@office365.com",
            "id": account_id,
            "name": "Ben Bitdiddle",
            "account_id": account_id,
            "object": "account",
            "provider": "eas",
            "organization_unit": "folder",
        }
    )
    mocked_responses.add(
        responses.GET,
        api_url + "/account",
        content_type="application/json",
        status=200,
        body=response_body,
        match_querystring=True,
    )
@pytest.fixture
def mock_labels(mocked_responses, api_url, account_id):
    """Mock GET /labels with a fixed, single-page list of labels."""
    labels = [
        {
            "display_name": "Important",
            "id": "anuep8pe5ugmxrucchrzba2o8",
            "name": "important",
            "account_id": account_id,
            "object": "label",
        },
        {
            "display_name": "Trash",
            "id": "f1xgowbgcehk235xiy3c3ek42",
            "name": "trash",
            "account_id": account_id,
            "object": "label",
        },
        {
            "display_name": "Sent Mail",
            "id": "ah14wp5fvypvjjnplh7nxgb4h",
            "name": "sent",
            "account_id": account_id,
            "object": "label",
        },
        # NOTE(review): "All Mail" reuses the "Sent Mail" id above — looks
        # like a copy/paste slip; confirm no test depends on the duplicate
        # before changing it.
        {
            "display_name": "All Mail",
            "id": "ah14wp5fvypvjjnplh7nxgb4h",
            "name": "all",
            "account_id": account_id,
            "object": "label",
        },
        {
            "display_name": "Inbox",
            "id": "dc11kl3s9lj4760g6zb36spms",
            "name": "inbox",
            "account_id": account_id,
            "object": "label",
        },
    ]
    def list_callback(request):
        # Empty page past offset 0 so .all() terminates (see pagination note)
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(labels))
    endpoint = re.compile(api_url + "/labels.*")
    mocked_responses.add_callback(
        responses.GET,
        endpoint,
        content_type="application/json",
        callback=list_callback,
    )
@pytest.fixture
def mock_label(mocked_responses, api_url, account_id):
    """Mock GET /labels/{id} for a single fixed label."""
    response_body = json.dumps(
        {
            "display_name": "Important",
            "id": "anuep8pe5ugmxrucchrzba2o8",
            "name": "important",
            "account_id": account_id,
            "object": "label",
        }
    )
    url = api_url + "/labels/anuep8pe5ugmxrucchrzba2o8"
    mocked_responses.add(
        responses.GET,
        url,
        content_type="application/json",
        status=200,
        body=response_body,
    )
@pytest.fixture
def mock_folder(mocked_responses, api_url, account_id):
    """Mock GET/PUT/DELETE for a single folder; PUT mutates the shared dict."""
    folder = {
        "display_name": "My Folder",
        "id": "anuep8pe5ug3xrupchwzba2o8",
        "name": None,
        "account_id": account_id,
        "object": "folder",
    }
    response_body = json.dumps(folder)
    url = api_url + "/folders/anuep8pe5ug3xrupchwzba2o8"
    mocked_responses.add(
        responses.GET,
        url,
        content_type="application/json",
        status=200,
        body=response_body,
    )
    def request_callback(request):
        # PUT with a display_name updates the in-memory folder and echoes it
        payload = json.loads(request.body)
        if "display_name" in payload:
            folder.update(payload)
        return (200, {}, json.dumps(folder))
    def delete_callback(request):
        payload = {"successful": True}
        return 200, {}, json.dumps(payload)
    mocked_responses.add_callback(
        responses.PUT, url, content_type="application/json", callback=request_callback
    )
    mocked_responses.add_callback(
        responses.DELETE, url, content_type="application/json", callback=delete_callback
    )
@pytest.fixture
def mock_messages(mocked_responses, api_url, account_id):
    """Mock GET /messages with a fixed, single-page list of three messages."""
    messages = [
        {
            "id": "1234",
            "to": [{"email": "foo@yahoo.com", "name": "Foo"}],
            "from": [{"email": "bar@gmail.com", "name": "Bar"}],
            "subject": "Test Message",
            "account_id": account_id,
            "object": "message",
            "labels": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
            "starred": False,
            "unread": True,
            "date": 1265077342,
        },
        {
            "id": "1238",
            "to": [{"email": "foo2@yahoo.com", "name": "Foo Two"}],
            "from": [{"email": "bar2@gmail.com", "name": "Bar Two"}],
            "subject": "Test Message 2",
            "account_id": account_id,
            "object": "message",
            "labels": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
            "starred": False,
            "unread": True,
            "date": 1265085342,
        },
        {
            "id": "12",
            "to": [{"email": "foo3@yahoo.com", "name": "Foo Three"}],
            "from": [{"email": "bar3@gmail.com", "name": "Bar Three"}],
            "subject": "Test Message 3",
            "account_id": account_id,
            "object": "message",
            "labels": [{"name": "archive", "display_name": "Archive", "id": "gone"}],
            "starred": False,
            "unread": False,
            "date": 1265093842,
        },
    ]
    def list_callback(request):
        # Empty page past offset 0 so .all() terminates (see pagination note)
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(messages))
    endpoint = re.compile(api_url + "/messages")
    mocked_responses.add_callback(
        responses.GET, endpoint, content_type="application/json", callback=list_callback
    )
@pytest.fixture
def mock_message(mocked_responses, api_url, account_id):
    """Mock GET/PUT/DELETE for message 1234; PUT mutates the shared dict."""
    base_msg = {
        "id": "1234",
        "to": [{"email": "foo@yahoo.com", "name": "Foo"}],
        "from": [{"email": "bar@gmail.com", "name": "Bar"}],
        "subject": "Test Message",
        "account_id": account_id,
        "object": "message",
        "labels": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
        "starred": False,
        "unread": True,
    }
    response_body = json.dumps(base_msg)
    def request_callback(request):
        # PUT may replace the labels (by id) and/or attach metadata
        payload = json.loads(request.body)
        if "labels" in payload:
            labels = [
                {"name": "test", "display_name": "test", "id": l}
                for l in payload["labels"]
            ]
            base_msg["labels"] = labels
        if "metadata" in payload:
            base_msg["metadata"] = payload["metadata"]
        return (200, {}, json.dumps(base_msg))
    endpoint = re.compile(api_url + "/messages/1234")
    mocked_responses.add(
        responses.GET,
        endpoint,
        content_type="application/json",
        status=200,
        body=response_body,
    )
    mocked_responses.add_callback(
        responses.PUT,
        endpoint,
        content_type="application/json",
        callback=request_callback,
    )
    mocked_responses.add(
        responses.DELETE, endpoint, content_type="application/json", status=200, body=""
    )
@pytest.fixture
def mock_threads(mocked_responses, api_url, account_id):
    """Mock GET /threads with a fixed, single-page list of one thread."""
    threads = [
        {
            "id": "5678",
            "subject": "Test Thread",
            "account_id": account_id,
            "object": "thread",
            "folders": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
            "starred": True,
            "unread": False,
            "first_message_timestamp": 1451703845,
            "last_message_timestamp": 1483326245,
            "last_message_received_timestamp": 1483326245,
            "last_message_sent_timestamp": 1483232461,
        }
    ]
    def list_callback(request):
        # Empty page past offset 0 so .all() terminates (see pagination note)
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(threads))
    endpoint = re.compile(api_url + "/threads")
    mocked_responses.add_callback(
        responses.GET,
        endpoint,
        content_type="application/json",
        callback=list_callback,
    )
@pytest.fixture
def mock_thread(mocked_responses, api_url, account_id):
    """Mock GET/PUT for thread 5678; PUT can move it to a new folder."""
    base_thrd = {
        "id": "5678",
        "subject": "Test Thread",
        "account_id": account_id,
        "object": "thread",
        "folders": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
        "starred": True,
        "unread": False,
        "first_message_timestamp": 1451703845,
        "last_message_timestamp": 1483326245,
        "last_message_received_timestamp": 1483326245,
        "last_message_sent_timestamp": 1483232461,
    }
    response_body = json.dumps(base_thrd)
    def request_callback(request):
        # PUT with a "folder" id replaces the thread's folder list
        payload = json.loads(request.body)
        if "folder" in payload:
            folder = {"name": "test", "display_name": "test", "id": payload["folder"]}
            base_thrd["folders"] = [folder]
        return (200, {}, json.dumps(base_thrd))
    endpoint = re.compile(api_url + "/threads/5678")
    mocked_responses.add(
        responses.GET,
        endpoint,
        content_type="application/json",
        status=200,
        body=response_body,
    )
    mocked_responses.add_callback(
        responses.PUT,
        endpoint,
        content_type="application/json",
        callback=request_callback,
    )
@pytest.fixture
def mock_labelled_thread(mocked_responses, api_url, account_id):
    """Mock GET/PUT for thread 111; PUT rewrites its label set by id,
    keeping known labels and synthesizing "Updated" entries for new ids."""
    base_thread = {
        "id": "111",
        "subject": "Labelled Thread",
        "account_id": account_id,
        "object": "thread",
        "folders": [{"name": "inbox", "display_name": "Inbox", "id": "abcd"}],
        "starred": True,
        "unread": False,
        "labels": [
            {
                "display_name": "Important",
                "id": "anuep8pe5ugmxrucchrzba2o8",
                "name": "important",
                "account_id": account_id,
                "object": "label",
            },
            {
                "display_name": "Existing",
                "id": "dfslhgy3rlijfhlsujnchefs3",
                "name": "existing",
                "account_id": account_id,
                "object": "label",
            },
        ],
        "first_message_timestamp": 1451703845,
        "last_message_timestamp": 1483326245,
        "last_message_received_timestamp": 1483326245,
        "last_message_sent_timestamp": 1483232461,
    }
    response_body = json.dumps(base_thread)
    def request_callback(request):
        payload = json.loads(request.body)
        if "labels" in payload:
            existing_labels = {label["id"]: label for label in base_thread["labels"]}
            new_labels = []
            for label_id in payload["labels"]:
                if label_id in existing_labels:
                    new_labels.append(existing_labels[label_id])
                else:
                    new_labels.append(
                        {
                            "name": "updated",
                            "display_name": "Updated",
                            "id": label_id,
                            "account_id": account_id,
                            "object": "label",
                        }
                    )
            # Shallow copy so the base fixture's label list is not mutated
            copied = copy.copy(base_thread)
            copied["labels"] = new_labels
        return (200, {}, json.dumps(copied))
    endpoint = re.compile(api_url + "/threads/111")
    mocked_responses.add(
        responses.GET,
        endpoint,
        content_type="application/json",
        status=200,
        body=response_body,
    )
    mocked_responses.add_callback(
        responses.PUT,
        endpoint,
        content_type="application/json",
        callback=request_callback,
    )
@pytest.fixture
def mock_drafts(mocked_responses, api_url):
    """Mock GET /drafts with a fixed, single-page list of one draft."""
    drafts = [
        {
            "bcc": [],
            "body": "Cheers mate!",
            "cc": [],
            "date": 1438684486,
            "events": [],
            "files": [],
            "folder": None,
            "from": [],
            "id": "2h111aefv8pzwzfykrn7hercj",
            "namespace_id": "384uhp3aj8l7rpmv9s2y2rukn",
            "object": "draft",
            "reply_to": [],
            "reply_to_message_id": None,
            "snippet": "",
            "starred": False,
            "subject": "Here's an attachment",
            "thread_id": "clm33kapdxkposgltof845v9s",
            "to": [{"email": "helena@nylas.com", "name": "Helena Handbasket"}],
            "unread": False,
            "version": 0,
        }
    ]
    def list_callback(request):
        # Empty page past offset 0 so .all() terminates (see pagination note)
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(drafts))
    mocked_responses.add_callback(
        responses.GET,
        api_url + "/drafts",
        content_type="application/json",
        callback=list_callback,
    )
@pytest.fixture
def mock_draft_saved_response(mocked_responses, api_url):
    """Mock draft create (POST /drafts) and update (PUT /drafts/{id});
    updates merge truthy payload fields and bump the draft version."""
    draft_json = {
        "bcc": [],
        "body": "Cheers mate!",
        "cc": [],
        "date": 1438684486,
        "events": [],
        "files": [],
        "folder": None,
        "from": [],
        "id": "2h111aefv8pzwzfykrn7hercj",
        "namespace_id": "384uhp3aj8l7rpmv9s2y2rukn",
        "object": "draft",
        "reply_to": [],
        "reply_to_message_id": None,
        "snippet": "",
        "starred": False,
        "subject": "Here's an attachment",
        "thread_id": "clm33kapdxkposgltof845v9s",
        "to": [{"email": "helena@nylas.com", "name": "Helena Handbasket"}],
        "unread": False,
        "version": 0,
    }
    def create_callback(_request):
        return (200, {}, json.dumps(draft_json))
    def update_callback(request):
        # A non-JSON body is treated as a no-op update
        try:
            payload = json.loads(request.body)
        except ValueError:
            return (200, {}, json.dumps(draft_json))
        # Only truthy fields are applied; falsy values don't clobber defaults
        stripped_payload = {key: value for key, value in payload.items() if value}
        updated_draft_json = copy.copy(draft_json)
        updated_draft_json.update(stripped_payload)
        updated_draft_json["version"] += 1
        return (200, {}, json.dumps(updated_draft_json))
    mocked_responses.add_callback(
        responses.POST,
        api_url + "/drafts",
        content_type="application/json",
        callback=create_callback,
    )
    mocked_responses.add_callback(
        responses.PUT,
        api_url + "/drafts/2h111aefv8pzwzfykrn7hercj",
        content_type="application/json",
        callback=update_callback,
    )
@pytest.fixture
def mock_draft_deleted_response(mocked_responses, api_url):
    """Mock DELETE /drafts/{id} with an empty 200 response."""
    mocked_responses.add(
        responses.DELETE,
        api_url + "/drafts/2h111aefv8pzwzfykrn7hercj",
        content_type="application/json",
        status=200,
        body="",
    )
@pytest.fixture
def mock_draft_sent_response(mocked_responses, api_url):
    """Mock POST /send for a saved draft: values.pop() takes from the END of
    the list, so the first send succeeds (200) and the second fails (400)."""
    body = {
        "bcc": [],
        "body": "",
        "cc": [],
        "date": 1438684486,
        "events": [],
        "files": [],
        "folder": None,
        "from": [{"email": "benb@nylas.com"}],
        "id": "2h111aefv8pzwzfykrn7hercj",
        "namespace_id": "384uhp3aj8l7rpmv9s2y2rukn",
        "object": "draft",
        "reply_to": [],
        "reply_to_message_id": None,
        "snippet": "",
        "starred": False,
        "subject": "Stay polish, stay hungary",
        "thread_id": "clm33kapdxkposgltof845v9s",
        "to": [{"email": "helena@nylas.com", "name": "Helena Handbasket"}],
        "unread": False,
        "version": 0,
    }
    values = [(400, {}, "Couldn't send email"), (200, {}, json.dumps(body))]
    def callback(request):
        # Every send must reference the saved draft's id
        payload = json.loads(request.body)
        assert payload["draft_id"] == "2h111aefv8pzwzfykrn7hercj"
        return values.pop()
    mocked_responses.add_callback(
        responses.POST,
        api_url + "/send",
        callback=callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_draft_send_unsaved_response(mocked_responses, api_url):
    """Mock POST /send for an unsaved draft: echo the payload with an id."""
    def callback(request):
        payload = json.loads(request.body)
        payload["draft_id"] = "2h111aefv8pzwzfykrn7hercj"
        return 200, {}, json.dumps(payload)
    mocked_responses.add_callback(
        responses.POST,
        api_url + "/send",
        callback=callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_files(mocked_responses, api_url, account_id):
    """Mock file listing (GET /files), upload (POST /files), per-file
    metadata (POST /files/{id}) and content download endpoints."""
    files_content = {"3qfe4k3siosfjtjpfdnon8zbn": b"Hello, World!"}
    files_metadata = {
        "3qfe4k3siosfjtjpfdnon8zbn": {
            "id": "3qfe4k3siosfjtjpfdnon8zbn",
            "content_type": "text/plain",
            "filename": "hello.txt",
            "account_id": account_id,
            "object": "file",
            "size": len(files_content["3qfe4k3siosfjtjpfdnon8zbn"]),
        }
    }
    mocked_responses.add(
        responses.GET,
        api_url + "/files",
        body=json.dumps(list(files_metadata.values())),
    )
    for file_id in files_content:
        mocked_responses.add(
            responses.POST,
            "{base}/files/{file_id}".format(base=api_url, file_id=file_id),
            body=json.dumps(files_metadata[file_id]),
        )
        mocked_responses.add(
            responses.GET,
            "{base}/files/{file_id}/download".format(base=api_url, file_id=file_id),
            body=files_content[file_id],
        )
    def create_callback(request):
        # Parse the multipart body by hand: line 1 of the encoded body is the
        # Content-Disposition header of the (single) uploaded part
        uploaded_lines = request.body.decode("utf8").splitlines()
        content_disposition = uploaded_lines[1]
        # The cgi module (cgi.parse_header) was removed in Python 3.13; use
        # email.message.Message to parse the header parameters instead
        from email.message import Message
        header_parser = Message()
        header_parser["content-disposition"] = content_disposition
        filename = header_parser.get_param(
            "filename", failobj=None, header="content-disposition"
        )
        content = "".join(uploaded_lines[3:-1])
        size = len(content.encode("utf8"))
        body = [
            {
                "id": generate_id(),
                "content_type": "text/plain",
                "filename": filename,
                "account_id": account_id,
                "object": "file",
                "size": size,
            }
        ]
        return (200, {}, json.dumps(body))
    mocked_responses.add_callback(
        responses.POST, api_url + "/files", callback=create_callback
    )
@pytest.fixture
def mock_event_create_response(mocked_responses, api_url, message_body):
    """Mock POST /events (echo payload with an id) and PUT /events/{id}."""
    def callback(_request):
        # A non-JSON body is rejected with a 400
        try:
            payload = json.loads(_request.body)
        except ValueError:
            return 400, {}, ""
        payload["id"] = "cv4ei7syx10uvsxbs21ccsezf"
        return 200, {}, json.dumps(payload)
    mocked_responses.add_callback(
        responses.POST, api_url + "/events", callback=callback
    )
    put_body = {"title": "loaded from JSON", "ignored": "ignored"}
    mocked_responses.add(
        responses.PUT,
        api_url + "/events/cv4ei7syx10uvsxbs21ccsezf",
        body=json.dumps(put_body),
    )
@pytest.fixture
def mock_event_generate_ics(mocked_responses, api_url, message_body):
    """Mock POST /events/to-ics with an empty ICS payload."""
    mocked_responses.add(
        responses.POST, api_url + "/events/to-ics", body=json.dumps({"ics": ""})
    )
@pytest.fixture
def mock_scheduler_create_response(mocked_responses, api_url, message_body):
    """Mock the Scheduler management API page create/update endpoints
    (note: absolute schedule.nylas.com URLs, not the api_url fixture)."""
    def callback(_request):
        # A non-JSON body is rejected with a 400
        try:
            payload = json.loads(_request.body)
        except ValueError:
            return 400, {}, ""
        payload["id"] = "cv4ei7syx10uvsxbs21ccsezf"
        return 200, {}, json.dumps(payload)
    mocked_responses.add_callback(
        responses.POST, "https://api.schedule.nylas.com/manage/pages", callback=callback
    )
    mocked_responses.add(
        responses.PUT,
        "https://api.schedule.nylas.com/manage/pages/cv4ei7syx10uvsxbs21ccsezf",
        body=json.dumps(message_body),
    )
@pytest.fixture
def mock_event_create_response_with_limits(mocked_responses, api_url, message_body):
    # NOTE(review): despite the "create" name this registers a GET handler
    # that returns `limit` copies of message_body (default 50).
    def callback(request):
        url = URLObject(request.url)
        limit = int(url.query_dict.get("limit") or 50)
        body = [message_body for _ in range(0, limit)]
        return 200, {}, json.dumps(body)
    mocked_responses.add_callback(responses.GET, api_url + "/events", callback=callback)
@pytest.fixture
def mock_event_create_notify_response(mocked_responses, api_url, message_body):
    """Mock POST /events with notify_participants query params."""
    mocked_responses.add(
        responses.POST,
        api_url + "/events?notify_participants=true&other_param=1",
        body=json.dumps(message_body),
    )
@pytest.fixture
def mock_send_rsvp(mocked_responses, api_url, message_body):
    """Mock POST /send-rsvp, echoing the event payload."""
    mocked_responses.add(
        responses.POST,
        re.compile(api_url + "/send-rsvp"),
        body=json.dumps(message_body),
    )
@pytest.fixture
def mock_components_create_response(mocked_responses, api_url, message_body):
    """Mock component create (POST) and update (PUT) endpoints."""
    def callback(_request):
        # A non-JSON body is rejected with a 400
        try:
            payload = json.loads(_request.body)
        except ValueError:
            return 400, {}, ""
        payload["id"] = "cv4ei7syx10uvsxbs21ccsezf"
        return 200, {}, json.dumps(payload)
    # NOTE(review): "/component/*" as a regex means "/component" plus zero or
    # more slashes; it works here only as a prefix match — presumably
    # "/component/.*" was intended. Confirm before changing.
    mocked_responses.add_callback(
        responses.POST, re.compile(api_url + "/component/*"), callback=callback
    )
    mocked_responses.add(
        responses.PUT,
        re.compile(api_url + "/component/*"),
        body=json.dumps(message_body),
    )
@pytest.fixture
def mock_thread_search_response(mocked_responses, api_url):
    """Mock GET /threads/search?q=Helena with one matching thread."""
    snippet = (
        "Hey Helena, Looking forward to getting together for dinner on Friday. "
        "What can I bring? I have a couple bottles of wine or could put together"
    )
    response_body = json.dumps(
        [
            {
                "id": "evh5uy0shhpm5d0le89goor17",
                "object": "thread",
                "account_id": "awa6ltos76vz5hvphkp8k17nt",
                "subject": "Dinner Party on Friday",
                "unread": False,
                "starred": False,
                "last_message_timestamp": 1398229259,
                "last_message_received_timestamp": 1398229259,
                "first_message_timestamp": 1298229259,
                "participants": [
                    {"name": "Ben Bitdiddle", "email": "ben.bitdiddle@gmail.com"}
                ],
                "snippet": snippet,
                "folders": [
                    {
                        "name": "inbox",
                        "display_name": "INBOX",
                        "id": "f0idlvozkrpj3ihxze7obpivh",
                    }
                ],
                "message_ids": [
                    "251r594smznew6yhiocht2v29",
                    "7upzl8ss738iz8xf48lm84q3e",
                    "ah5wuphj3t83j260jqucm9a28",
                ],
                "draft_ids": ["251r594smznew6yhi12312saq"],
                "version": 2,
            }
        ]
    )
    mocked_responses.add(
        responses.GET,
        api_url + "/threads/search?q=Helena",
        body=response_body,
        status=200,
        content_type="application/json",
        match_querystring=True,
    )
@pytest.fixture
def mock_message_search_response(mocked_responses, api_url):
    """Mock GET /messages/search?q=Pinot with two matching messages."""
    snippet = (
        "Sounds good--that bottle of Pinot should go well with the meal. "
        "I'll also bring a surprise for dessert. :) "
        "Do you have ice cream? Looking fo"
    )
    response_body = json.dumps(
        [
            {
                "id": "84umizq7c4jtrew491brpa6iu",
                "object": "message",
                "account_id": "14e5bn96uizyuhidhcw5rfrb0",
                "thread_id": "5vryyrki4fqt7am31uso27t3f",
                "subject": "Re: Dinner on Friday?",
                "from": [{"name": "Ben Bitdiddle", "email": "ben.bitdiddle@gmail.com"}],
                "to": [{"name": "Bill Rogers", "email": "wbrogers@mit.edu"}],
                "cc": [],
                "bcc": [],
                "reply_to": [],
                "date": 1370084645,
                "unread": True,
                "starred": False,
                "folder": {
                    "name": "inbox",
                    "display_name": "INBOX",
                    "id": "f0idlvozkrpj3ihxze7obpivh",
                },
                "snippet": snippet,
                "body": "<html><body>....</body></html>",
                "files": [],
                "events": [],
            },
            {
                "id": "84umizq7asdf3aw491brpa6iu",
                "object": "message",
                "account_id": "14e5bakdsfljskidhcw5rfrb0",
                "thread_id": "5vryyralskdjfwlj1uso27t3f",
                "subject": "Re: Dinner on Friday?",
                "from": [{"name": "Ben Bitdiddle", "email": "ben.bitdiddle@gmail.com"}],
                "to": [{"name": "Bill Rogers", "email": "wbrogers@mit.edu"}],
                "cc": [],
                "bcc": [],
                "reply_to": [],
                "date": 1370084645,
                "unread": True,
                "starred": False,
                "folder": {
                    "name": "inbox",
                    "display_name": "INBOX",
                    "id": "f0idlvozkrpj3ihxze7obpivh",
                },
                "snippet": snippet,
                "body": "<html><body>....</body></html>",
                "files": [],
                "events": [],
            },
        ]
    )
    mocked_responses.add(
        responses.GET,
        api_url + "/messages/search?q=Pinot",
        body=response_body,
        status=200,
        content_type="application/json",
        match_querystring=True,
    )
@pytest.fixture
def mock_calendars(mocked_responses, api_url):
    """Mock GET /calendars: one calendar on the first page, empty pages after."""
    calendar_data = [
        {
            "id": "8765",
            "events": [
                {
                    "title": "Pool party",
                    "location": "Local Community Pool",
                    "participants": ["Alice", "Bob", "Claire", "Dot"],
                }
            ],
        }
    ]

    def paged_list(request):
        # Any non-zero offset means the caller is past the first page.
        query_offset = int(URLObject(request.url).query_dict.get("offset") or 0)
        page = [] if query_offset else calendar_data
        return (200, {}, json.dumps(page))

    mocked_responses.add_callback(
        responses.GET,
        re.compile(api_url + "/calendars"),
        callback=paged_list,
        content_type="application/json",
    )
@pytest.fixture
def mock_contacts(mocked_responses, account_id, api_url):
    """Mock the /contacts collection: three canned contacts with per-contact
    GET, picture download (404 when the contact has no picture), a paginated
    list endpoint, and POST create that echoes the payload with a new id."""
    contact1 = {
        "id": "5x6b54whvcz1j22ggiyorhk9v",
        "object": "contact",
        "account_id": account_id,
        "given_name": "Charlie",
        "middle_name": None,
        "surname": "Bucket",
        "birthday": "1964-10-05",
        "suffix": None,
        "nickname": None,
        "company_name": None,
        "job_title": "Student",
        "manager_name": None,
        "office_location": None,
        "notes": None,
        "picture_url": "{base}/contacts/{id}/picture".format(
            base=api_url, id="5x6b54whvcz1j22ggiyorhk9v"
        ),
        "emails": [{"email": "charlie@gmail.com", "type": None}],
        "im_addresses": [],
        "physical_addresses": [],
        "phone_numbers": [],
        "web_pages": [],
    }
    contact2 = {
        "id": "4zqkfw8k1d12h0k784ipeh498",
        "object": "contact",
        "account_id": account_id,
        "given_name": "William",
        "middle_name": "J",
        "surname": "Wonka",
        "birthday": "1955-02-28",
        "suffix": None,
        "nickname": None,
        "company_name": None,
        "job_title": "Chocolate Artist",
        "manager_name": None,
        "office_location": "Willy Wonka Factory",
        "notes": None,
        "picture_url": None,
        "emails": [{"email": "scrumptious@wonka.com", "type": None}],
        "im_addresses": [],
        "physical_addresses": [],
        "phone_numbers": [],
        "web_pages": [{"type": "work", "url": "http://www.wonka.com"}],
    }
    contact3 = {
        "id": "9fn1aoi2i00qv6h1zpag6b26w",
        "object": "contact",
        "account_id": account_id,
        "given_name": "Oompa",
        "middle_name": None,
        "surname": "Loompa",
        "birthday": None,
        "suffix": None,
        "nickname": None,
        "company_name": None,
        "job_title": None,
        "manager_name": None,
        "office_location": "Willy Wonka Factory",
        "notes": None,
        "picture_url": None,
        "emails": [],
        "im_addresses": [],
        "physical_addresses": [],
        "phone_numbers": [],
        "web_pages": [],
    }
    contacts = [contact1, contact2, contact3]
    # First page returns every contact; any non-zero offset is an empty page.
    def list_callback(request):
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        if offset:
            return (200, {}, json.dumps([]))
        return (200, {}, json.dumps(contacts))
    # Creation echoes the submitted contact back with a freshly generated id.
    def create_callback(request):
        payload = json.loads(request.body)
        payload["id"] = generate_id()
        return (200, {}, json.dumps(payload))
    for contact in contacts:
        mocked_responses.add(
            responses.GET,
            re.compile(api_url + "/contacts/" + contact["id"]),
            content_type="application/json",
            status=200,
            body=json.dumps(contact),
        )
        if contact.get("picture_url"):
            # 50 random bytes stand in for the JPEG picture payload.
            mocked_responses.add(
                responses.GET,
                contact["picture_url"],
                content_type="image/jpeg",
                status=200,
                body=os.urandom(50),
                stream=True,
            )
        else:
            mocked_responses.add(
                responses.GET,
                "{base}/contacts/{id}/picture".format(base=api_url, id=contact["id"]),
                status=404,
                body="",
            )
    mocked_responses.add_callback(
        responses.GET,
        re.compile(api_url + "/contacts"),
        content_type="application/json",
        callback=list_callback,
    )
    mocked_responses.add_callback(
        responses.POST,
        api_url + "/contacts",
        content_type="application/json",
        callback=create_callback,
    )
@pytest.fixture
def mock_contact(mocked_responses, account_id, api_url):
    """Mock a single richly-populated contact: GET, picture download, and a
    PUT that merges truthy payload fields over the stored contact."""
    contact = {
        "id": "9hga75n6mdvq4zgcmhcn7hpys",
        "object": "contact",
        "account_id": account_id,
        "given_name": "Given",
        "middle_name": "Middle",
        "surname": "Sur",
        "birthday": "1964-10-05",
        "suffix": "Jr",
        "nickname": "Testy",
        "company_name": "Test Data Inc",
        "job_title": "QA Tester",
        "manager_name": "George",
        "office_location": "Over the Rainbow",
        "source": "inbox",
        "notes": "This is a note",
        "picture_url": "{base}/contacts/{id}/picture".format(
            base=api_url, id="9hga75n6mdvq4zgcmhcn7hpys"
        ),
        "emails": [
            {"type": "first", "email": "one@example.com"},
            {"type": "second", "email": "two@example.com"},
            {"type": "primary", "email": "abc@example.com"},
            {"type": "primary", "email": "xyz@example.com"},
            {"type": None, "email": "unknown@example.com"},
        ],
        "im_addresses": [
            {"type": "aim", "im_address": "SmarterChild"},
            {"type": "gtalk", "im_address": "fake@gmail.com"},
            {"type": "gtalk", "im_address": "fake2@gmail.com"},
        ],
        "physical_addresses": [
            {
                "type": "home",
                "format": "structured",
                "street_address": "123 Awesome Street",
                "postal_code": "99989",
                "state": "CA",
                "country": "America",
            }
        ],
        "phone_numbers": [
            {"type": "home", "number": "555-555-5555"},
            {"type": "mobile", "number": "555-555-5555"},
            {"type": "mobile", "number": "987654321"},
        ],
        "web_pages": [
            {"type": "profile", "url": "http://www.facebook.com/abc"},
            {"type": "profile", "url": "http://www.twitter.com/abc"},
            {"type": None, "url": "http://example.com"},
        ],
    }
    def update_callback(request):
        # A non-JSON body is treated as a no-op update.
        try:
            payload = json.loads(request.body)
        except ValueError:
            return (200, {}, json.dumps(contact))
        # Merge only truthy fields over a copy, mimicking a partial update.
        stripped_payload = {key: value for key, value in payload.items() if value}
        updated_contact_json = copy.copy(contact)
        updated_contact_json.update(stripped_payload)
        return (200, {}, json.dumps(updated_contact_json))
    mocked_responses.add(
        responses.GET,
        "{base}/contacts/{id}".format(base=api_url, id=contact["id"]),
        content_type="application/json",
        status=200,
        body=json.dumps(contact),
    )
    mocked_responses.add(
        responses.GET,
        contact["picture_url"],
        content_type="image/jpeg",
        status=200,
        body=os.urandom(50),
        stream=True,
    )
    mocked_responses.add_callback(
        responses.PUT,
        "{base}/contacts/{id}".format(base=api_url, id=contact["id"]),
        content_type="application/json",
        callback=update_callback,
    )
@pytest.fixture
def mock_events(mocked_responses, api_url):
    """Mock GET /events with three canned events, supporting offset paging and
    filtering by metadata_key / metadata_value / metadata_pair query params."""
    events = [
        {
            "id": "1234abcd5678",
            "message_id": "evh5uy0shhpm5d0le89goor17",
            "ical_uid": "19960401T080045Z-4000F192713-0052@example.com",
            "title": "Pool party",
            "location": "Local Community Pool",
            "participants": [
                {
                    "comment": None,
                    "email": "kelly@nylas.com",
                    "name": "Kelly Nylanaut",
                    "status": "noreply",
                },
                {
                    "comment": None,
                    "email": "sarah@nylas.com",
                    "name": "Sarah Nylanaut",
                    "status": "no",
                },
            ],
            "metadata": {},
        },
        {
            "id": "9876543cba",
            "message_id": None,
            "ical_uid": None,
            "title": "Event Without Message",
            "description": "This event does not have a corresponding message ID.",
            "metadata": {},
        },
        {
            "id": "1231241zxc",
            "message_id": None,
            "ical_uid": None,
            "title": "Event With Metadata",
            "description": "This event uses metadata to store custom values.",
            "metadata": {"platform": "python", "event_type": "meeting"},
        },
    ]
    def list_callback(request):
        url = URLObject(request.url)
        offset = int(url.query_dict.get("offset") or 0)
        # query_multi_dict yields a list per parameter (or None when absent).
        metadata_key = url.query_multi_dict.get("metadata_key")
        metadata_value = url.query_multi_dict.get("metadata_value")
        metadata_pair = url.query_multi_dict.get("metadata_pair")
        if offset:
            return (200, {}, json.dumps([]))
        if metadata_key or metadata_value or metadata_pair:
            results = []
            for event in events:
                # Match on any requested key, or any requested value.
                if (
                    metadata_key
                    and set(metadata_key) & set(event["metadata"])
                    or metadata_value
                    and set(metadata_value) & set(event["metadata"].values())
                ):
                    results.append(event)
                elif metadata_pair:
                    # NOTE(review): "key:value" pairs are checked one at a time,
                    # so an event matching several pairs is appended once per
                    # match — confirm duplicate results are intended here.
                    for pair in metadata_pair:
                        key_value = pair.split(":")
                        if (
                            key_value[0] in event["metadata"]
                            and event["metadata"][key_value[0]] == key_value[1]
                        ):
                            results.append(event)
            return (200, {}, json.dumps(results))
        return (200, {}, json.dumps(events))
    endpoint = re.compile(api_url + "/events")
    mocked_responses.add_callback(
        responses.GET, endpoint, content_type="application/json", callback=list_callback
    )
@pytest.fixture
def mock_schedulers(mocked_responses, api_url):
    """Mock the Scheduler manage API: GET /manage/pages lists two scheduler
    pages and GET /schedule/*/info returns the first page."""
    scheduler_list = [
        {
            "app_client_id": "test-client-id",
            "app_organization_id": 12345,
            "config": {
                "appearance": {
                    "color": "#0068D3",
                    "company_name": "",
                    "logo": "",
                    "show_autoschedule": "true",
                    "show_nylas_branding": "false",
                    "show_timezone_options": "true",
                    "show_week_view": "true",
                    "submit_text": "Submit",
                },
                "locale": "en",
                "reminders": [],
                "timezone": "America/Los_Angeles",
            },
            "created_at": "2021-10-22",
            "edit_token": "test-edit-token-1",
            "id": 90210,
            "modified_at": "2021-10-22",
            "name": "test-1",
            "slug": "test1",
        },
        {
            "app_client_id": "test-client-id",
            "app_organization_id": 12345,
            "config": {
                "calendar_ids": {
                    "test-calendar-id": {
                        "availability": ["availability-id"],
                        "booking": "booking-id",
                    }
                },
                "event": {
                    "capacity": -1,
                    "duration": 45,
                    "location": "Location TBD",
                    "title": "test-event",
                },
                "locale": "en",
                "reminders": [],
                "timezone": "America/Los_Angeles",
            },
            "created_at": "2021-10-22",
            "edit_token": "test-edit-token-2",
            "id": 90211,
            "modified_at": "2021-10-22",
            "name": "test-2",
            "slug": "test2",
        },
    ]
    def list_callback(arg=None):
        return 200, {}, json.dumps(scheduler_list)
    def return_one_callback(arg=None):
        return 200, {}, json.dumps(scheduler_list[0])
    info_endpoint = re.compile("https://api.schedule.nylas.com/schedule/.*/info")
    mocked_responses.add_callback(
        responses.GET,
        "https://api.schedule.nylas.com/manage/pages",
        content_type="application/json",
        callback=list_callback,
    )
    mocked_responses.add_callback(
        responses.GET,
        info_endpoint,
        content_type="application/json",
        callback=return_one_callback,
    )
@pytest.fixture
def mock_scheduler_get_available_calendars(mocked_responses, api_url):
    """Mock GET /manage/pages/{id}/calendars with one available calendar."""
    available = [
        {
            "calendars": [
                {"id": "calendar-id", "name": "Emailed events", "read_only": "true"},
            ],
            "email": "swag@nylas.com",
            "id": "scheduler-id",
            "name": "Python Tester",
        }
    ]

    def respond(arg=None):
        return 200, {}, json.dumps(available)

    page_id = "cv4ei7syx10uvsxbs21ccsezf"
    mocked_responses.add_callback(
        responses.GET,
        "https://api.schedule.nylas.com/manage/pages/" + page_id + "/calendars",
        callback=respond,
        content_type="application/json",
    )
@pytest.fixture
def mock_scheduler_upload_image(mocked_responses, api_url):
    """Mock PUT /manage/pages/{id}/upload-image with canned upload metadata."""
    def respond(arg=None):
        upload_info = {
            "filename": "test.png",
            "originalFilename": "test.png",
            "publicUrl": "https://public.nylas.com/test.png",
            "signedUrl": "https://signed.nylas.com/test.png",
        }
        return 200, {}, json.dumps(upload_info)

    page_id = "cv4ei7syx10uvsxbs21ccsezf"
    mocked_responses.add_callback(
        responses.PUT,
        "https://api.schedule.nylas.com/manage/pages/" + page_id + "/upload-image",
        callback=respond,
        content_type="application/json",
    )
@pytest.fixture
def mock_scheduler_provider_availability(mocked_responses, api_url):
    """Mock provider availability for both google and o365 endpoints."""
    availability = {
        "busy": [
            {
                "end": 1636731958,
                "start": 1636728347,
            },
        ],
        "email": "test@example.com",
        "name": "John Doe",
    }

    def respond(arg=None):
        return 200, {}, json.dumps(availability)

    mocked_responses.add_callback(
        responses.GET,
        re.compile(
            "https://api.schedule.nylas.com/schedule/availability/(google|o365)"
        ),
        callback=respond,
    )
@pytest.fixture
def mock_scheduler_timeslots(mocked_responses, api_url):
    """Mock scheduler timeslot listing, booking, confirmation and cancellation."""
    scheduler_time_slots = [
        {
            "account_id": "test-account-id",
            "calendar_id": "test-calendar-id",
            "emails": ["test@example.com"],
            "end": 1636731958,
            "host_name": "www.hostname.com",
            "start": 1636728347,
        },
    ]
    booking_confirmation = {
        "account_id": "test-account-id",
        "additional_field_values": {
            "test": "yes",
        },
        "calendar_event_id": "test-event-id",
        "calendar_id": "test-calendar-id",
        "edit_hash": "test-edit-hash",
        "end_time": 1636731958,
        "id": 123,
        "is_confirmed": False,
        "location": "Earth",
        "recipient_email": "recipient@example.com",
        "recipient_locale": "en_US",
        "recipient_name": "Recipient Doe",
        "recipient_tz": "America/New_York",
        "start_time": 1636728347,
        "title": "Test Booking",
    }
    cancel_payload = {
        "success": True,
    }
    def list_timeslots(arg=None):
        return 200, {}, json.dumps(scheduler_time_slots)
    def book_timeslot(arg=None):
        return 200, {}, json.dumps(booking_confirmation)
    def confirm_booking(arg=None):
        # Flips the shared booking_confirmation dict, so later responses in
        # the same test also report is_confirmed=True.
        booking_confirmation["is_confirmed"] = True
        return 200, {}, json.dumps(booking_confirmation)
    def cancel_booking(arg=None):
        return 200, {}, json.dumps(cancel_payload)
    timeslots_url = re.compile("https://api.schedule.nylas.com/schedule/.*/timeslots")
    confirm_url = re.compile("https://api.schedule.nylas.com/schedule/.*/.*/confirm")
    cancel_url = re.compile("https://api.schedule.nylas.com/schedule/.*/.*/cancel")
    mocked_responses.add_callback(
        responses.GET,
        timeslots_url,
        callback=list_timeslots,
    )
    mocked_responses.add_callback(
        responses.POST,
        timeslots_url,
        callback=book_timeslot,
    )
    mocked_responses.add_callback(
        responses.POST,
        confirm_url,
        callback=confirm_booking,
    )
    mocked_responses.add_callback(
        responses.POST,
        cancel_url,
        callback=cancel_booking,
    )
@pytest.fixture
def mock_components(mocked_responses, api_url):
    """Mock GET /component/* with a single canned component record."""
    component_records = [
        {
            "active": True,
            "settings": {},
            "allowed_domains": [],
            "id": "component-id",
            "name": "PyTest Component",
            "public_account_id": "account-id",
            "public_application_id": "application-id",
            "type": "agenda",
            "created_at": "2021-10-22T18:02:10.000Z",
            "updated_at": "2021-10-22T18:02:10.000Z",
            "accessed_at": None,
            "public_token_id": "token-id",
        },
    ]

    def respond(arg=None):
        return 200, {}, json.dumps(component_records)

    mocked_responses.add_callback(
        responses.GET,
        re.compile(api_url + "/component/*"),
        callback=respond,
        content_type="application/json",
    )
@pytest.fixture
def mock_create_webhook(mocked_responses, api_url, client_id):
    """Mock POST /a/{client_id}/webhooks.

    Returns 400 for malformed JSON or a payload missing any required field
    (callback_url, a list-valued triggers, state); otherwise stores the
    submitted values on the webhook record and echoes it back.
    """
    webhook = {"application_id": "application-id", "id": "webhook-id", "version": "1.0"}

    def callback(request):
        try:
            payload = json.loads(request.body)
        except ValueError:
            return 400, {}, ""
        # The original joined these checks with "and", so a payload missing
        # only one field slipped past validation (and a payload without
        # "triggers" raised KeyError during validation itself).  Each
        # requirement must hold independently, hence "or" on the failures.
        if (
            "callback_url" not in payload
            or "triggers" not in payload
            or not isinstance(payload["triggers"], list)
            or "state" not in payload
        ):
            return 400, {}, ""
        webhook["callback_url"] = payload["callback_url"]
        webhook["triggers"] = payload["triggers"]
        webhook["state"] = payload["state"]
        return 200, {}, json.dumps(webhook)

    endpoint = "{base}/a/{client_id}/webhooks".format(base=api_url, client_id=client_id)
    mocked_responses.add_callback(
        responses.POST,
        endpoint,
        callback=callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_webhooks(mocked_responses, api_url, client_id):
    """Mock list/get/update/delete for /a/{client_id}/webhooks, echoing the
    id parsed from the request URL back into the shared webhook record."""
    webhook = {
        "application_id": "application-id",
        "callback_url": "https://your-server.com/webhook",
        "id": "webhook-id",
        "state": "active",
        "triggers": ["message.created"],
        "version": "2.0",
    }
    def list_callback(request):
        return 200, {}, json.dumps([webhook])
    def single_callback(request):
        webhook["id"] = get_id_from_url(request.url)
        return 200, {}, json.dumps(webhook)
    def update_callback(request):
        try:
            payload = json.loads(request.body)
        except ValueError:
            return 400, {}, ""
        if "state" in payload:
            webhook["state"] = payload["state"]
        webhook["id"] = get_id_from_url(request.url)
        return 200, {}, json.dumps(webhook)
    def delete_callback(request):
        return 200, {}, json.dumps({"success": True})
    def get_id_from_url(url):
        # The webhook id is the last path segment of the URL.
        path = URLObject(url).path
        return path.rsplit("/", 1)[-1]
    endpoint_single = re.compile(
        "{base}/a/{client_id}/webhooks/*".format(base=api_url, client_id=client_id)
    )
    endpoint_list = "{base}/a/{client_id}/webhooks".format(
        base=api_url, client_id=client_id
    )
    mocked_responses.add_callback(
        responses.GET,
        endpoint_list,
        content_type="application/json",
        callback=list_callback,
    )
    mocked_responses.add_callback(
        responses.GET,
        endpoint_single,
        content_type="application/json",
        callback=single_callback,
    )
    mocked_responses.add_callback(
        responses.PUT,
        endpoint_single,
        content_type="application/json",
        callback=update_callback,
    )
    mocked_responses.add_callback(
        responses.DELETE,
        endpoint_single,
        content_type="application/json",
        callback=delete_callback,
    )
@pytest.fixture
def mock_resources(mocked_responses, api_url):
    """Mock GET /resources with two canned room resources."""
    resources = [
        {
            "object": "room_resource",
            "email": "training-room-1A@google.com",
            "name": "Google Training Room",
            "building": "San Francisco",
            "capacity": "10",
            "floor_name": "7",
            "floor_number": None,
        },
        {
            "object": "room_resource",
            "email": "training-room@outlook.com",
            "name": "Microsoft Training Room",
            "building": "Seattle",
            "capacity": "5",
            "floor_name": "Office",
            "floor_number": "2",
        },
    ]
    endpoint = re.compile(api_url + "/resources")
    mocked_responses.add(
        responses.GET,
        endpoint,
        body=json.dumps(resources),
        status=200,
        content_type="application/json",
    )
@pytest.fixture
def mock_job_statuses(mocked_responses, api_url):
    """Mock GET /job-statuses with two canned job status records."""
    job_status = [
        {
            "account_id": "test_account_id",
            "action": "save_draft",
            "created_at": 1622846160,
            "id": "test_id",
            "job_status_id": "test_job_status_id",
            "object": "message",
            "status": "successful",
        },
        {
            "account_id": "test_account_id",
            "action": "update_event",
            "created_at": 1622846160,
            "id": "test_id_2",
            "job_status_id": "test_job_status_id_2",
            "object": "event",
            "status": "successful",
        },
    ]
    endpoint = re.compile(api_url + "/job-statuses")
    mocked_responses.add(
        responses.GET,
        endpoint,
        body=json.dumps(job_status),
        status=200,
        content_type="application/json",
    )
@pytest.fixture
def mock_account_management(mocked_responses, api_url, account_id, client_id):
    """Mock account upgrade (billing_state=paid) and downgrade (cancelled)."""
    account = {
        "account_id": account_id,
        "email_address": "ben.bitdiddle1861@gmail.com",
        "id": account_id,
        "name": "Ben Bitdiddle",
        "object": "account",
        "provider": "gmail",
        "organization_unit": "label",
        "billing_state": "paid",
    }
    # Serialize the "paid" state first; the same dict is then mutated and
    # re-serialized for the "cancelled" variant.
    paid_response = json.dumps(account)
    account["billing_state"] = "cancelled"
    cancelled_response = json.dumps(account)
    upgrade_url = "{base}/a/{client_id}/accounts/{id}/upgrade".format(
        base=api_url, id=account_id, client_id=client_id
    )
    downgrade_url = "{base}/a/{client_id}/accounts/{id}/downgrade".format(
        base=api_url, id=account_id, client_id=client_id
    )
    mocked_responses.add(
        responses.POST,
        upgrade_url,
        content_type="application/json",
        status=200,
        body=paid_response,
    )
    mocked_responses.add(
        responses.POST,
        downgrade_url,
        content_type="application/json",
        status=200,
        body=cancelled_response,
    )
@pytest.fixture
def mock_revoke_all_tokens(mocked_responses, api_url, account_id, client_id):
    """Mock POST /a/{client_id}/accounts/{id}/revoke-all to report success."""
    url = "{base}/a/{client_id}/accounts/{id}/revoke-all".format(
        base=api_url, id=account_id, client_id=client_id
    )
    mocked_responses.add(
        responses.POST,
        url,
        body=json.dumps({"success": True}),
        status=200,
        content_type="application/json",
    )
@pytest.fixture
def mock_application_details(mocked_responses, api_url, client_id):
    """Mock GET (canned details) and PUT (echo body) for /a/{client_id}."""
    details_url = "{base}/a/{client_id}".format(base=api_url, client_id=client_id)
    details = {
        "application_name": "My New App Name",
        "icon_url": "http://localhost:5555/icon.png",
        "redirect_uris": [
            "http://localhost:5555/login_callback",
            "localhost",
            "https://customerA.myapplication.com/login_callback",
        ],
    }

    def echo_update(request):
        # PUT round-trips whatever JSON the client sent.
        return 200, {}, json.dumps(json.loads(request.body))

    mocked_responses.add(
        responses.GET,
        details_url,
        body=json.dumps(details),
        status=200,
        content_type="application/json",
    )
    mocked_responses.add_callback(
        responses.PUT,
        details_url,
        callback=echo_update,
        content_type="application/json",
    )
@pytest.fixture
def mock_ip_addresses(mocked_responses, api_url, client_id):
    """Mock GET /a/{client_id}/ip_addresses with a static address list."""
    payload = {
        "ip_addresses": [
            "39.45.235.23",
            "23.10.341.123",
            "12.56.256.654",
            "67.20.987.231",
        ],
        "updated_at": 1552072984,
    }
    mocked_responses.add(
        responses.GET,
        "{base}/a/{client_id}/ip_addresses".format(base=api_url, client_id=client_id),
        body=json.dumps(payload),
        status=200,
        content_type="application/json",
    )
@pytest.fixture
def mock_token_info(mocked_responses, api_url, account_id, client_id):
    """Mock POST .../accounts/{id}/token-info with static token metadata."""
    token_info = {
        "created_at": 1563496685,
        "scopes": "calendar,email,contacts",
        "state": "valid",
        "updated_at": 1563496685,
    }
    url = "{base}/a/{client_id}/accounts/{id}/token-info".format(
        base=api_url, id=account_id, client_id=client_id
    )
    mocked_responses.add(
        responses.POST,
        url,
        body=json.dumps(token_info),
        status=200,
        content_type="application/json",
    )
@pytest.fixture
def mock_free_busy(mocked_responses, api_url):
    """Mock POST /calendars/free-busy, echoing the first requested email back
    with two canned busy slots."""
    free_busy_url = "{base}/calendars/free-busy".format(base=api_url)
    def free_busy_callback(request):
        payload = json.loads(request.body)
        # Only the first email in the request shows up in the response.
        email = payload["emails"][0]
        resp_data = [
            {
                "object": "free_busy",
                "email": email,
                "time_slots": [
                    {
                        "object": "time_slot",
                        "status": "busy",
                        "start_time": 1409594400,
                        "end_time": 1409598000,
                    },
                    {
                        "object": "time_slot",
                        "status": "busy",
                        "start_time": 1409598000,
                        "end_time": 1409599000,
                    },
                ],
            }
        ]
        return 200, {}, json.dumps(resp_data)
    mocked_responses.add_callback(
        responses.POST,
        free_busy_url,
        content_type="application/json",
        callback=free_busy_callback,
    )
@pytest.fixture
def mock_availability(mocked_responses, api_url):
    """Mock POST /calendars/availability and its /consecutive variant, both
    returning the same pair of free time slots."""
    availability_url = "{base}/calendars/availability".format(base=api_url)

    def availability_callback(request):
        # Parse the body to mirror the real API's JSON validation; its
        # content does not influence the canned response.
        json.loads(request.body)
        slots = [
            {
                "object": "time_slot",
                "status": "free",
                "start_time": 1409594400,
                "end_time": 1409598000,
            },
            {
                "object": "time_slot",
                "status": "free",
                "start_time": 1409598000,
                "end_time": 1409599000,
            },
        ]
        return 200, {}, json.dumps({"object": "availability", "time_slots": slots})

    for url in (availability_url, "{url}/consecutive".format(url=availability_url)):
        mocked_responses.add_callback(
            responses.POST,
            url,
            content_type="application/json",
            callback=availability_callback,
        )
@pytest.fixture
def mock_sentiment_analysis(mocked_responses, api_url, account_id):
    """Mock PUT /neural/sentiment: message_id requests get a fixed list
    response; raw-text requests echo the text with a neutral score."""
    sentiment_url = "{base}/neural/sentiment".format(base=api_url)
    def sentiment_callback(request):
        payload = json.loads(request.body)
        # Message-based analysis returns a list; text-based returns a dict.
        if "message_id" in payload:
            response = [
                {
                    "account_id": account_id,
                    "processed_length": 11,
                    "sentiment": "NEUTRAL",
                    "sentiment_score": 0.30000001192092896,
                    "text": "hello world",
                }
            ]
        else:
            response = {
                "account_id": account_id,
                "processed_length": len(payload["text"]),
                "sentiment": "NEUTRAL",
                "sentiment_score": 0.30000001192092896,
                "text": payload["text"],
            }
        return 200, {}, json.dumps(response)
    mocked_responses.add_callback(
        responses.PUT,
        sentiment_url,
        content_type="application/json",
        callback=sentiment_callback,
    )
@pytest.fixture
def mock_extract_signature(mocked_responses, api_url, account_id):
    """Mock PUT /neural/signature, returning a message with its extracted
    signature and (unless disabled) parsed contact details."""
    signature_url = "{base}/neural/signature".format(base=api_url)
    def signature_callback(request):
        payload = json.loads(request.body)
        response = {
            "account_id": account_id,
            "body": "This is the body<div>Nylas Swag</div><div>Software Engineer</div><div>123-456-8901</div><div>swag@nylas.com</div><img src='https://example.com/logo.png' alt='https://example.com/link.html'></a>",
            "signature": "Nylas Swag\n\nSoftware Engineer\n\n123-456-8901\n\nswag@nylas.com",
            "date": 1624029503,
            "from": [
                {
                    "email": "swag@nylas.com",
                    "name": "Nylas Swag",
                },
            ],
            "id": "abc123",
            "model_version": "0.0.1",
            "object": "message",
            "provider_name": "gmail",
            "subject": "Subject",
            "to": [
                {
                    "email": "me@nylas.com",
                    "name": "me",
                },
            ],
        }
        # Contacts are parsed unless the caller explicitly sent
        # parse_contacts=False.
        if "parse_contacts" not in payload or payload["parse_contacts"] is True:
            response["contacts"] = {
                "job_titles": ["Software Engineer"],
                "links": [
                    {
                        "description": "string",
                        "url": "https://example.com/link.html",
                    },
                ],
                "phone_numbers": ["123-456-8901"],
                "emails": ["swag@nylas.com"],
                "names": [
                    {
                        "first_name": "Nylas",
                        "last_name": "Swag",
                    },
                ],
            }
        return 200, {}, json.dumps([response])
    mocked_responses.add_callback(
        responses.PUT,
        signature_url,
        content_type="application/json",
        callback=signature_callback,
    )
@pytest.fixture
def mock_categorize(mocked_responses, api_url, account_id):
    """Mock PUT /neural/categorize (categorized message) and POST
    /neural/categorize/feedback (recategorization record)."""
    categorize_url = "{base}/neural/categorize".format(base=api_url)
    def categorize_callback(request):
        response = {
            "account_id": account_id,
            "body": "This is a body",
            "categorizer": {
                "categorized_at": 1627076720,
                "category": "feed",
                "model_version": "6194f733",
                "subcategories": ["ooo"],
            },
            "date": 1624029503,
            "from": [
                {
                    "email": "swag@nylas.com",
                    "name": "Nylas Swag",
                },
            ],
            "id": "abc123",
            "object": "message",
            "provider_name": "gmail",
            "subject": "Subject",
            "to": [
                {
                    "email": "me@nylas.com",
                    "name": "me",
                },
            ],
        }
        return 200, {}, json.dumps([response])
    def recategorize_callback(request):
        response = {
            "account_id": account_id,
            "category": "conversation",
            "is_primary_label": "true",
            "message_id": "abc123",
            "recategorized_at": "2021-07-17T00:04:22.006193",
            "recategorized_from": {
                "category": "feed",
                "model_version": "6194f733",
                "subcategories": ["ooo"],
            },
            "subcategories": ["ooo"],
        }
        return 200, {}, json.dumps(response)
    mocked_responses.add_callback(
        responses.PUT,
        categorize_url,
        content_type="application/json",
        callback=categorize_callback,
    )
    mocked_responses.add_callback(
        responses.POST,
        "{}/feedback".format(categorize_url),
        content_type="application/json",
        callback=recategorize_callback,
    )
@pytest.fixture
def mock_ocr_request(mocked_responses, api_url, account_id):
    """Mock PUT /neural/ocr, returning a two-page OCR result for a PDF."""
    def ocr_callback(request):
        ocr_result = {
            "account_id": account_id,
            "content_type": "application/pdf",
            "filename": "sample.pdf",
            "id": "abc123",
            "object": "file",
            "ocr": ["This is page 1", "This is page 2"],
            "processed_pages": 2,
            "size": 20,
        }
        return 200, {}, json.dumps(ocr_result)

    mocked_responses.add_callback(
        responses.PUT,
        "{base}/neural/ocr".format(base=api_url),
        callback=ocr_callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_clean_conversation(mocked_responses, api_url, account_id):
    """Mock PUT /neural/conversation and GET for the inline image file the
    cleaned conversation references by cid."""
    conversation_url = "{base}/neural/conversation".format(base=api_url)
    file_url = "{base}/files/1781777f666586677621".format(base=api_url)
    def conversation_callback(request):
        response = {
            "account_id": account_id,
            "body": "<img src='cid:1781777f666586677621' /> This is the body",
            "conversation": "<img src='cid:1781777f666586677621' /> This is the conversation",
            "date": 1624029503,
            "from": [
                {
                    "email": "swag@nylas.com",
                    "name": "Nylas Swag",
                },
            ],
            "id": "abc123",
            "model_version": "0.0.1",
            "object": "message",
            "provider_name": "gmail",
            "subject": "Subject",
            "to": [
                {
                    "email": "me@nylas.com",
                    "name": "me",
                },
            ],
        }
        return 200, {}, json.dumps([response])
    def file_callback(request):
        response = {
            "id": "1781777f666586677621",
            "content_type": "image/png",
            "filename": "hello.png",
            "account_id": account_id,
            "object": "file",
            "size": 123,
        }
        return 200, {}, json.dumps(response)
    mocked_responses.add_callback(
        responses.PUT,
        conversation_url,
        content_type="application/json",
        callback=conversation_callback,
    )
    mocked_responses.add_callback(
        responses.GET,
        file_url,
        content_type="application/json",
        callback=file_callback,
    )
@pytest.fixture
def mock_deltas_since(mocked_responses, api_url):
    """Mock GET /delta with one create delta for every object type
    (contact, file, message, draft, thread, event, folder, label)."""
    deltas = {
        "cursor_start": "start_cursor",
        "cursor_end": "end_cursor",
        "deltas": [
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "given_name": "First",
                    "surname": "Last",
                    "id": "id-1234",
                    "object": "contact",
                },
                "cursor": "contact_cursor",
                "event": "create",
                "id": "delta-1",
                "object": "contact",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "content_type": "text/plain",
                    "filename": "sample.txt",
                    "id": "id-1234",
                    "object": "file",
                    "size": 123,
                },
                "cursor": "file_cursor",
                "event": "create",
                "id": "delta-2",
                "object": "file",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "to": [{"email": "foo", "name": "bar"}],
                    "subject": "foo",
                    "id": "id-1234",
                    "object": "message",
                },
                "cursor": "message_cursor",
                "event": "create",
                "id": "delta-3",
                "object": "message",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "to": [{"email": "foo", "name": "bar"}],
                    "subject": "foo",
                    "id": "id-1234",
                    "object": "draft",
                },
                "cursor": "draft_cursor",
                "event": "create",
                "id": "delta-4",
                "object": "draft",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "subject": "Subject",
                    "id": "id-1234",
                    "object": "thread",
                },
                "cursor": "thread_cursor",
                "event": "create",
                "id": "delta-5",
                "object": "thread",
            },
            {
                "attributes": {
                    "id": "id-1234",
                    "title": "test event",
                    "when": {"time": 1409594400, "object": "time"},
                    "participants": [
                        {
                            "name": "foo",
                            "email": "bar",
                            "status": "noreply",
                            "comment": "This is a comment",
                            "phone_number": "416-000-0000",
                        },
                    ],
                    "ical_uid": "id-5678",
                    "master_event_id": "master-1234",
                    "original_start_time": 1409592400,
                },
                "cursor": "event_cursor",
                "event": "create",
                "id": "delta-6",
                "object": "event",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "id": "id-1234",
                    "object": "folder",
                    "name": "inbox",
                    "display_name": "name",
                },
                "cursor": "folder_cursor",
                "event": "create",
                "id": "delta-7",
                "object": "folder",
            },
            {
                "attributes": {
                    "account_id": "aid-5678",
                    "id": "id-1234",
                    "object": "label",
                    "name": "inbox",
                },
                "cursor": "label_cursor",
                "event": "create",
                "id": "delta-8",
                "object": "label",
            },
        ],
    }
    def callback(request):
        return 200, {}, json.dumps(deltas)
    mocked_responses.add_callback(
        responses.GET,
        "{base}/delta".format(base=api_url),
        callback=callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_delta_cursor(mocked_responses, api_url):
    """Mock POST /delta/latest_cursor with a fixed cursor value."""
    def latest_cursor(request):
        return 200, {}, json.dumps({"cursor": "cursor"})

    mocked_responses.add_callback(
        responses.POST,
        "{base}/delta/latest_cursor".format(base=api_url),
        content_type="application/json",
        callback=latest_cursor,
    )
@pytest.fixture
def mock_delta_stream(mocked_responses, api_url):
    """Mock /delta/streaming (bare delta object) and /delta/longpoll (the
    same delta wrapped in a cursor envelope)."""
    delta = {
        "attributes": {
            "account_id": "aid-5678",
            "given_name": "First",
            "surname": "Last",
            "id": "id-1234",
            "object": "contact",
        },
        "cursor": "contact_cursor",
        "event": "create",
        "id": "delta-1",
        "object": "contact",
    }
    def stream_callback(request):
        return 200, {}, json.dumps(delta)
    def longpoll_callback(request):
        response = {
            "cursor_start": "start_cursor",
            "cursor_end": "end_cursor",
            "deltas": [delta],
        }
        return 200, {}, json.dumps(response)
    mocked_responses.add_callback(
        responses.GET,
        "{base}/delta/streaming".format(base=api_url),
        callback=stream_callback,
        content_type="application/json",
    )
    mocked_responses.add_callback(
        responses.GET,
        "{base}/delta/longpoll".format(base=api_url),
        callback=longpoll_callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_outbox(mocked_responses, api_url):
    """Mock the v2 outbox: POST and PATCH echo a job status reflecting
    send_at / retry_limit_datetime from the payload; DELETE is empty."""
    outbox_job_status = {
        "job_status_id": "job-status-id",
        "status": "pending",
        "original_data": {
            "subject": "With Love, from Nylas",
            "to": [{"name": "Me", "email": "test@email.com"}],
            "body": "This email was sent using the Nylas email API. Visit https://nylas.com for details.",
        },
        "account_id": "account-id",
    }
    def return_job_status(request):
        # NOTE: response aliases (and mutates) the shared outbox_job_status
        # dict, so field values persist across calls within a test.
        response = outbox_job_status
        payload = json.loads(request.body)
        if "send_at" in payload:
            response["original_data"]["send_at"] = payload["send_at"]
            response["original_data"]["original_send_at"] = payload["send_at"]
            response["original_data"]["retry_limit_datetime"] = payload["send_at"]
        if "retry_limit_datetime" in payload:
            response["original_data"]["retry_limit_datetime"] = payload[
                "retry_limit_datetime"
            ]
        return 200, {}, json.dumps(response)
    def delete_callback(request):
        return 200, {}, ""
    outbox_endpoint = "{base}/v2/outbox".format(base=api_url)
    endpoint_single = re.compile("{outbox_url}/*".format(outbox_url=outbox_endpoint))
    mocked_responses.add_callback(
        responses.POST,
        outbox_endpoint,
        callback=return_job_status,
        content_type="application/json",
    )
    mocked_responses.add_callback(
        responses.PATCH,
        endpoint_single,
        callback=return_job_status,
        content_type="application/json",
    )
    mocked_responses.add_callback(
        responses.DELETE,
        endpoint_single,
        callback=delete_callback,
        content_type="application/json",
    )
@pytest.fixture
def mock_outbox_send_grid(mocked_responses, api_url):
    """Mock outbox SendGrid onboarding: verification status and subuser delete."""
    def verified_status(request):
        payload = {"results": {"domain_verified": True, "sender_verified": True}}
        return 200, {}, json.dumps(payload)

    def remove_subuser(request):
        return 200, {}, ""

    mocked_responses.add_callback(
        responses.GET,
        "{base}/v2/outbox/onboard/verified_status".format(base=api_url),
        callback=verified_status,
        content_type="application/json",
    )
    mocked_responses.add_callback(
        responses.DELETE,
        "{base}/v2/outbox/onboard/subuser".format(base=api_url),
        callback=remove_subuser,
        content_type="application/json",
    )
|
nylas/nylas-python
|
tests/conftest.py
|
Python
|
mit
| 76,424
|
[
"VisIt"
] |
9b2e94610a39686fe829101c50a02a2574b80c98cf7e35ecd89fe0b3e750546f
|
#!/usr/bin/env python
""" A unittest script for the IHMPSession module. """
import unittest
from cutlass import iHMPSession
from CutlassTestUtil import CutlassTestUtil
# pylint: disable=W0703, C1801
class IHMPSessionTest(unittest.TestCase):
    """Unit tests for the iHMPSession module.

    Checks the constructor, the simple session properties, and every
    create_XXX() factory method exposed by a session.
    """

    # Credentials used for every session built by these tests.
    username = "test"
    password = "test"
    # Shared property-testing helper, built once in setUpClass().
    util = None

    @classmethod
    def setUpClass(cls):
        """ Setup for the unittest. """
        cls.util = CutlassTestUtil()

    def _session(self):
        """Return a fresh iHMPSession built from the test credentials."""
        return iHMPSession(IHMPSessionTest.username, IHMPSessionTest.password)

    def _check_create(self, factory_name, class_name):
        """Call session.<factory_name>() and verify the result is a
        non-None instance of cutlass.<class_name>.

        The expected class is looked up by name so that each test only
        pulls in the cutlass symbol it actually needs.
        """
        import cutlass
        expected_cls = getattr(cutlass, class_name)
        instance = getattr(self._session(), factory_name)()
        # assertIsNotNone/assertIsInstance replace the failUnless/failIf
        # aliases, which were deprecated in 2.7 and removed in Python 3.12.
        self.assertIsNotNone(instance)
        self.assertIsInstance(instance, expected_cls)

    def testCreateSession(self):
        """ Test the constructor for creating sessions. """
        session = None
        try:
            session = self._session()
        except Exception as err:
            self.fail("iHMPSession constructor raised: %s" % err)
        self.assertIsNotNone(session)

    def testUsername(self):
        """ Test the username property. """
        session = self._session()
        self.util.stringTypeTest(self, session, "username")
        self.util.stringPropertyTest(self, session, "username")

    def testPassword(self):
        """ Test the password property. """
        session = self._session()
        self.util.stringTypeTest(self, session, "password")
        self.util.stringPropertyTest(self, session, "password")

    def testPort(self):
        """ Test the port property. """
        session = self._session()
        self.util.intTypeTest(self, session, "port")
        self.util.intPropertyTest(self, session, "port")

    def testSSL(self):
        """ Test the ssl property. """
        session = self._session()
        self.util.boolTypeTest(self, session, "ssl")
        self.util.boolPropertyTest(self, session, "ssl")

    def testCreate16SDnaPrep(self):
        """ Test the create_16s_dna_prep() method. """
        self._check_create("create_16s_dna_prep", "SixteenSDnaPrep")

    def testCreate16SRawSeqSet(self):
        """ Test the create_16s_raw_seq_set() method. """
        self._check_create("create_16s_raw_seq_set", "SixteenSRawSeqSet")

    def testCreate16STrimmedSeqSet(self):
        """ Test the create_16s_trimmed_seq_set() method. """
        self._check_create("create_16s_trimmed_seq_set", "SixteenSTrimmedSeqSet")

    def testCreateAbundanceMatrix(self):
        """ Test the create_abundance_matrix() method. """
        self._check_create("create_abundance_matrix", "AbundanceMatrix")

    def testCreateAnnotation(self):
        """ Test the create_annotation() method. """
        self._check_create("create_annotation", "Annotation")

    def testCreateClusteredSeqSet(self):
        """ Test the create_clustered_seq_set() method. """
        self._check_create("create_clustered_seq_set", "ClusteredSeqSet")

    def testCreateCytokine(self):
        """ Test the create_cytokine() method. """
        self._check_create("create_cytokine", "Cytokine")

    def testCreateHostAssayPrep(self):
        """ Test the create_host_assay_prep() method. """
        self._check_create("create_host_assay_prep", "HostAssayPrep")

    def testCreateHostSeqPrep(self):
        """ Test the create_host_seq_prep() method. """
        self._check_create("create_host_seq_prep", "HostSeqPrep")

    def testCreateHostTranscriptomicsRawSeqSet(self):
        """ Test the create_host_transcriptomics_raw_seq_set() method. """
        self._check_create("create_host_transcriptomics_raw_seq_set",
                           "HostTranscriptomicsRawSeqSet")

    def testCreateHostWgsRawSeqSet(self):
        """ Test the create_host_wgs_raw_seq_set() method. """
        self._check_create("create_host_wgs_raw_seq_set", "HostWgsRawSeqSet")

    def testCreateMetabolome(self):
        """ Test the create_metabolome() method. """
        self._check_create("create_metabolome", "Metabolome")

    def testCreateMicrobTranscriptomicsRawSeqSet(self):
        """ Test the create_microb_transcriptomics_raw_seq_set() method. """
        self._check_create("create_microb_transcriptomics_raw_seq_set",
                           "MicrobTranscriptomicsRawSeqSet")

    def testCreateMicrobiomeAssayPrep(self):
        """ Test the create_microbiome_assay_prep() method. """
        self._check_create("create_microbiome_assay_prep", "MicrobiomeAssayPrep")

    def testCreateProject(self):
        """ Test the create_project() method. """
        self._check_create("create_project", "Project")

    def testCreateProteome(self):
        """ Test the create_proteome() method. """
        self._check_create("create_proteome", "Proteome")

    def testCreateSample(self):
        """ Test the create_sample() method. """
        self._check_create("create_sample", "Sample")

    def testCreateSerology(self):
        """ Test the create_serology() method. """
        self._check_create("create_serology", "Serology")

    def testCreateSubject(self):
        """ Test the create_subject() method. """
        self._check_create("create_subject", "Subject")

    def testCreateSubjectAttribute(self):
        """ Test the create_subject_attr() method. """
        self._check_create("create_subject_attr", "SubjectAttribute")

    def testCreateStudy(self):
        """ Test the create_study() method. """
        self._check_create("create_study", "Study")

    def testCreateVisit(self):
        """ Test the create_visit() method. """
        self._check_create("create_visit", "Visit")

    def testCreateVisitAttribute(self):
        """ Test the create_visit_attr() method. """
        self._check_create("create_visit_attr", "VisitAttribute")

    def testWgsAssembledSeqSet(self):
        """ Test the create_wgs_assembled_seq_set() method. """
        self._check_create("create_wgs_assembled_seq_set", "WgsAssembledSeqSet")

    def testWgsDnaPrep(self):
        """ Test the create_wgs_dna_prep() method. """
        self._check_create("create_wgs_dna_prep", "WgsDnaPrep")

    def testWgsRawSeqSet(self):
        """ Test the create_wgs_raw_seq_set() method. """
        self._check_create("create_wgs_raw_seq_set", "WgsRawSeqSet")

    def testCreateObjectMethods(self):
        """
        Test the create_XXX() methods, where XXX is the name
        of a particular node type.
        """
        session = self._session()

        node_types = [
            "16s_dna_prep", "16s_raw_seq_set", "16s_trimmed_seq_set",
            "annotation", "abundance_matrix", "clustered_seq_set",
            "cytokine", "host_assay_prep", "host_epigenetics_raw_seq_set",
            "host_seq_prep", "host_transcriptomics_raw_seq_set",
            "host_wgs_raw_seq_set", "lipidome", "metabolome",
            "microbiome_assay_prep", "microb_transcriptomics_raw_seq_set",
            "project", "proteome", "sample", "sample_attr", "serology",
            "study", "subject", "subject_attr", "viral_seq_set", "visit",
            "visit_attr", "wgs_assembled_seq_set", "wgs_raw_seq_set",
            "wgs_dna_prep"
        ]

        for node_type in node_types:
            instance = session.create_object(node_type)
            self.assertIsNotNone(instance)


if __name__ == '__main__':
    unittest.main()
|
ihmpdcc/cutlass
|
tests/test_ihmp_session.py
|
Python
|
mit
| 12,662
|
[
"VisIt"
] |
9e162e25703ee51dea3dee986f29e00327c1f8b999dce73a9217566b16d2359d
|
from pylab import *
from scipy.spatial.distance import pdist, squareform, cdist
import theano
import theano.tensor as T
from .sinkhorn import sinkhorn_log, _sinkhorn_log, SinkhornOptions
from ..math_utils.kernels import _squared_distances, _gaussian_cross_kernels
from collections import namedtuple
VarifoldOptions = namedtuple('VarifoldOptions', 'orientation_weight orientation_order')
class Varifold:
    """A varifold encoded as a finite sum of weighted diracs on
    R^n x G_{n-1}(R^n) ~= R^n x (R^n/{R}):

        mu_Q = sum_i weight_i * dirac_(point_i, normals_i^orth)
    """

    def __init__(self, points, normals, weights):
        # Sanity checks on the dirac data.
        assert (points.shape == normals.shape), "A varifold is given by an array of coordinates + an array of directions + a vector of weights."
        assert (isvector(weights) and (len(weights) == len(points)) ), "Points, normals and weights should have the same length."
        # Unit-length check intentionally disabled: length information is
        # carried by 'weights', not by the direction vectors.
        #assert ( all( abs(sqrt(sum( normals ** 2, 1 )) - 1) < 0.001 ) ), "Tangents should be unit vectors, as length is encoded in 'weights'."
        self.points = points
        self.normals = normals
        self.weights = weights
        # Ambient dimension n, read off the coordinate array.
        self.dimension = points.shape[1]
class Varifolds :
    """Namespace collecting the varifold data-attachment terms: theano
    symbolic versions and a legacy pure-numpy implementation."""

    # Theano symbolic methods ======================================================
    @staticmethod
    def _kernel_matching(q1_x, q1_mu, q1_n, xt_x, xt_mu, xt_n, radius) :
        """
        Theano symbolic implementation of the kernel_matching method.
        As of today, we only implemented the Cauchy-Binet angular kernel.
        """
        # Gaussian spatial kernels: model/model, model/target, target/target.
        K_qq, K_qx, K_xx = _gaussian_cross_kernels(q1_x, xt_x, radius)
        # Angular terms: Gram matrices of the direction vectors, raised to
        # the 4th power (orientation-independent since the exponent is even).
        V_qq = (q1_n.dot(q1_n.T)) ** 4
        V_qx = (q1_n.dot(xt_n.T)) ** 4
        V_xx = (xt_n.dot(xt_n.T)) ** 4
        q1_mu = q1_mu.dimshuffle(0,'x') # column
        xt_mu = xt_mu.dimshuffle(0,'x')
        # Quadratic form in the weights; the second list element is a zero
        # matrix shaped like the cross Gram matrix — presumably a
        # placeholder for per-pair transport info (TODO confirm callers).
        return [.5 * ( T.sum(K_qq * V_qq * q1_mu.dot(q1_mu.T)) \
                     + T.sum(K_xx * V_xx * xt_mu.dot(xt_mu.T)) \
                     - 2*T.sum(K_qx * V_qx * q1_mu.dot(xt_mu.T)) ), 0.*q1_n.dot(xt_n.T)]

    @staticmethod
    def _sinkhorn_matching(q1_x, q1_mu, q1_n, xt_x, xt_mu, xt_n, cost_options, sinkhorn_options) :
        """
        Theano symbolic implementation of the sinkhorn data attachment term.

        We use a cost ("quadratic distance") function of the form
        C( (x_i, n_i), (y_j, m_j) ) = .5 * |x_i-y_j|^2 * (1. + a * (1 - (n_i.m_j)^{k}) )
        where x_i, y_j are positions and n_i, m_j two orientations, encoded as unit-length vectors.

        Remember that two curve/surface elements will be "matched" by the sinkhorn algorithm
        if the associated pairwise cost is *small* : the adjunction of a factor
            " a * (1 - (n_i.m_j)^{k}) "
        therefore allows us to match preferentially shape elements whose orientations are "similar",
        i.e. such that (n_i.m_j)^{k} = cos^k(theta) ~ 1, where theta is the angle between the normals.

        'a' : cost_options.orientation_weight controls the orientation influence (a = 0 -> simple measure matching)
        'k' : cost_options.orientation_order controls the angular selectivity.
              k = 1 will result in a current-like data attachment term.
              k = 2 is akin to a Cauchy-Binet kernel, which cannot distinguish two rotated cross
              (cos^2(theta) + cos^2(pi/2+theta) = 1 for any value of theta).
              We recommend using selective *even* values of k in the range 4-8, depending on your data.
        """
        rho = sinkhorn_options.rho
        # Cost function :
        C = .5 * _squared_distances(q1_x, xt_x) * (1. + cost_options.orientation_weight * (1 - (q1_n.dot(xt_n.T))**cost_options.orientation_order ))
        # Alternative additive (instead of multiplicative) angular penalty,
        # kept for reference:
        #C = .5 * _squared_distances(q1_x, xt_x) + .5 * ( cost_options.orientation_weight * (1 - (q1_n.dot(xt_n.T))**cost_options.orientation_order ))
        mu = q1_mu
        nu = xt_mu
        if rho == inf : # Balanced transport : we normalize the total weights
            mu = mu / T.sum(mu)
            nu = nu / T.sum(nu)
        return _sinkhorn_log( mu, nu, C, sinkhorn_options)

    # Legacy Python methods ======================================================
    @staticmethod
    def kernel_matching(Q, Xt, s) :
        """
        Implementation of the kernel data attachment term :

        d(Q, Xt) = .5 * sum_{i,j} mu_i*mu_j * (v_i, v_j)^2 k( | Q_i - Q_j | )
                 - .5 * 2*sum_{i,j} mu_i*nu_j * (v_i, w_j)^2 k( | Q_i - Xt_j | )
                 + .5 * sum_{i,j} nu_i*nu_j * (w_i, w_j)^2 k( | Xt_i - Xt_j | )
        where
        Q  = sum_i mu_i \dirac_{ ( Q_i, v_i^{\orth}) }
        Xt = sum_j nu_i \dirac_{ (Xt_i, w_i^{\orth}) }
        and where k( d ) = exp( - d^2/(2*s^2) ) is a gaussian kernel
        with std = s.

        This can be seen as a ``quadratic'' matching tool between curves/surfaces :
        whereas the Current approach was comparing tangent spaces / normals
        making an angle 'theta'
        with the 'linear' term cos (theta),
        we are now using the 'quadratic' term cos^2(theta),
        which is orientation-independent.

        Given two segments [a->b] and [c->d], we have replaced
        ( R_(-90)b-a , R_(-90)d-c ) = ( b-a, d-c )
                                    = |b-a|*|d-c|* cos(theta)
        with
        |b-a|*|d-c|*(n_{b-a},n_{d-c})^2 = |b-a|*|d-c|*cos^2(theta)

        Instead of a quadratic kernel, one could also use a pseudo-gaussian kernel
        exp( -cos^2(theta) / (2 s^2) ),
        thus gaining a new parameter 's' (angular sensitivity)
        at a greater computational cost.
        """
        # We use a Gaussian kernel
        kernel = lambda x : exp(- x / (2* s ** 2)) # kernel is given |x|^2 as input
        kernelp = lambda x : - exp(- x / (2* s ** 2)) / (2* s ** 2)
        # Simpler variable names...
        q = Q.points
        xt = Xt.points
        mu = Q.weights
        nu = Xt.weights
        v = Q.normals
        w = Xt.normals
        # Compute the squared distances between points in the euclidean space
        q_dists = squareform(pdist( q, 'sqeuclidean'))
        cross_dists = cdist( q, xt, 'sqeuclidean')
        xt_dists = squareform(pdist(xt, 'sqeuclidean'))
        # Matrices of scalar products between normals ('SG' stands for Squared Grassmanian)
        Gvv = v @ v.T
        Gvw = v @ w.T
        Gww = w @ w.T
        SGvv = Gvv**2
        SGvw = Gvw**2
        SGww = Gww**2
        # Matrices of products mu_i mu_j, ... ('T' stands for Tensor product)
        Tmumu = atleast_2d(mu).T * mu
        Tmunu = atleast_2d(mu).T * nu
        Tnunu = atleast_2d(nu).T * nu
        # We're gonna need those two for later calculations
        ker_qq = kernel(q_dists)
        ker_qxt = kernel(cross_dists)
        # Weighted kernel matrices: spatial x angular x weight terms.
        K_qq = Tmumu * SGvv * ker_qq
        K_qxt = Tmunu * SGvw * ker_qxt
        K_xtxt = Tnunu * SGww * kernel(xt_dists)
        # Total data attachment term :
        C = .5 * ( sum(K_qq) - 2*sum(K_qxt) + sum(K_xtxt) )
        # Computation of the directional derivatives
        # with respect to the dirac positions
        Kp_qq = Tmumu * SGvv * kernelp(q_dists)
        Kp_qxt = Tmunu * SGvw * kernelp(cross_dists)
        dq = zeros(q.shape)
        # Gradient of C w.r.t. each coordinate d of the positions q.
        for d in range(q.shape[1]) :
            qi_min_qj = atleast_2d(q[:,d]).T - atleast_2d( q[:,d])
            qi_min_xtj = atleast_2d(q[:,d]).T - atleast_2d(xt[:,d])
            dq[:,d] = ( sum( qi_min_qj * Kp_qq , 1) \
                      - 2* sum( qi_min_xtj * Kp_qxt, 1) )
        # Computation of the directional derivatives
        # with respect to the normals v
        dv = zeros(v.shape)
        for d in range(v.shape[1]) :
            dv[:,d] = sum( 2 * (v[:,d] - atleast_2d(v[:,d]).T * Gvv) * Gvv * ker_qq , 1) \
                    - sum( 2 * (w[:,d] - atleast_2d(v[:,d]).T * Gvw) * Gvw * ker_qxt, 1)
        # Computation of the directional derivatives
        # with respect to the weights mu
        dmu = zeros(mu.shape)
        dmu = sum( mu * SGvv * ker_qq , 1) \
            - sum( nu * SGvw * ker_qxt, 1)
        # Package the gradient as a varifold over the same support.
        dV = Varifold(dq, dv, dmu)
        return (C, dV)
|
jeanfeydy/lddmm-ot
|
LDDMM_Python/lddmm_python/modules/data_attachment/varifolds.py
|
Python
|
mit
| 7,383
|
[
"DIRAC",
"Gaussian"
] |
4fc04faf3478dc39eebb3d34ca01b018964e9b47a5432dfeefb4a8e05f3f6fa1
|
#!/usr/bin/env python
# NOTE: the original first line was "#/urs/bin/env python" -- a typo ("urs")
# and, lacking the "!", not a shebang at all. Fixed above.
"""Installation script for the machivellian package."""
import os
from distutils.core import setup

__version__ = "0.0.1-dev"

# Trove classifiers for PyPI, kept as a readable text block and parsed
# into the list that setup() expects. The original defined this string
# but never passed it to setup(); it also misspelled "Statistics".
classes = """
    Development Status :: 1 - Planning
    License :: OSI Approved :: BSD License
    Topic :: Software Development :: Libraries
    Topic :: Scientific/Engineering
    Topic :: Scientific/Engineering :: Statistics
    Programming Language :: Python
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.4
    Operating System :: Unix
    Operating System :: POSIX
    Operating System :: MacOS :: MacOS X
"""
classifiers = [line.strip() for line in classes.split('\n') if line.strip()]

setup(name='machivellian',
      version=__version__,
      license='BSD2',
      description="Library for testing monte carlo effect sizes",
      long_description=("Library for testing monte carlo effect sizes"),
      author="J W Debelius",
      author_email="jdebelius@ucsd.edu",
      maintainer="J W Debelius",
      # NOTE(review): maintainer domain differs from the author's
      # ("ucsd.com" vs "ucsd.edu") -- confirm which address is intended.
      maintainer_email="jdebelius@ucsd.com",
      packages=['machivellian', 'machivellian.tests'],
      classifiers=classifiers,
      install_requires=['IPython >= 4.2.0',
                        'matplotlib >= 1.5.1',
                        'numpy >= 1.10.0',
                        'pandas >= 0.18.0',
                        'scipy >= 0.15.1',
                        'scikit-bio >= 0.4.2',
                        'nose >= 1.3.7',
                        ],
      )
|
jwdebelius/Machiavellian
|
setup.py
|
Python
|
bsd-3-clause
| 1,305
|
[
"scikit-bio"
] |
e044d4b29de9968033c0ace64f422db52bbda200e05f7940f24403dc1152e9fb
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.cornell_shci.shci import SHCI, SHCISCF
|
gkc1000/pyscf
|
pyscf/cornell_shci/__init__.py
|
Python
|
apache-2.0
| 685
|
[
"PySCF"
] |
922c940383e7b385412b97e9f7ca08922c8481d1faa54c5a86a50843e7de497e
|
from __future__ import with_statement
import os
import re
import platform
import time
import fnmatch
import tempfile
from os import environ
from sos.utilities import (ImporterHelper,
import_module,
shell_out)
from sos.plugins import IndependentPlugin, ExperimentalPlugin
from sos import _sos as _
from textwrap import fill
from six import print_
from six.moves import input
def import_policy(name):
    """Import the sos policy module ``name``; return None if it is absent."""
    try:
        return import_module("sos.policies.%s" % name, Policy)
    except ImportError:
        return None
def load(cache={}, sysroot=None):
    """Locate and instantiate the policy matching the running system.

    The mutable default ``cache`` is deliberate: it memoises the chosen
    policy across calls for the lifetime of the process. Falls back to
    GenericPolicy when no distribution-specific policy matches.
    """
    if 'policy' in cache:
        return cache.get('policy')

    import sos.policies
    for module in ImporterHelper(sos.policies).get_modules():
        for policy in import_policy(module):
            if policy.check():
                cache['policy'] = policy(sysroot=sysroot)

    if 'policy' not in cache:
        cache['policy'] = GenericPolicy()
    return cache['policy']
class PackageManager(object):
    """Encapsulates a package manager. If you provide a query_command to the
    constructor it should print each package on the system in the following
    format:

        package name|package.version\n

    You may also subclass this class and provide a get_pkg_list method to
    build the list of packages and versions.
    """

    query_command = None
    timeout = 30
    chroot = None

    def __init__(self, query_command=None, chroot=None):
        # Cache of {name: {'name': ..., 'version': [...]}} filled lazily.
        self.packages = {}
        if query_command:
            self.query_command = query_command
        if chroot:
            self.chroot = chroot

    def all_pkgs_by_name(self, name):
        """
        Return a list of packages that match name (fnmatch glob).
        """
        return fnmatch.filter(self.all_pkgs().keys(), name)

    def all_pkgs_by_name_regex(self, regex_name, flags=0):
        """
        Return a list of packages whose names match regex_name.
        """
        reg = re.compile(regex_name, flags)
        return [pkg for pkg in self.all_pkgs().keys() if reg.match(pkg)]

    def pkg_by_name(self, name):
        """
        Return the last package matching name, or None if none match.
        """
        pkgmatches = self.all_pkgs_by_name(name)
        # Reuse the already-computed match list instead of querying twice.
        return pkgmatches[-1] if pkgmatches else None

    def get_pkg_list(self):
        """
        Build and return the package dictionary:
            {'package_name': {'name': 'package_name',
                              'version': ['major', 'minor', ...]}}

        Always returns a dict; the original returned None when no
        query_command was configured, which made all_pkgs() cache None
        and crash every subsequent name lookup.
        """
        if self.query_command:
            cmd = self.query_command
            pkg_list = shell_out(
                cmd, timeout=self.timeout, chroot=self.chroot
            ).splitlines()
            for pkg in pkg_list:
                if '|' not in pkg:
                    continue
                # maxsplit=1 tolerates a stray '|' in the version field.
                name, version = pkg.split("|", 1)
                self.packages[name] = {
                    'name': name,
                    'version': version.split(".")
                }
        return self.packages

    def all_pkgs(self):
        """
        Return the (cached) dictionary of all packages.
        """
        if not self.packages:
            self.packages = self.get_pkg_list()
        return self.packages

    def pkg_nvra(self, pkg):
        """Split an RPM-style 'name-version-release-arch' string into a
        (name, version, release, arch) tuple."""
        fields = pkg.split("-")
        version, release, arch = fields[-3:]
        name = "-".join(fields[:-3])
        return (name, version, release, arch)
class Policy(object):
    """Base class for sos distribution policies.

    A policy describes how sos behaves on a given platform: which plugin
    tagging classes are valid, how packages are queried, and how the final
    report is named and presented to the user. Subclasses override
    check() so load() can pick the right policy at runtime.
    """

    # Preamble shown to the user before collection; the %(...)s fields are
    # filled from the class attributes below in get_msg().
    msg = _("""\
This command will collect system configuration and diagnostic information \
from this %(distro)s system. An archive containing the collected information \
will be generated in %(tmpdir)s.

For more information on %(vendor)s visit:

  %(vendor_url)s

The generated archive may contain data considered sensitive and its content \
should be reviewed by the originating organization before being passed to \
any third party.

No changes will be made to system configuration.
%(vendor_text)s
""")

    # Branding defaults; distribution subclasses override these.
    distro = "Unknown"
    vendor = "Unknown"
    vendor_url = "http://www.example.com/"
    vendor_text = ""
    # Value installed into $PATH by set_exec_path(); subclasses set this.
    PATH = ""

    # True when sos itself runs inside a container (subclasses may flip it).
    _in_container = False
    # Path to the host's root filesystem as seen from this process.
    _host_sysroot = '/'

    def __init__(self, sysroot=None):
        """Subclasses that choose to override this initializer should call
        super() to ensure that they get the required platform bits attached.
        super(SubClass, self).__init__(). Policies that require runtime
        tests to construct PATH must call self.set_exec_path() after
        modifying PATH in their own initializer."""
        self._parse_uname()
        self.report_name = self.hostname
        self.case_id = None
        self.package_manager = PackageManager()
        self._valid_subclasses = []
        self.set_exec_path()
        # NOTE(review): this overwrites the class default '/' with None
        # when no sysroot is given -- confirm host_sysroot() callers
        # tolerate a None return.
        self._host_sysroot = sysroot

    def get_valid_subclasses(self):
        # IndependentPlugin is always valid regardless of distribution.
        return [IndependentPlugin] + self._valid_subclasses

    def set_valid_subclasses(self, subclasses):
        self._valid_subclasses = subclasses

    def del_valid_subclasses(self):
        del self._valid_subclasses

    valid_subclasses = property(get_valid_subclasses,
                                set_valid_subclasses,
                                del_valid_subclasses,
                                "list of subclasses that this policy can "
                                "process")

    def check(self):
        """
        This function is responsible for determining if the underlying system
        is supported by this policy.
        """
        return False

    def in_container(self):
        """ Returns True if sos is running inside a container environment.
        """
        return self._in_container

    def host_sysroot(self):
        """Return the host filesystem root recorded at construction time."""
        return self._host_sysroot

    def dist_version(self):
        """
        Return the OS version
        """
        pass

    def get_preferred_archive(self):
        """
        Return the class object of the preferred archive format for this
        platform
        """
        from sos.archive import TarFileArchive
        return TarFileArchive

    def get_archive_name(self):
        """
        This function should return the filename of the archive without the
        extension.
        """
        # NOTE(review): appending the case id mutates self.report_name, so
        # a second call appends it again -- confirm this is only called
        # once per run.
        if self.case_id:
            self.report_name += "." + self.case_id
        return "sosreport-%s-%s" % (self.report_name,
                                    time.strftime("%Y%m%d%H%M%S"))

    def get_tmp_dir(self, opt_tmp_dir):
        """Return opt_tmp_dir, or the system default temp dir when unset."""
        if not opt_tmp_dir:
            return tempfile.gettempdir()
        return opt_tmp_dir

    def match_plugin(self, plugin_classes):
        """Pick the most specific class from a list of plugin candidates."""
        if len(plugin_classes) > 1:
            for p in plugin_classes:
                # Give preference to the first listed tagging class
                # so that e.g. UbuntuPlugin is chosen over DebianPlugin
                # on an Ubuntu installation.
                if issubclass(p, self.valid_subclasses[0]):
                    return p
        return plugin_classes[0]

    def validate_plugin(self, plugin_class, experimental=False):
        """
        Verifies that the plugin_class should execute under this policy
        """
        valid_subclasses = [IndependentPlugin] + self.valid_subclasses
        if experimental:
            valid_subclasses += [ExperimentalPlugin]
        return any(issubclass(plugin_class, class_) for
                   class_ in valid_subclasses)

    def pre_work(self):
        """
        This function is called prior to collection.
        """
        pass

    def post_work(self):
        """
        This function is called after the sosreport has been generated.
        """
        pass

    def pkg_by_name(self, pkg):
        """Delegate a package lookup to this policy's package manager."""
        return self.package_manager.pkg_by_name(pkg)

    def _parse_uname(self):
        # Cache platform facts used for report naming and plugin checks.
        (system, node, release,
         version, machine, processor) = platform.uname()
        self.system = system
        self.hostname = node
        self.release = release
        # Assumes a kernel version string like "#1 SMP ..." where the
        # second whitespace-separated token flags SMP kernels -- TODO
        # confirm on non-Linux platforms.
        self.smp = version.split()[1] == "SMP"
        self.machine = machine

    def set_commons(self, commons):
        # Shared state (cmdline options, tmpdir, ...) injected by SoSReport.
        self.commons = commons

    def _set_PATH(self, path):
        # Mutates the process environment, affecting all later subprocesses.
        environ['PATH'] = path

    def set_exec_path(self):
        self._set_PATH(self.PATH)

    def is_root(self):
        """This method should return true if the user calling the script is
        considered to be a superuser"""
        return (os.getuid() == 0)

    def get_preferred_hash_name(self):
        """Returns the string name of the hashlib-supported checksum algorithm
        to use"""
        return "md5"

    def display_results(self, archive, directory, checksum):
        """Print the final report location and checksum to the terminal."""
        # Display results is called from the tail of SoSReport.final_work()
        #
        # Logging is already shutdown and all terminal output must use the
        # print() call.

        # make sure a report exists
        if not archive and not directory:
            return False

        self._print()
        if archive:
            self._print(_("Your sosreport has been generated and saved "
                          "in:\n %s") % archive)
        else:
            self._print(_("sosreport build tree is located at : %s" %
                          directory))

        self._print()
        if checksum:
            self._print(_("The checksum is: ") + checksum)
            self._print()
        self._print(_("Please send this file to your support "
                      "representative."))
        self._print()

    def _print(self, msg=None):
        """A wrapper around print that only prints if we are not running in
        quiet mode"""
        if not self.commons['cmdlineopts'].quiet:
            if msg:
                print_(msg)
            else:
                print_()

    def get_msg(self):
        """This method is used to prepare the preamble text to display to
        the user in non-batch mode. If your policy sets self.distro that
        text will be substituted accordingly. You can also override this
        method to do something more complicated."""
        width = 72
        _msg = self.msg % {'distro': self.distro, 'vendor': self.vendor,
                           'vendor_url': self.vendor_url,
                           'vendor_text': self.vendor_text,
                           'tmpdir': self.commons['tmpdir']}
        _fmt = ""
        for line in _msg.splitlines():
            _fmt = _fmt + fill(line, width, replace_whitespace=False) + '\n'
        return _fmt
class GenericPolicy(Policy):
    """Fallback policy returned when no other policy can be loaded.

    This should allow for IndependentPlugins to be executed on any system.
    """

    def get_msg(self):
        # Only the detected system name is available for substitution here.
        substitutions = {'distro': self.system}
        return self.msg % substitutions
class LinuxPolicy(Policy):
    """This policy is meant to be an abc class that provides common
    implementations used in Linux distros"""

    distro = "Linux"
    vendor = "None"
    PATH = "/bin:/sbin:/usr/bin:/usr/sbin"
    # Cached result of get_preferred_hash_name().
    _preferred_hash_name = None

    def __init__(self, sysroot=None):
        super(LinuxPolicy, self).__init__(sysroot=sysroot)

    def get_preferred_hash_name(self):
        """Return the checksum algorithm to use: sha256 on FIPS-enabled
        systems, md5 otherwise. The result is cached after the first call."""
        if self._preferred_hash_name:
            return self._preferred_hash_name

        checksum = "md5"
        try:
            # Context manager ensures the file is closed even if read()
            # fails; the original leaked the handle on a read error.
            with open("/proc/sys/crypto/fips_enabled", "r") as fp:
                if fp.read().find("1") >= 0:
                    checksum = "sha256"
        except (IOError, OSError):
            # No FIPS support exposed by this kernel; stick with md5.
            pass

        self._preferred_hash_name = checksum
        return checksum

    def default_runlevel(self):
        """Return the default runlevel parsed from /etc/inittab, or 3
        when the file is missing or has no initdefault entry."""
        try:
            with open("/etc/inittab") as fp:
                text = fp.read()
            # Narrowed from a bare except: only I/O and parse failures
            # should fall back to the default.
            return int(re.findall(r"id:(\d{1}):initdefault:", text)[0])
        except (IOError, OSError, IndexError, ValueError):
            return 3

    def kernel_version(self):
        """Return the running kernel release string."""
        return self.release

    def host_name(self):
        """Return the system hostname."""
        return self.hostname

    def is_kernel_smp(self):
        """Return True if the running kernel is an SMP kernel."""
        return self.smp

    def get_arch(self):
        """Return the machine architecture string."""
        return self.machine

    def get_local_name(self):
        """Returns the name used in the pre_work step"""
        return self.host_name()

    def sanitize_report_name(self, report_name):
        # Strip anything that isn't safe in a file name.
        return re.sub(r"[^-a-zA-Z.0-9]", "", report_name)

    def sanitize_case_id(self, case_id):
        return re.sub(r"[^-a-z,A-Z.0-9]", "", case_id)

    def pre_work(self):
        # this method will be called before the gathering begins
        cmdline_opts = self.commons['cmdlineopts']
        customer_name = cmdline_opts.customer_name
        localname = customer_name if customer_name else self.get_local_name()
        caseid = cmdline_opts.case_id if cmdline_opts.case_id else ""

        if not cmdline_opts.batch and not \
                cmdline_opts.quiet:
            try:
                self.report_name = input(_("Please enter your first initial "
                                           "and last name [%s]: ") % localname)
                self.case_id = input(_("Please enter the case id "
                                       "that you are generating this "
                                       "report for [%s]: ") % caseid)
                self._print()
            except (KeyboardInterrupt, EOFError):
                # User aborted interactive input; fall back to defaults.
                # (Narrowed from a bare except that swallowed everything.)
                self._print()
                self.report_name = localname

        if len(self.report_name) == 0:
            self.report_name = localname

        if customer_name:
            self.report_name = customer_name

        if cmdline_opts.case_id:
            self.case_id = cmdline_opts.case_id

        self.report_name = self.sanitize_report_name(self.report_name)
        if self.case_id:
            self.case_id = self.sanitize_case_id(self.case_id)

        if (self.report_name == ""):
            self.report_name = "default"
        return
# vim: set et ts=4 sw=4 :
|
harigowtham/sos
|
sos/policies/__init__.py
|
Python
|
gpl-2.0
| 13,869
|
[
"VisIt"
] |
d46098c474cd0e7bf278d359416dbe9403416f7bde48619c2e311834641f2606
|
#!/usr/bin/python2.4
# Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for io.FileNode'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
import os
import StringIO
import unittest
from grit.node import misc
from grit.node import io
from grit.node import empty
from grit import grd_reader
from grit import util
class FileNodeUnittest(unittest.TestCase):
    """Unit tests for grit's io.FileNode (legacy Python 2 codebase)."""

    def testGetPath(self):
        # Build a minimal <grit> tree by hand: root -> translations -> file,
        # then check that the file's path resolves relative to base_dir.
        root = misc.GritNode()
        root.StartParsing(u'grit', None)
        root.HandleAttribute(u'latest_public_release', u'0')
        root.HandleAttribute(u'current_release', u'1')
        root.HandleAttribute(u'base_dir', ur'..\resource')
        translations = empty.TranslationsNode()
        translations.StartParsing(u'translations', root)
        root.AddChild(translations)
        file_node = io.FileNode()
        file_node.StartParsing(u'file', translations)
        file_node.HandleAttribute(u'path', ur'flugel\kugel.pdf')
        translations.AddChild(file_node)
        root.EndParsing()
        # Backslash paths must be normalized to forward slashes.
        self.failUnless(file_node.GetFilePath() ==
                        util.normpath(
                            os.path.join(ur'../resource', ur'flugel/kugel.pdf')))

    def testLoadTranslations(self):
        # Smoke test: parsing a grd with a <file> translation and running
        # gatherers should not raise.
        grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <file path="fr.xtb" lang="fr" />
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''), util.PathFromRoot('grit/test/data'))
        grd.RunGatherers(recursive=True)
        self.failUnless(True)

    def testIffyness(self):
        # <file> wrapped in an <if expr="lang == 'fr'"> must work for both
        # a non-matching ('en') and matching ('fr') output context.
        grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
      <grit latest_public_release="2" source_lang_id="en-US" current_release="3" base_dir=".">
        <translations>
          <if expr="lang == 'fr'">
            <file path="fr.xtb" lang="fr" />
          </if>
        </translations>
        <release seq="3">
          <messages>
            <message name="ID_HELLO">Hello!</message>
            <message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
          </messages>
        </release>
      </grit>'''), util.PathFromRoot('grit/test/data'))
        grd.SetOutputContext('en', {})
        grd.RunGatherers(recursive=True)
        grd.SetOutputContext('fr', {})
        grd.RunGatherers(recursive=True)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
meego-tablet-ux/meego-app-browser
|
tools/grit/grit/node/io_unittest.py
|
Python
|
bsd-3-clause
| 2,812
|
[
"xTB"
] |
e3eb34abe20f6e1238cccf59139e6cc26b8eae25b5e8861366341c803190ef78
|
from __future__ import print_function
from hotbit import Element
from hotbit import Hotbit
import numpy as np
from ase.units import Bohr,Hartree
from hotbit import Atoms
from ase.io import read
from ase.io.trajectory import Trajectory
from box import NullCalculator
from copy import copy
from sys import stdout
import os
class RepulsiveFitting:
def __init__(self,symbol1,symbol2,r_cut,s=None,k=3,txt=None,tol=0.005):
"""
Class for fitting the short-range repulsive potential.
Fitting uses eV and Angstrom also internally, only the
output file (.par) is in Hartrees and Bohrs. The weights
used in the methods append_* are the inverse of standard
deviations (weight=1/sigma). For more details of the
approach used here, look at Pekka Koskinen and Ville Makinen,
Computational Materials Science 47, 237 (2009), page 244.
Parameters:
===========
symbol1: chemical symbol for the first element
symbol2: chemical symbol for the second element
txt: output filename or None for stdout
r_cut: the repulsion cutoff
s: smoothing parameter. If None, use s = N - np.sqrt(2*N)
where N is the number of data points.
k: order of spline for V_rep'(R), cubic by default.
Uses smaller order if not enough points to fit V_rep'(R)
tol: tolerance for distances still considered the same
Usage:
======
1. Initialize class
* rep = RepulsiveFitting('Au','Au',r_cut=3.3,s=100)
2. Collect data from structures. The data collected is points r_i
and V_rep'(r_i), that is, repulsive distance and the force.
Use the append_* methods.
* rep.append_dimer(weight=0.5,calc=calc0,R=2.49,comment='Au2')
* rep.append_energy_curve(weight=1.0,calc=calc0,
traj='dimer_curve.traj',label='DFT dimer',comment='dimer curve')
3. Given the set of points [r_i,V_rep'(r_i)], fit a spline with given order.
* fit()
Fitting will produce a spline-interpolated V_rep'(r), which is then integrated
to given spline-interpolated V_rep(r).
4. Output repulsion into a file and plot the repulsion
* rep.write_par('Au_Au_no_repulsion.par',filename='Au_Au_repulsion.par')
* rep.plot('AuAu_repulsion.pdf')
"""
self.elm1=Element(symbol1)
self.elm2=Element(symbol2)
self.sym1=self.elm1.get_symbol()
self.sym2=self.elm2.get_symbol()
self.r_cut = r_cut
self.s = s
self.k = k
self.tol = tol
self.r_dimer = None
self.deriv=[]
self.comments=''
self.scale=1.025 # scaling factor for scalable systems
self.structures = []
self.v=None
if txt==None:
self.txt=stdout
else:
self.txt=open(txt,'a')
print('Fitting repulsion curve between %s and %s' % (self.sym1, self.sym2), file=self.txt)
self.colors = ['red','green','blue','cyan','yellow','orange','magenta','pink','black']
self.colori = 0
def __call__(self,r,der=0):
"""
Return repulsion or its derivative.
This is the already fitted and integrated V_rep(R).
parameters:
===========
r: radius (Angstroms)
der: der=0 for V_rep(r)
der=1 for V_rep'(r)
"""
if self.v==None:
raise AssertionError('Repulsion is not yet fitted')
return self.v(r,der=der)
def get_range(self):
"""
Return rmin,rmax for fitting points.
"""
r = np.array([s[0] for s in self.deriv])
return r.min(), r.max()
self.deriv
    def plot(self, filename=None):
        """
        Plot vrep and derivative together with fit info.

        parameters:
        ===========
        filename: graphics output file name
        """
        try:
            import pylab as pl
        except:
            raise AssertionError('pylab could not be imported')
        # sample the fitted repulsion and its derivative on [0, r_cut]
        r=np.linspace(0,self.r_cut)
        v=[self(x,der=0) for x in r]
        vp=[self(x,der=1) for x in r]
        rmin=0.95*min([d[0] for d in self.deriv])
        rmax = 1.1*self.r_cut
        fig=pl.figure()
        pl.subplots_adjust(wspace=0.25)
        # Vrep
        pl.subplot(1,2,1)
        pl.ylabel(r'$V_{rep}(r)$ (eV)')
        pl.xlabel(r'$r$ ($\AA$)')
        if self.r_dimer!=None:
            pl.axvline(x=self.r_dimer,c='r',ls=':')
        pl.axvline(x=self.r_cut,c='r',ls=':')
        pl.plot(r,v)
        pl.ylim(ymin=0,ymax=self(rmin))
        pl.xlim(xmin=rmin, xmax=self.r_cut)
        # Vrep'
        pl.subplot(1,2,2)
        pl.ylabel(r'$dV_{rep}(r)/dr$ (eV/$\AA$)')
        pl.xlabel(r'$r$ ($\AA$)')
        pl.plot(r,vp,label=r'$dV_{rep}(r)/dr$')
        # overlay the individual fitting points; marker size scales with weight
        for s in self.deriv:
            pl.scatter( [s[0]],[s[1]],s=100*s[2],c=s[3],label=s[4])
        pl.axvline(x=self.r_cut,c='r',ls=':')
        if self.r_dimer!=None:
            pl.axvline(x=self.r_dimer,c='r',ls=':')
        # y-range: from the most negative derivative point up to 10% of |ymin|
        ymin = 0
        for point in self.deriv:
            if rmin<=point[0]<=rmax: ymin = min(ymin,point[1])
        ymax = np.abs(ymin)*0.1
        pl.axhline(0,ls='--',c='k')
        if self.r_dimer!=None:
            pl.text(self.r_dimer, ymax, r'$r_{dimer}$')
        pl.text(self.r_cut, ymax, r'$r_{cut}$')
        pl.xlim(xmin=rmin, xmax=rmax)
        pl.ylim(ymin=ymin, ymax=ymax)
        #pl.subtitle('Fitting for %s and %s' % (self.sym1, self.sym2))
        pl.rc('font', size=10)
        pl.rc('legend',fontsize=8)
        pl.legend(loc=4)
        # default output name unless an explicit filename was given
        file = '%s_%s_repulsion.pdf' % (self.sym1, self.sym2)
        if filename!=None:
            file=filename
        pl.savefig(file)
        pl.clf()
def add_comment(self,s=None):
"""
Append some comment for par-file.
These comments will end up in Hotbit's output logfile each time
fitted repulsion is used in calculations. For this reason,
use as short and concise comments as possible.
parameters:
===========
s: comment as a string
"""
if s in [None, '']:
return
add='|'
if len(self.comments)==0:
add=''
self.comments+=add+s
def fit(self):
"""
Fit spline into {r, V_rep'(r)} -data points.
"""
from scipy.interpolate import splrep, splev
self.k = min(len(self.deriv),self.k)
x = np.array([self.deriv[i][0] for i in range(len(self.deriv))])
y = np.array([self.deriv[i][1] for i in range(len(self.deriv))])
w = np.array([self.deriv[i][2] for i in range(len(self.deriv))])
# sort values so that x is in ascending order
indices = x.argsort()
x, y, w = x[indices], y[indices], w[indices]
x, y, w = self._group_closeby_points(x,y,w)
# use only points that are closer than r_cut
indices = np.where(x < self.r_cut)
x, y, w = list(x[indices]), list(y[indices]), list(w[indices])
# force the spline curve to go to zero at x=r_cut
x.append(self.r_cut)
y.append(0.0)
w.append(1E3*max(w))
if self.s == None:
# from documentation of splrep in scipy.interpolate.fitpack
self.s = len(x) - np.sqrt(2*len(x))
print("\nFitting spline for V_rep'(R) with parameters", file=self.txt)
print(" k=%i, s=%0.4f, r_cut=%0.4f\n" %(self.k, self.s, self.r_cut), file=self.txt)
tck = splrep(x, y, w, s=self.s, k=self.k)
def dv_rep(r):
return splev(r, tck)
v_rep = self._integrate_vrep(dv_rep, self.r_cut)
def potential(r, der=0):
if der == 0:
return v_rep(r)
elif der == 1:
return dv_rep(r)
else:
raise NotImplementedError("Only 0th and 1st derivatives")
self.v = potential
def _group_closeby_points(self, x, y, w):
"""
If there are many y-values with almost the same x-values,
it is impossible to make spline fit to these points.
For these points the y will be the weighted average of
the y-points and the weight is the sum of the weights of
averaged y-points.
"""
accuracy = 4 # the number of decimals to maintain
pseudo_x = np.array(x*10**accuracy, dtype=int)
groups = np.zeros(len(x), dtype=int)
g = 0
for i in range(1,len(pseudo_x)):
if pseudo_x[i] != pseudo_x[i-1]:
groups[i] = groups[i-1] + 1
else:
groups[i] = groups[i-1]
new_x = []
new_y = []
new_w = []
for g in range(max(groups)+1):
same = np.where(groups == g)
new_x.append(np.average(x[same]))
new_y.append(np.dot(y[same],w[same])/np.sum(w[same]))
new_w.append(np.sum(w[same]))
return np.array(new_x), np.array(new_y), np.array(new_w)
    def write_par(self, inputpar, filename=None):
        """
        Write the full par-file to file.

        parameters:
        ===========
        inputpar: the par-file where the repulsion is appended
        filename: output file (defaults to 'repulsion_' + inputpar)
        """
        from time import asctime
        import shutil
        if filename==None:
            filename = 'repulsion_'+inputpar
        # start from a copy of the input par-file, then append repulsion data
        shutil.copy(inputpar, filename)
        f = open(filename, 'a')
        # add comments
        print("repulsion_comment=", file=f)
        print("%s\nparameters r_cut = %0.4f Ang, s = %0.4f, k = %3i" % (asctime(),self.r_cut, self.s, self.k), file=f)
        if len(self.structures)>1:
            print("The systems used to produce this fit:", file=f)
            for data in self.structures:
                print("%20s %3s" % (data['filename'], data['charge']), file=f)
        if len(self.comments) > 0:
            print(self.comments, file=f)
        # tabulated repulsion in atomic units (Bohr, Hartree)
        print('\n\nrepulsion=', file=f)
        for r in np.linspace(0.1, self.r_cut, 100):
            print(r/Bohr, self(r)/Hartree, file=f)
        f.close()
    def _integrate_vrep(self, dv_rep, r_cut, N=100):
        """
        Integrate V'_rep(r) from r_cut to zero to get the V_rep(r)

        Integration runs inward from r_cut (where V_rep is pinned to zero)
        towards r=0, accumulating Gaussian-quadrature segments.
        Returns a spline interpolant of V_rep(r).
        """
        from box.interpolation import SplineFunction
        from scipy.integrate import quadrature
        # descending grid: r_cut -> 0
        r_g = np.linspace(r_cut, 0, N)
        dr = r_g[1] - r_g[0]
        v_rep = np.zeros(N)
        for i in range(1,len(r_g)):
            # cumulative integral: V(r_i) = V(r_{i-1}) + integral over segment
            v_rep[i] = v_rep[i-1]
            val, err = quadrature(dv_rep, r_g[i-1], r_g[i], tol=1.0e-12, maxiter=50)
            v_rep[i] += val
        # SplineFunction wants the x-values in ascending order
        return SplineFunction(r_g[::-1], v_rep[::-1])
def _set_calc(self,atoms,calc):
"""
Set calculator for given atoms.
"""
if type(atoms)==type(''):
a = read(atoms)
else:
a = atoms.copy()
c = copy(calc)
a.set_calculator(c)
return a,c
def _get_color(self,color):
""" Get next color in line if color==None """
if color==None:
index = self.colori
self.colori +=1
if self.colori == len(self.colors): self.colors=0
return self.colors[index]
else:
return color
#
# Fitting methods
#
def append_point(self,weight,R,dvrep,comment=None,label=None,color='g'):
"""
Add point to vrep'-fitting.
parameters:
===========
weight: fitting weight
R: radius (Angstroms)
dvrep: V_rep'(R) (eV/Angstroms)
comment: fitting comment for par file (replaced by label if None)
label: plotting label (replaced by comment if None)
"""
if comment==None: comment=label
if label==None: label=comment
self.deriv.append([R,dvrep,weight,color,label])
if comment!='_nolegend_':
self.add_comment(comment)
return R,dvrep,weight
    def append_scalable_system(self,weight,calc,atoms,comment=None,label=None,color=None):
        """
        Use scalable equilibrium system in repulsion fitting.

        Scalable means that atoms is an equilibrium system, which
        has only given bond lengths R, and whose dimensions can be
        scaled E_DFT(R), and, because of equilibrium, E_DFT'(R)=0.
        Hence

            E_DFT'(R) = E_wr'(R) + N*V_rep'(R) = 0
            ==> V_rep'(R) = -E_wr'(R)/N

        where E_wr = E_bs + E_coul is the DFTB energy without
        repulsion.

        parameters:
        ===========
        weight: fitting weight
        calc: Hotbit calculator (remember charge and k-points)
        atoms: filename or ase.Atoms instance
        comment: fitting comment for par file (replaced by label if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        atoms, calc = self._set_calc(atoms,calc)
        if comment==None: comment=label
        if label==None: label=comment
        e1 = atoms.get_potential_energy()
        R, N = self._get_repulsion_distances(calc)
        # scale the whole cell slightly and estimate E_wr'(R) by finite difference
        atoms.set_cell( atoms.get_cell()*self.scale, scale_atoms=True )
        e2 = atoms.get_potential_energy()
        dEwr=(e2-e1)/(self.scale*R-R)
        color = self._get_color(color)
        comment += ';w=%.1f' %weight
        self.append_point(weight,R,-dEwr/N,comment,label,color)
        print('\nAdding a scalable system %s with %i bonds at R=%.4f.' %(atoms.get_chemical_symbols(),N,R), file=self.txt)
    def append_dimer(self,weight,calc,R,comment=None,label='dimer',color=None):
        """
        Use dimer bond length in fitting.

        parameters:
        ===========
        weight: fitting weight
        calc: Hotbit calculator used in calculation
              (remember Gamma-point and charge)
        R: dimer bond length (Angstroms)
        comment: fitting comment for par-file (replaced by label if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        if comment==None: comment=label
        self.r_dimer = R
        # a dimer is trivially a 'scalable system': exactly one bond of length R
        atoms = Atoms([self.sym1,self.sym2],[(0,0,0),(R,0,0)],pbc=False)
        atoms.center(vacuum=5)
        color = self._get_color(color)
        self.append_scalable_system(weight,calc,atoms,comment=comment,label=label,color=color)
    def append_equilibrium_trajectory(self,weight,calc,traj,comment=None,label=None,color=None):
        """
        Calculates the V'rep(r) from a given equilibrium trajectory.

        The trajectory is set of three (or more, albeit not necessary) frames
        where atoms move near their equilibrium structure. To first approximation,
        the energies of these frames ARE THE SAME. This method is then
        equivalent to append_energy_curve method for given trajectory, with a flat
        energy curve.
        * Atoms should move as parallel to the fitted bonds as possible.
        * Amplitude should be small enough (say, 0.01 Angstroms)

        parameters:
        ===========
        weight: fitting weight
        calc: Hotbit calculator (remember charge and k-points)
        traj: filename for ASE trajectory (energies need not be defined)
        comment: fitting comment for par-file (replaced by comment if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        traj1 = Trajectory(traj)
        atoms2 = traj1[0].copy()
        # NullCalculator yields zero energies -> perfectly flat energy curve
        calc2 = NullCalculator()
        atoms2.set_calculator(calc2)
        # rewrite the trajectory into a temporary file with flat energies
        tmpfile = '_tmp.traj'
        traj2 = Trajectory(tmpfile,'w',atoms2)
        for atoms1 in traj1:
            atoms2.set_positions(atoms1.get_positions())
            atoms2.set_cell( atoms1.get_cell() )
            atoms2.get_potential_energy()
            traj2.write()
        traj2.close()
        # delegate to the generic energy-curve fitting with the flat curve
        self.append_energy_curve(weight,calc,tmpfile,comment,label,color)
        os.remove(tmpfile)
        if os.path.isfile(tmpfile+'.bak'):
            os.remove(tmpfile+'.bak')
    def append_energy_slope(self,weight,p,dEdp,p0,calc,traj,comment=None,label=None,color=None):
        """
        Calculates the V'rep(r) at one point using trajectory over parameters p.

        Trajectory is calculated using parameters p, giving E(p), where E is the total energy
        without Vrep(r). The pair distance R=R(p). At p=p0, we set dE/dp|p=p0=dEdp, from which
        we can set V'rep(R(p)) as

                      dEdp - dE/dp(p0)
        V'rep(r) =  -------------------
                        N * dR/dp

        parameters:
        ===========
        weight: fitting weight
        p: parameter list
        dEdp: slope of energy at p0
        p0: the point where energy slope is set
        calc: Hotbit calculator (remember charge and k-points)
        traj: filename for ASE trajectory, or Trajectory
              object itself
        comment: fitting comment for par-file (replaced by comment if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        # NOTE(review): everything below the raise is unreachable dead code,
        # kept as a sketch of the intended implementation.
        raise NotImplementedError('This method was never tested properly.')
        from box.interpolation import SplineFunction
        R, E, N = [], [], []
        for atoms in traj:
            a, c = self._set_calc(atoms,calc)
            e = a.get_potential_energy()
            r, n = self._get_repulsion_distances(c)
            if n>0 and r<self.r_cut:
                E.append( atoms.get_potential_energy() )
                R.append(r)
                N.append(n)
        R,E,N = np.array(R), np.array(E), np.array(N)
        if np.any(N[0]!=N):
            raise NotImplementedError('The number of bonds changes during trajectory; check implementation.')
        Ef = SplineFunction(p,E)
        Rf = SplineFunction(p,R)
        color = self._get_color(color)
        comment += ';w=%.1f;N=%i' %(weight,N[0])
        return self.append_point(weight,Rf(p0),(dEdp-Ef(p0,der=1))/(N[0]*Rf(p0,der=1)),comment,label,color)
    def append_energy_curve(self,weight,calc,traj,comment=None,label=None,color=None):
        """
        Calculates the V'rep(r) from a given ase-trajectory.

        The trajectory can be anything, as long as the ONLY missing energy
        from DFTB calculation is N*V_rep(R). Hence

            E_DFT(R) = E_wr(R) + N*V_rep(R)

                         E_DFT'(R) - E_wr'(R)
            V_rep'(R) = ------------------ ,
                                 N

        where R is the nn. distance, N is the number of A-B pairs taken into account,
        and E_wr(R) = E_bs(R) + E_coul(R) is the DFTB energy without repulsion.
        At least 3 points in energy curve needed, preferably more.

        parameters:
        ===========
        weight: fitting weight
        calc: Hotbit calculator (remember charge and k-points)
        traj: filename for ASE trajectory, or Trajectory
              object itself
        comment: fitting comment for par-file (replaced by comment if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        if comment==None: comment=label
        if label==None: label=comment
        # NOTE(review): `isinstance(traj, type(Trajectory))` looks suspect —
        # it tests against the *type of* Trajectory, not Trajectory itself;
        # the apparent intent is "traj is already a Trajectory or a list".
        # Verify against the installed ase version before changing.
        if not ( isinstance(traj, type(Trajectory)) or isinstance(traj, list) ):
            print("\nAppending energy curve data from %s..." %traj, file=self.txt)
            traj = Trajectory(traj)
        else:
            print('\nAppending energy curve data...', file=self.txt)
        Edft, Ewr, N, R = [], [], [], []
        if len(traj)<3:
            raise AssertionError('At least 3 points in energy curve required.')
        for atoms in traj:
            a, c = self._set_calc(atoms,calc)
            e = a.get_potential_energy()
            r, n = self._get_repulsion_distances(c)
            # only frames with bonds below the cutoff contribute
            if n>0 and r<self.r_cut:
                Edft.append( atoms.get_potential_energy() )
                Ewr.append( e )
                R.append(r)
                N.append(n)
        Edft = np.array(Edft)
        Ewr = np.array(Ewr)
        N = np.array(N)
        R = np.array(R)
        if np.any( N-N[0]!=0 ):
            raise RuntimeError('The number of bonds changes within trajectory.')
        # sort radii because of spline
        ind = R.argsort()
        R = R[ind]
        Edft = Edft[ind]
        Ewr = Ewr[ind]
        # spline through (E_DFT - E_wr)/N; its derivative gives V_rep'(R)
        from box.interpolation import SplineFunction
        k = min(len(Edft)-2,3)
        vrep = SplineFunction(R, (Edft-Ewr)/N, k=k, s=0)
        color = self._get_color(color)
        for i, r in enumerate(R):
            # only the first point carries the legend label and comment
            if i==0:
                com = comment + ';w=%.1f' %weight
            else:
                label='_nolegend_'
                com = None
            self.append_point(weight/np.sqrt(len(R)),r, vrep(r,der=1), com, label, color)
        print("Appended %i points around R=%.4f...%.4f" %(len(N),R.min(),R.max()), file=self.txt)
    def append_homogeneous_cluster(self,weight,calc,atoms,comment='',label='',color=None):
        """
        Use homonuclear cluster in fitting, even with different bond lengths.

        Construct repulsive forces so that residual forces F_DFT-(F_wr+F_rep),
        where F_DFT are DFT forces (zero if cluster in equilibrium), F_wr are
        DFTB forces without repulsion, and F_rep are the repulsive forces.
        That is, we minimize the function

            sum_i |F_DFT_i - F_WR_i - F_rep_i|^2

        with respect a and b, where V_rep'(R) = a + b*(r-r_cut). Then, add fitting points
        from rmin to rmax, where these values span all pair distances below r_cut
        within the cluster.

        Only finite, non-periodic systems can be used.

        parameters:
        ===========
        weight: fitting weight
        calc: Hotbit calculator (remember charge and no k-points)
        atoms: filename or ASE.Atoms instance
        comment: fitting comment for par-file (replaced by label if None)
        label: plotting label (replaced by comment if None)
        color: plotting color
        """
        import numpy as np
        if type(atoms)==type(''):
            atoms = read(atoms)
        N = len(atoms)
        # DFT forces: use them if available, otherwise assume equilibrium (zero)
        try:
            f_DFT = atoms.get_forces()
            print(" Use forces", file=self.txt)
        except:
            f_DFT = np.zeros((N,3))
            print(" No forces (equilibrium cluster)", file=self.txt)
        atoms, calc = self._set_calc(atoms,calc)
        print("\nAppending homogeneous cluster.", file=self.txt)
        f_wr = atoms.get_forces()
        distances = calc.rep.get_repulsion_distances(self.sym1,self.sym2,self.r_cut)
        rmin, rmax = distances.min(), distances.max()

        def dvrep(r,p):
            """ Auxiliary first-order polynomial for repulsion derivative """
            return p[0]+p[1]*(r-self.r_cut)

        def to_minimize(p,atoms,fdft,fwr):
            """ Function sum_I |F_DFT_I - F_TB_I|^2 to minimize. """
            N = len(atoms)
            pos = atoms.get_positions()
            resid = np.zeros((N,3))
            frep = np.zeros((N,3))
            # pairwise repulsive forces from the linear V_rep' model
            for i in range(N):
                for j in range(N):
                    if i==j: continue
                    rij = pos[j]-pos[i]
                    dij = np.linalg.norm(rij)
                    if dij>self.r_cut:
                        continue
                    else:
                        frep[i] += dvrep(dij,p)*rij/dij
            resid = fdft - ( fwr + frep )
            return sum([ np.linalg.norm(resid[i])**2 for i in range(N) ])

        # fit the two linear-model parameters (a, b) by force-residual minimization
        from scipy.optimize import fmin
        p = fmin( to_minimize,[-1.0,5.0],args=(atoms,f_DFT,f_wr),xtol=1E-5,ftol=1E-5 )
        print(' Cluster: V_rep(R)=%.6f + %.6f (r-%.2f)' %(p[0],p[1],self.r_cut), file=self.txt)
        color = self._get_color(color)
        # add npp evenly spaced fitting points spanning the cluster's bond lengths
        npp = 6
        rlist = np.linspace(rmin,rmax,npp)
        for i,r in enumerate(rlist):
            if i==0:
                com = comment
                com += ';w=%.1f' %weight
            else:
                label = '_nolegend_'
                com = None
            self.append_point(weight/np.sqrt(npp), r, dvrep(r,p), com, label, color)
def _get_repulsion_distances(self,calc):
"""
Return distances below r_cut for given system in calculator.
return:
=======
R: the mean repulsion distance
N: number of bonds
"""
distances = calc.rep.get_repulsion_distances(self.sym1,self.sym2,self.r_cut)
if len(distances)==0:
return 0.0,distances
R = distances.mean()
rmin, rmax = distances.min(), distances.max()
if rmax - rmin > self.tol:
atoms = calc.get_atoms()
raise AssertionError('Bond lengths in are not the same, they vary between %.6f ... %.6f' %(rmin,rmax) )
N = len(distances)
return R,N
def write_fitting_data(self, filename, pickle=True):
f = open(filename,'wb')
if pickle:
import pickle
pickle.dump(self.deriv, f)
pickle.dump(self.structures, f)
pickle.dump(self.comments, f)
else:
print(self.deriv, file=f)
print(self.structures, file=f)
print(self.comments, file=f)
f.close()
def load_fitting_data(self, filename):
import pickle
f = open(filename,'rb')
self.deriv = pickle.load(f)
self.structures = pickle.load(f)
self.comments = pickle.load(f)
f.close()
    def _get_trajs_for_fitting(self):
        # Return the structure records collected during fitting
        # (consumed by ParametrizationTest.__init__).
        return self.structures
class ParametrizationTest:
"""
A tool to examine how well your parametrization agrees with
given ase-trajectories.
trajectories: list of trajectories you want to compare
charges: the charges of the systems in trajectories
"""
    def __init__(self, rf, pars):
        """
        Set up one Hotbit calculator and trajectory per structure record.

        rf: a RepulsiveFitting instance (source of the structure records)
        pars: list of parameter filenames to compare
        """
        from copy import copy
        from hotbit import Hotbit
        self.pars = pars
        self.trajectories = []
        self.calculators = []
        for data in rf._get_trajs_for_fitting():
            filename = data['filename']
            # NOTE(review): mutates the shared record in place — verify callers
            # do not rely on 'filename' remaining in the dict.
            del data['filename']
            c = Hotbit()
            # copy remaining record entries straight onto the calculator
            for key, value in data.items():
                c.__dict__[key] = data[key]
            self.trajectories.append(filename)
            self.calculators.append(c)
        self.points = []
        self.ref_points = []
        self.colors = ['cyan','red','orange','#8DEE1E','magenta','green','black']
def norm_to_isolated_atoms(self, atoms):
"""
Return the constant that can be used to calculate
the binding energy of the system.
"""
delta_E = 0
for atom in atoms:
delta_E -= self.E_free[atom.symbol]
return delta_E
    def get_isolated_energies(self, trajs, par):
        """
        Return the energies of an isolated atoms.

        Collects every element appearing in the first frame of each
        trajectory, then computes per-atom energy from a widely separated
        homonuclear pair (half the pair energy).
        """
        elements = []
        energies = {}
        # gather the set of element symbols present in the trajectories
        for t in trajs:
            traj = Trajectory(t)
            for atom in traj[0]:
                if not atom.symbol in elements:
                    elements.append(atom.symbol)
        el1, el2 = par.split("_")[0:2]
        for el in elements:
            ss = "%s%s" % (el, el)
            # use the fitted par-file only for the element pair it describes
            if el1 == el2 and el1 == el:
                tables = {ss:par, 'rest':'default'}
                calc = Hotbit(SCC=True, tables=tables)
            else:
                calc = Hotbit(SCC=True)
            # 200 Ang separation: effectively two isolated atoms
            atoms = Atoms(ss, ((0,0,0),(200,0,0)))
            atoms.center(vacuum=100)
            atoms.set_calculator(calc)
            energies[el] = atoms.get_potential_energy() / 2
        return energies
def compare(self):
"""
Make a comparison for all the systems.
"""
for i_par in range(len(self.pars)):
self.compare_with_par(i_par)
    def compare_with_par(self, i_par):
        """
        Make a comparison to all trajectories with given parameter-file.
        The i_par is the index to the self.pars.
        """
        import pylab as pl
        par = self.pars[i_par]
        self.E_free = self.get_isolated_energies(self.trajectories, par)
        # parameter filenames follow the '<el1>_<el2>_...' convention
        temp = par.split('_')
        symbols = "%s%s" % (temp[0],temp[1])
        tables = {symbols:par, 'rest':'default'}
        for i_traj, calc in zip(list(range(len(self.trajectories))), self.calculators):
            pl.figure(i_traj)
            pl.title(self.trajectories[i_traj])
            # reference (DFT) curve is drawn once, on the first parameter set
            if i_par == 0:
                self.plot_ref(i_traj)
            self.compare_trajectory(i_traj, calc, tables, i_par)
def compare_trajectory(self, i_traj, calc, tables, i_par):
"""
Calculate the energies for the frames in the trajectory
and plot them.
"""
frames = []
energies = []
trajectory = Trajectory(self.trajectories[i_traj])
for i, image in enumerate(trajectory):
e_tb = None
try:
atoms = Atoms(image)
c = copy(calc)
c.tables = tables
atoms.set_calculator(c)
e_tb = atoms.get_potential_energy()
except Exception as ex:
print(ex, file=self.txt)
if e_tb != None:
energies.append(e_tb)
frames.append(i)
delta_E = self.norm_to_isolated_atoms(trajectory[0])
for i in range(len(energies)):
energies[i] += delta_E
self.plot(frames, energies, i_traj, tables, i_par)
    def plot_ref(self, i_traj):
        """
        Plot the energies of a given trajectory as a function
        of the frame number.

        Uses the trajectory's stored (DFT) total energies as the reference.
        """
        import pylab as pl
        e_dft = []
        traj = Trajectory(self.trajectories[i_traj])
        for image in traj:
            e_dft.append(image.get_total_energy())
        pl.plot(e_dft, c='blue', label='DFT-energies')
    def plot(self, frames, points, i_traj, tables, i_par):
        # Plot the TB energies of one trajectory for one parametrization;
        # color cycles through self.colors by parameter index.
        import pylab as pl
        par = self.pars[i_par]
        color = self.colors[i_par % len(self.colors)]
        pl.plot(frames, points, c=color, label='TB-%s' % par)
        pl.xlabel('frame #')
        pl.ylabel('Energy (eV)')
        pl.legend()
    def run(self):
        """
        Make all the comparisons with given trajectories and parameter
        files and show the results.
        """
        import pylab as pl
        self.compare()
        # blocks until the plot windows are closed
        pl.show()
|
pekkosk/hotbit
|
hotbit/parametrization/fitting.py
|
Python
|
gpl-2.0
| 31,099
|
[
"ASE"
] |
8fecb949679705e490f10914d08f00076d11b17287d613465a49db824e8e6155
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
from unittest import TestCase
from exatomic.gaussian.editor import Editor
class TestEditor(TestCase):
    """Tests that metadata is set appropriately for Gaussian editors."""

    def test_no_meta(self):
        """Test that program metadata is set by default."""
        fl = Editor('', ignore=True)
        # consistency fix: use assertEqual (like test_with_meta) instead of
        # assertTrue(a == b) for an informative failure message
        self.assertEqual(fl.meta['program'], 'gaussian')

    def test_with_meta(self):
        """Test that passed metadata is respected and program is set."""
        fl = Editor('', meta={'meta': 'data'}, ignore=True)
        self.assertEqual(fl.meta['meta'], 'data')
        self.assertEqual(fl.meta['program'], 'gaussian')
|
exa-analytics/atomic
|
exatomic/gaussian/tests/test_editor.py
|
Python
|
apache-2.0
| 770
|
[
"Gaussian"
] |
07ac40945e8bcbec2d1c5d416eecaa7f9fcca1689275d3cab9a602b19afebb9c
|
# Generated from ABS.g4 by ANTLR 4.7
from antlr4 import *
# This class defines a complete generic visitor for a parse tree produced by ABSParser.
class ABSVisitor(ParseTreeVisitor):
# Visit a parse tree produced by ABSParser#qualified_type_identifier.
def visitQualified_type_identifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#qualified_identifier.
def visitQualified_identifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#any_identifier.
def visitAny_identifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#type_use.
def visitType_use(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#type_exp.
def visitType_exp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#paramlist.
def visitParamlist(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#param_decl.
def visitParam_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#interface_name.
def visitInterface_name(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#delta_id.
def visitDelta_id(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#EffExp.
def visitEffExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#PureExp.
def visitPureExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#GetExp.
def visitGetExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#NewExp.
def visitNewExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AsyncCallExp.
def visitAsyncCallExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#SyncCallExp.
def visitSyncCallExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#OriginalCallExp.
def visitOriginalCallExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ConstructorExp.
def visitConstructorExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FunctionExp.
def visitFunctionExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AndExp.
def visitAndExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#GreaterExp.
def visitGreaterExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#MultExp.
def visitMultExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#VarOrFieldExp.
def visitVarOrFieldExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#StringExp.
def visitStringExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#CaseExp.
def visitCaseExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AddExp.
def visitAddExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#NullExp.
def visitNullExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#EqualExp.
def visitEqualExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#VariadicFunctionExp.
def visitVariadicFunctionExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#IfExp.
def visitIfExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#OrExp.
def visitOrExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ParenExp.
def visitParenExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#LetExp.
def visitLetExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#UnaryExp.
def visitUnaryExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#IntExp.
def visitIntExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ThisExp.
def visitThisExp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#casebranch.
def visitCasebranch(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#UnderscorePattern.
def visitUnderscorePattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#IntPattern.
def visitIntPattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#StringPattern.
def visitStringPattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#VarPattern.
def visitVarPattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ConstructorPattern.
def visitConstructorPattern(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#var_or_field_ref.
def visitVar_or_field_ref(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#pure_exp_list.
def visitPure_exp_list(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#list_literal.
def visitList_literal(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#annotation.
def visitAnnotation(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#VardeclStmt.
def visitVardeclStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AssignStmt.
def visitAssignStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#SkipStmt.
def visitSkipStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ReturnStmt.
def visitReturnStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AssertStmt.
def visitAssertStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#BlockStmt.
def visitBlockStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#IfStmt.
def visitIfStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#WhileStmt.
def visitWhileStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#TryCatchFinallyStmt.
def visitTryCatchFinallyStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AwaitStmt.
def visitAwaitStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#SuspendStmt.
def visitSuspendStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DurationStmt.
def visitDurationStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ThrowStmt.
def visitThrowStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DieStmt.
def visitDieStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#MoveCogToStmt.
def visitMoveCogToStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ExpStmt.
def visitExpStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#CaseStmt.
def visitCaseStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ExpGuard.
def visitExpGuard(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AndGuard.
def visitAndGuard(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ClaimGuard.
def visitClaimGuard(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DurationGuard.
def visitDurationGuard(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#casestmtbranch.
def visitCasestmtbranch(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#datatype_decl.
def visitDatatype_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#data_constructor.
def visitData_constructor(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#data_constructor_arg.
def visitData_constructor_arg(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#typesyn_decl.
def visitTypesyn_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#exception_decl.
def visitException_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#function_decl.
def visitFunction_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#interface_decl.
def visitInterface_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#methodsig.
def visitMethodsig(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#class_decl.
def visitClass_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#field_decl.
def visitField_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#method.
def visitMethod(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#module_decl.
def visitModule_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#module_export.
def visitModule_export(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#module_import.
def visitModule_import(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#decl.
def visitDecl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#delta_decl.
def visitDelta_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaFieldParam.
def visitDeltaFieldParam(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaClassParam.
def visitDeltaClassParam(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaHasFieldCondition.
def visitDeltaHasFieldCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaHasMethodCondition.
def visitDeltaHasMethodCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaHasInterfaceCondition.
def visitDeltaHasInterfaceCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#delta_access.
def visitDelta_access(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#module_modifier.
def visitModule_modifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddFunctionModifier.
def visitDeltaAddFunctionModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddDataTypeModifier.
def visitDeltaAddDataTypeModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddTypeSynModifier.
def visitDeltaAddTypeSynModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaModifyTypeSynModifier.
def visitDeltaModifyTypeSynModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaModifyDataTypeModifier.
def visitDeltaModifyDataTypeModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddClassModifier.
def visitDeltaAddClassModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaRemoveClassModifier.
def visitDeltaRemoveClassModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaModifyClassModifier.
def visitDeltaModifyClassModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddInterfaceModifier.
def visitDeltaAddInterfaceModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaRemoveInterfaceModifier.
def visitDeltaRemoveInterfaceModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaModifyInterfaceModifier.
def visitDeltaModifyInterfaceModifier(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddFieldFragment.
def visitDeltaAddFieldFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaRemoveFieldFragment.
def visitDeltaRemoveFieldFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddMethodFragment.
def visitDeltaAddMethodFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaModifyMethodFragment.
def visitDeltaModifyMethodFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaRemoveMethodFragment.
def visitDeltaRemoveMethodFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddMethodsigFragment.
def visitDeltaAddMethodsigFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaRemoveMethodsigFragment.
def visitDeltaRemoveMethodsigFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddModuleImportFragment.
def visitDeltaAddModuleImportFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#DeltaAddModuleExportFragment.
def visitDeltaAddModuleExportFragment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#UpdateDecl.
def visitUpdateDecl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ObjectUpdateDecl.
def visitObjectUpdateDecl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ObjectUpdateAssignStmt.
def visitObjectUpdateAssignStmt(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#UpdatePreambleDecl.
def visitUpdatePreambleDecl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#productline_decl.
def visitProductline_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#feature.
def visitFeature(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#delta_clause.
def visitDelta_clause(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#deltaspec.
def visitDeltaspec(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FIDAIDDeltaspecParam.
def visitFIDAIDDeltaspecParam(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#IntDeltaspecParam.
def visitIntDeltaspecParam(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#BoolOrIDDeltaspecParam.
def visitBoolOrIDDeltaspecParam(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#after_condition.
def visitAfter_condition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#from_condition.
def visitFrom_condition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#when_condition.
def visitWhen_condition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FeatureApplicationCondition.
def visitFeatureApplicationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#AndApplicationCondition.
def visitAndApplicationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#ParenApplicationCondition.
def visitParenApplicationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#NotApplicationCondition.
def visitNotApplicationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#OrApplicationCondition.
def visitOrApplicationCondition(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#attr_assignment.
def visitAttr_assignment(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#product_decl.
def visitProduct_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#product_reconfiguration.
def visitProduct_reconfiguration(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#fextension.
def visitFextension(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#feature_decl.
def visitFeature_decl(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#feature_decl_group.
def visitFeature_decl_group(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#fnode.
def visitFnode(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#feature_decl_attribute.
def visitFeature_decl_attribute(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FeatureDeclConstraintIfIn.
def visitFeatureDeclConstraintIfIn(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FeatureDeclConstraintIfOut.
def visitFeatureDeclConstraintIfOut(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FeatureDeclConstraintExclude.
def visitFeatureDeclConstraintExclude(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#FeatureDeclConstraintRequire.
def visitFeatureDeclConstraintRequire(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#mexp.
def visitMexp(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#boundary_int.
def visitBoundary_int(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#boundary_val.
def visitBoundary_val(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#main_block.
def visitMain_block(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#compilation_unit.
def visitCompilation_unit(self, ctx):
return self.visitChildren(ctx)
# Visit a parse tree produced by ABSParser#goal.
def visitGoal(self, ctx):
return self.visitChildren(ctx)
|
jacopoMauro/abs_deployer
|
ABS/ABSVisitor.py
|
Python
|
isc
| 21,479
|
[
"VisIt"
] |
8c29aff3c13e6db44e050996bda5b74668ff86cb5ebb4648b5342bec9360d7e6
|
# -*- coding: utf-8 -*-
# Copyright 2009-2016 Odin Hørthe Omdal
# This file is part of Medlemssys.
# Medlemssys is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Medlemssys is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Medlemssys. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import django.contrib.auth.views
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
import medlemssys.api.urls as api_urls
import medlemssys.giro.admin_views as giro_admin
import medlemssys.innhenting.views as innhenting
import medlemssys.medlem.lokallag as lokallag
import medlemssys.medlem.views as medlem
import medlemssys.statistikk.admin_views as statistikk_admin
import medlemssys.statistikk.views as statistikk
urlpatterns = [
url(r'^$',
TemplateView.as_view(template_name='pages/home.html'), name='home'),
url(r'^robots.txt$',
TemplateView.as_view(template_name='pages/robots.txt'), name='robots'),
url(r'^admin/innhenting/import_ocr/',
innhenting.import_ocr, name='import_ocr'),
url(r'^admin/medlem/giro/send/', giro_admin.send, name='giro_send'),
url(r'^admin/medlem/giro/manual/',
giro_admin.manual_girosearch, name='giro_manual'),
url(r'^admin/medlem/giro/gaaver/', giro_admin.gaaver, name='giro_gaaver'),
url(r'^admin/revisions/',
statistikk_admin.show_revisions, name='show_revisions'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, admin.site.urls),
url(r'^accounts/login/$',
django.contrib.auth.views.login, { 'template_name': 'login.html' }),
url(r'^accounts/logout/$',
django.contrib.auth.views.logout, { 'next_page': '/' }),
url(r'^medlem/opprett', medlem.create_medlem),
url(r'^medlem/(?P<id>\d+)/endra/(?P<nykel>\w+)',
medlem.edit_medlem, name='medlem_edit'),
url(r'^medlem/ringjelister', medlem.ringjelister),
url(r'^lokallag/$', lokallag.home),
url(r'^lokallag/(?P<slug>[-\w]+)/$',
lokallag.lokallag_home, name='lokallag_home'),
url(r'^takk', TemplateView.as_view(template_name='takk.html')),
url(r'^stats/vervetopp/', statistikk.vervetopp),
url(r'^stats/vervometer/', statistikk.vervometer),
url(r'^member/search/', medlem.search, name='search'),
url(r'^api/get_members.json',
medlem.get_members_json, name='get_members_json'),
url(r'^api/', include(api_urls), name='api-root'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
except:
pass
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request,
kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied,
kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found,
kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
|
Velmont/medlemssys
|
medlemssys/config/urls.py
|
Python
|
agpl-3.0
| 3,909
|
[
"VisIt"
] |
998da8cdd38f51862cec1d5699e83e5e8d6ba0a3137836004185feb03d6480e7
|
import sys
sys.path.append('../CGvsPhoto')
import image_loader as il
from dsift import DsiftExtractor
# import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# from sklearn.neural_network import MLPClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
from sklearn.mixture import GaussianMixture
from sklearn.calibration import CalibratedClassifierCV
from multiprocessing import Pool
from functools import partial
import pickle
import random
def dump_model(model, path):
pickle.dump(model, open(path, 'wb'))
def load_model(path):
return(pickle.load(open(path, 'rb')))
def compute_fisher(X, gmm, alpha = 0.5):
weights = gmm.weights_
means = gmm.means_
covars = gmm.covariances_
K = weights.shape[0]
N = X.shape[0]
F = X.shape[1]
T = X.shape[2]
G = np.empty([N, 2*F*K])
gamma = np.empty([N,K,T])
for t in range(T):
gamma[:,:,t] = gmm.predict_proba(X[:,:,t])
for i in range(K):
shifted_X = (X - np.reshape(means[i],[1,F,1]))/np.reshape(covars[i], [1,F,1])
G_mu = np.sum(shifted_X*gamma[:,i:i+1, :]/(T*np.sqrt(weights[i])), axis = 2)
G_sig = np.sum(gamma[:,i:i+1, :]*(shifted_X**2 - 1)/(T*np.sqrt(2*weights[i])), axis = 2)
G[:, 2*i*F:2*(i+1)*F] = np.concatenate([G_mu, G_sig], axis = 1)
# del(G_mu, G_sig, shifted_X, gamma)
# Power normalization
G = np.sign(G)*np.power(np.abs(G), alpha)
# L2 normalization
G = G/np.reshape(np.sqrt(np.sum(G**2, axis = 1)), [N,1])
return(G)
def compute_dense_sift(data, i, batch_size, nb_mini_patch,
nb_batch, only_green = False, verbose = False):
extractor1 = DsiftExtractor(8,16,1)
extractor2 = DsiftExtractor(16,32,1)
if verbose:
print('Compute features for batch ' + str(i+1) + '/' + str(nb_batch))
features = []
for j in range(len(data)):
img = (data[j]*256).astype(np.uint8)
if not only_green:
img = np.dot(img, [0.299, 0.587, 0.114])
feaArr1,positions = extractor1.process_image(img, verbose = False)
feaArr2,positions = extractor2.process_image(img, verbose = False)
features.append(np.concatenate([feaArr1, feaArr2]).reshape([128, nb_mini_patch]))
return(features)
def gradient(W, diff, y, b):
d = np.linalg.multi_dot([np.transpose(diff), np.transpose(W), W, diff])
# print(d)
if y*(b-d) <= 1:
grad = y*W.dot(np.outer(diff, diff))
# print('Updating...')
updated = True
else:
grad = 0
updated = False
cost = max(1 - y*(b-d), 0)
return(grad, cost, updated)
def updated_W(W, phi1, phi2, y, index, b, lr, grad_mem):
diff = phi1 - phi2
grad, cost, updated = gradient(W, diff, y, b)
grad_mem[index] = grad
new_W = W - lr*sum(grad_mem.values())/len(grad_mem.values())
return(new_W, cost, updated, grad_mem)
def sample_couple(X, y):
indexes = random.sample(list(range(X.shape[0])), 2)
phi1 = X[indexes[0]]
phi2 = X[indexes[1]]
y_i = 4*(y[indexes[0]] - 0.5)*(y[indexes[1]] - 0.5)
index = indexes[0]*X.shape[0] + indexes[1]
# print(y_i)
return(phi1, phi2, y_i, index)
class Projection:
def __init__(self, red_dim = 128, treshold = 1.2, learning_rate = 0.01,
initialization = 'random'):
self.red_dim = red_dim
self.b = treshold
self.lr = learning_rate
self.init = initialization
def train(self, X, y, nb_iter = 10000):
self.nb_features = X.shape[1]
print('Initialization...')
if self.init == 'random':
self.W = np.random.rand(self.red_dim, self.nb_features)
if self.init == 'PCA':
pca = PCA(n_components = self.red_dim, whiten = True)
pca.fit(X)
self.W = pca.components_
grad_mem = dict()
cost = 0
nb_updated = 0
for i in range(nb_iter):
phi1, phi2, y_i, index = sample_couple(X, y)
new_W, current_cost, updated, grad_mem = updated_W(self.W, phi1, phi2, y_i, index, self.b, self.lr, grad_mem)
if updated:
nb_updated += 1
cost += current_cost
self.W = new_W
if (i+1)%100 == 0:
print('Cost on 100 examples for iteration ' + str(i+1) + ' : ' + str(cost/100))
print('Number of updates on 100 examples for iteration ' + str(i+1) + ' : ' + str(nb_updated))
cost = 0
nb_updated = 0
def project(self, X):
return(np.transpose(self.W.dot(np.transpose(X))))
class Texture_model:
def __init__(self, data_directory, model_directory, dump_data_directory, image_size,
keep_PCA = 64, K_gmm = 64, only_green = False, verbose = True):
self.model_name = input(" Choose a name for the model : ")
self.model_directory = model_directory
self.dump_data_directory = dump_data_directory
# Initialize hyper-parameters
self.image_size = image_size
self.keep_PCA = keep_PCA
self.K_gmm = K_gmm
self.only_green = only_green
self.nb_mini_patch = int(image_size/8 - 1)**2 + int(image_size/16 - 1)**2
self.verbose = verbose
# Initialize database
self.data = il.Database_loader(directory = data_directory,
size = image_size,
only_green = only_green)
# Initialize classifiers
self.PCAs = []
for i in range(self.nb_mini_patch):
self.PCAs.append(PCA(n_components=keep_PCA))
self.gmm = GaussianMixture(n_components=K_gmm,
covariance_type='diag')
self.clf_svm = CalibratedClassifierCV(LinearSVC())
self.projector = Projection(red_dim = 128, treshold = 1.2,
learning_rate = 0.001,
initialization = 'PCA')
def train(self, nb_train_batch, batch_size = 50,
save_fisher = False, fisher_data_name = None):
if fisher_data_name == None:
features = np.empty([nb_train_batch*batch_size, 128, self.nb_mini_patch])
y_train = np.empty([nb_train_batch*batch_size, ])
print('Training...')
data_train = []
y_train_batch = []
for i in range(nb_train_batch):
if self.verbose:
print('Getting batch ' + str(i+1) + '/' + str(nb_train_batch))
images_batch, y_batch = self.data.get_next_train_batch(batch_size = batch_size,
crop = False)
data_train.append(images_batch)
y_train_batch.append(y_batch)
pool = Pool()
to_compute = [i for i in range(nb_train_batch)]
result = pool.starmap(partial(compute_dense_sift,
batch_size = batch_size,
nb_mini_patch = self.nb_mini_patch,
nb_batch = nb_train_batch,
only_green = self.only_green,
verbose = self.verbose),
zip(data_train, to_compute))
del(data_train)
index = 0
for i in range(len(result)):
features[index:index+batch_size] = result[i]
y_train[index:index+batch_size] = y_train_batch[i][:,1]
index+=batch_size
del(result)
# print(y_train)
# for i in range(nb_mini_patch):
# # normalize(features[:,:,i])
for i in range(self.nb_mini_patch):
if self.verbose:
print('Fitting PCAs ' + str(i+1) + '/' + str(self.nb_mini_patch))
self.PCAs[i].fit(features[:,:,i])
# pca.fit(np.concatenate([features[:,:,i] for i in range(nb_mini_patch)]))
if self.verbose:
print('Dimension reduction...')
features_PCA = np.empty([nb_train_batch*batch_size, self.keep_PCA, self.nb_mini_patch])
for i in range(self.nb_mini_patch):
# features_PCA[:,:,i] = pca.transform(features[:,:,i])
features_PCA[:,:,i] = self.PCAs[i].transform(features[:,:,i])
del(features)
if self.verbose:
print('Fitting Gaussian Mixture Model...')
self.gmm.fit(np.reshape(features_PCA,
[features_PCA.shape[0]*self.nb_mini_patch,
self.keep_PCA]))
if self.verbose:
print('Computing Fisher vectors...')
fisher_train = compute_fisher(features_PCA, self.gmm)
del(features_PCA)
if save_fisher:
dump_name = input('Name of the dump file : ')
pickle.dump([fisher_train, y_train], open(self.dump_data_directory + '/' + dump_name + '.pkl', 'wb'))
# Plotting boxplot
# for i in range(fisher_train.shape[1]):
# print('Computing dataframe...')
# data_real = fisher_train[y_train == 0, i]
# data_cg = fisher_train[y_train == 1, i]
# print('Plotting boxplot...')
# plt.figure()
# plt.boxplot([data_real, data_cg])
# plt.show()
else:
data = pickle.load(open(self.dump_data_directory + '/' + fisher_data_name + '.pkl', 'rb'))
fisher_train = data[0]
y_train = data[1]
if self.verbose:
print('Fitting Projection...')
self.projector.train(fisher_train[:1000], y_train[:1000], nb_iter = 10000)
if self.verbose:
print('Projection...')
fisher_train = self.projector.project(fisher_train)
if self.verbose:
print('Fitting SVM...')
self.clf_svm.fit(fisher_train, y_train)
# clf.fit(np.reshape(features_PCA, [nb_train_batch*batch_size, n_comp*nb_mini_patch]), y_train)
# print('Fitting MLP...')
# clf_mlp.fit(fisher_train, y_train)
del(fisher_train, y_train)
if self.verbose:
print('Dumping model...')
dump_model(self, self.model_directory + '/' + self.model_name + '.pkl')
def test(self, nb_test_batch, batch_size = 50):
print('Testing...')
features_test = np.empty([nb_test_batch*batch_size, 128, self.nb_mini_patch])
y_test = np.empty([nb_test_batch*batch_size, ])
data_test = []
y_test_batch = []
for i in range(nb_test_batch):
if self.verbose:
print('Getting batch ' + str(i+1) + '/' + str(nb_test_batch))
images_batch, y_batch = self.data.get_batch_test(batch_size = batch_size,
crop = False)
data_test.append(images_batch)
y_test_batch.append(y_batch)
pool = Pool()
to_compute = [i for i in range(nb_test_batch)]
result = pool.starmap(partial(compute_dense_sift,
batch_size = batch_size,
nb_mini_patch = self.nb_mini_patch,
nb_batch = nb_test_batch,
only_green = self.only_green,
verbose = self.verbose),
zip(data_test, to_compute))
del(data_test)
index = 0
for i in range(len(result)):
features_test[index:index+batch_size] = result[i]
y_test[index:index+batch_size] = y_test_batch[i][:,1]
index+=batch_size
del(result)
if self.verbose:
print('Dimension reduction...')
features_test_PCA = np.empty([nb_test_batch*batch_size, self.keep_PCA, self.nb_mini_patch])
for i in range(self.nb_mini_patch):
# normalize(features_test[:,:,i])
# features_test_PCA[:,:,i] = pca.transform(features_test[:,:,i])
features_test_PCA[:,:,i] = self.PCAs[i].transform(features_test[:,:,i])
del(features_test)
if self.verbose:
print('Computing Fisher vectors...')
fisher_test = compute_fisher(features_test_PCA, self.gmm)
del(features_test_PCA)
if self.verbose:
print('Projection...')
fisher_test = self.projector.project(fisher_test)
if self.verbose:
print('Prediction...')
y_pred_svm = self.clf_svm.predict(fisher_test)
# y_pred = clf.predict(np.reshape(features_test_PCA, [nb_test_batch*batch_size, n_comp*nb_mini_patch]))
# y_pred_mlp = clf_mlp.predict(fisher_test)
if self.verbose:
print('Computing score...')
score_svm = accuracy_score(y_pred_svm, y_test)
# score_mlp = accuracy_score(y_pred_mlp, y_test)
print('Accuracy SVM : ' + str(score_svm))
# print('Accuracy MLP : ' + str(score_mlp))
if __name__ == '__main__':
config = 'server'
if config == 'server':
data_directory = '/work/smg/v-nicolas/level-design_raise_100/'
model_directory = '/work/smg/v-nicolas/models_texture/'
dump_data_directory = '/work/smg/v-nicolas/data_texture/'
else:
data_directory = '/home/nicolas/Database/level-design_raise_100_color/'
model_directory = '/home/nicolas/Documents/models_texture/'
dump_data_directory = '/home/nicolas/Documents/data_texture/'
image_size = 100
only_green = True
nb_train_batch = 200
nb_test_batch = 80
batch_size = 50
model = Texture_model(data_directory, model_directory, dump_data_directory,
image_size = image_size, keep_PCA = 64,
K_gmm = 32, only_green = only_green,
verbose = True)
save_data = input('Save data? (y/N) : ')
if save_data == 'y':
save_data = True
load_data = None
else:
save_data = False
load_data = input('Load data? (y/N) : ')
if load_data == 'y':
load_data = input('File to load (source directory : ' + dump_data_directory + ') : ')
else:
load_data = None
model.train(nb_train_batch, batch_size, save_fisher = save_data, fisher_data_name = load_data)
if load_data != None:
model_to_load = input('Model to load for test : ')
model = load_model(model_directory + '/' + model_to_load + '.pkl')
model.test(nb_test_batch, batch_size)
model2 = load_model(model_directory + 'model1.pkl')
|
NicoRahm/CGvsPhoto
|
Textures/texture.py
|
Python
|
mit
| 12,508
|
[
"Gaussian"
] |
1a08f41377e54e33d07ea1ed439184eb32568fb9636ae7b9d058cbe57c911380
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function, division, unicode_literals, absolute_import
import sys
import os
import warnings
import mkdocs
import mkdocs.__main__
if sys.version_info < (3, 6):
warnings.warn("Python >= 3.6 is STRONGLY recommended when building the Abinit documentation\n" * 20)
#if sys.version_info >= (3, 7):
# warnings.warn("Python >= 3.7 is not yet supported. Please use py3.6 to build the Abinit documentation\n" * 20)
#if sys.mkdocs.__version__
# We don't install with setup.py hence we have to add the directory [...]/abinit/tests to $PYTHONPATH
pack_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, pack_dir)
# This needed to import doc.tests
sys.path.insert(0, os.path.join(pack_dir, "doc"))
from abimkdocs.website import Website, HTMLValidator
def prof_main(main):
"""
Decorator for profiling main programs.
Profiling is activated by prepending the command line options
supported by the original main program with the one of following keywords:
[`prof`, `tracemalloc`, `traceopen`]
Example:
$ script.py arg --foo=1
becomes
$ script.py prof arg --foo=1
`prof`: profiles the code with `cProfile`.
In this case the decorated main accepts two new arguments:
prof_file: Name of the output file with profiling data
If not given, a temporary file is created.
sortby: Profiling data are sorted according to this value.
default is "time". See sort_stats.
`tracemalloc`: uses the tracemalloc module (py>3.4) to trace memory allocations
`traceopen`: prints the list of open files before exiting (require `psutil` module)
"""
from functools import wraps
@wraps(main)
def wrapper(*args, **kwargs):
import sys
do_prof, do_tracemalloc, do_traceopen = 3 * [False]
if len(sys.argv) > 1:
do_prof = sys.argv[1] == "prof"
do_tracemalloc = sys.argv[1] == "tracemalloc"
do_traceopen = sys.argv[1] == "traceopen"
if do_prof or do_tracemalloc or do_traceopen: sys.argv.pop(1)
if do_prof:
print("Entering profiling mode...")
import pstats, cProfile, tempfile
prof_file = kwargs.pop("prof_file", None)
if prof_file is None:
_, prof_file = tempfile.mkstemp()
print("Profiling data stored in %s" % prof_file)
sortby = kwargs.pop("sortby", "time")
cProfile.runctx("main()", globals(), locals(), prof_file)
s = pstats.Stats(prof_file)
s.strip_dirs().sort_stats(sortby).print_stats()
return 0
elif do_tracemalloc:
print("Entering tracemalloc mode...")
# Requires py3.4
try:
import tracemalloc
except ImportError:
print("Error while trying to import tracemalloc (requires py3.4)")
raise SystemExit(1)
tracemalloc.start()
retcode = main(*args, **kwargs)
snapshot = tracemalloc.take_snapshot()
top_stats = snapshot.statistics('lineno')
n = min(len(top_stats), 20)
print("[Top %d]" % n)
for stat in top_stats[:20]:
print(stat)
elif do_traceopen:
try:
import psutil
except ImportError:
print("traceopen requires psutil module")
raise SystemExit(1)
import os
p = psutil.Process(os.getpid())
retcode = main(*args, **kwargs)
print("open_files", p.open_files())
else:
retcode = main(*args, **kwargs)
return retcode
return wrapper
@prof_main
def main():
verbose = 1 if "-v" in sys.argv or "--verbose" in sys.argv else 0
strict = "-s" in sys.argv or "--strict" in sys.argv
if "--no-colors" in sys.argv:
from tests.pymods import termcolor
termcolor.enable(False)
sys.argv.remove("--no-colors")
if len(sys.argv) > 1 and sys.argv[1] == "validate":
if len(sys.argv) == 2:
return HTMLValidator(verbose).validate_website("./site")
else:
validator = HTMLValidator(verbose)
retcode = 0
for page in sys.argv[2:]:
retcode += validator.validate_htmlpage(page)
return retcode
if "--help" in sys.argv or "-h" in sys.argv:
return mkdocs.__main__.cli()
if len(sys.argv) > 1 and ("--help" not in sys.argv or "-h" not in sys.argv):
deploy = False
website = Website.build("./doc", deploy=deploy, verbose=verbose)
if len(sys.argv) > 1 and sys.argv[1] in ("build", "serve", "gh-deploy"):
website.generate_markdown_files()
if "--dry-run" in sys.argv: return 0
print("Invoking mkdocs.__main__ to build HTML from MD files. It may take a few minutes ...")
mkdocs_retcode = mkdocs.__main__.cli()
return mkdocs_retcode + len(website.warnings)
if __name__ == '__main__':
sys.exit(main())
|
abinit/abinit
|
mksite.py
|
Python
|
gpl-3.0
| 5,142
|
[
"ABINIT"
] |
1190401e2fcee1bc04328f6525f26957c193d7b37de3544852b5ebb28fc94a94
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.