text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__RCSID__ = "$Id$"

import random
from hashlib import md5
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.Core.DISET.private.BaseClient import BaseClient
from DIRAC.Core.DISET.private.MessageBroker import getGlobalMessageBroker
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, isReturnStructure
from DIRAC.Core.Utilities import Network, Time
from DIRAC.FrameworkSystem.Client.Logger import gLogger

# Module-level synchronizer used by the DISET messaging machinery.
gMsgSync = Synchronizer()


class MessageClient(BaseClient):
  """Client side of a persistent DISET message connection.

  Opens a connection to a remote service, registers the transport with the
  global MessageBroker, and dispatches incoming messages to user callbacks.
  """

  class MSGException(Exception):
    # Internal control-flow exception raised by __checkResult when a DIRAC
    # return structure carries an error; caught only inside connect().
    pass

  def _initialize(self):
    # Transport id of the live connection; False while disconnected
    # (doubles as the "connected" flag).
    self.__trid = False
    self.__transport = None
    # Name sent to the server during the handshake to identify this client.
    self.__uniqueName = self.__generateUniqueClientName()
    self.__msgBroker = getGlobalMessageBroker()
    # Per-message-name callbacks: msgName -> callable(msgObj).
    self.__callbacks = {}
    # Extra keyword parameters forwarded to the server on connect().
    self.__connectExtraParams = {}
    # 'drop': called on disconnect; 'msg': catch-all message callbacks.
    self.__specialCallbacks = {'drop': [], 'msg': []}

  def __generateUniqueClientName(self):
    """Return an md5 hex digest built from the current time, a random
    number, the host FQDN and the logger name -- unique per client."""
    hashStr = ":".join((Time.toString(), str(random.random()), Network.getFQDN(), gLogger.getName()))
    hexHash = md5(hashStr.encode()).hexdigest()
    return hexHash

  def setUniqueName(self, uniqueName):
    """Override the auto-generated unique client name.

    Must be called before connect() for the server to see the new name.
    """
    self.__uniqueName = uniqueName

  def __checkResult(self, result):
    # Unwrap an S_OK/S_ERROR structure; convert errors into MSGException
    # so connect() can funnel all handshake failures through one except.
    if not result['OK']:
      raise self.MSGException(result['Message'])
    return result['Value']

  def createMessage(self, msgName):
    """Create an (unsent) message object bound to this client's service."""
    return self.__msgBroker.getMsgFactory().createMessage(self.getServiceName(), msgName)

  @property
  def connected(self):
    # Truthy transport id while connected, False otherwise.
    return self.__trid

  def connect(self, **extraParams):
    """Open the connection and register it with the message broker.

    :param extraParams: optional keyword parameters sent to the server
                        along with the unique client name.
    :return: S_OK on success; S_ERROR if already connected or if any
             handshake step fails.
    """
    if extraParams:
      self.__connectExtraParams = extraParams
    if self.__trid:
      return S_ERROR("Already connected")
    try:
      trid, transport = self.__checkResult(self._connect())
      self.__checkResult(self._proposeAction(transport, ("Connection", 'new')))
      self.__checkResult(transport.sendData(S_OK([self.__uniqueName, self.__connectExtraParams])))
      self.__checkResult(transport.receiveData())
      self.__checkResult(self.__msgBroker.addTransportId(trid, self._serviceName,
                                                         receiveMessageCallback=self.__cbRecvMsg,
                                                         disconnectCallback=self.__cbDisconnect))
      self.__trid = trid
      self.__transport = transport
    except self.MSGException as e:
      return S_ERROR(str(e))
    return S_OK()

  def __cbDisconnect(self, trid):
    """Broker callback: the transport was dropped.

    Closes our side of the connection (best effort) and notifies every
    'drop' subscriber.
    """
    if not self.__trid:
      return
    if self.__trid != trid:
      gLogger.error("OOps. trid's don't match. This shouldn't happen!", "(%s vs %s)" % (self.__trid, trid))
      return S_ERROR("OOOPS")
    self.__trid = False
    try:
      self.__transport.close()
    except Exception:
      # Best-effort close; the transport may already be gone.
      pass
    for cb in self.__specialCallbacks['drop']:
      try:
        cb(self)
      except Exception:
        gLogger.exception("Exception while processing disconnect callbacks")

  def __cbRecvMsg(self, trid, msgObj):
    """Broker callback: dispatch an incoming message.

    Catch-all ('msg') callbacks run first; if a catch-all answered and no
    per-name callback exists, its result is returned directly. Otherwise
    the per-name callback handles the message.
    """
    msgName = msgObj.getName()
    msgObj.setMsgClient(self)
    for cb in self.__specialCallbacks['msg']:
      try:
        result = cb(self, msgObj)
        if not isReturnStructure(result):
          gLogger.error("Callback for message does not return S_OK/S_ERROR", msgObj.getName())
          return S_ERROR("No response")
        if not result['OK']:
          return result
        # If no specific callback but a generic one, return the generic one
        if msgName not in self.__callbacks:
          return result
      except Exception:
        gLogger.exception("Exception while processing callbacks", msgObj.getName())
    if msgName not in self.__callbacks:
      return S_ERROR("Unexpected message")
    try:
      result = self.__callbacks[msgName](msgObj)
      if not isReturnStructure(result):
        gLogger.error("Callback for message does not return S_OK/S_ERROR", msgName)
        return S_ERROR("No response")
      return result
    except Exception:
      gLogger.exception("Exception while processing callbacks", msgName)
      return S_ERROR("No response")

  def getTrid(self):
    """Return the current transport id (False when disconnected)."""
    return self.__trid

  def sendMessage(self, msgObj):
    """Send a message, transparently connecting first if needed."""
    if not self.__trid:
      result = self.connect()
      if not result['OK']:
        return result
    return self.__msgBroker.sendMessage(self.__trid, msgObj)

  def subscribeToAllMessages(self, cbFunction):
    """Register a catch-all callback invoked for every incoming message."""
    if not callable(cbFunction):
      return S_ERROR("%s is not callable" % cbFunction)
    self.__specialCallbacks['msg'].append(cbFunction)
    return S_OK()

  def subscribeToMessage(self, msgName, cbFunction):
    """Register (or replace) the callback for a specific message name."""
    if not callable(cbFunction):
      return S_ERROR("%s is not callable" % cbFunction)
    self.__callbacks[msgName] = cbFunction
    return S_OK()

  def subscribeToDisconnect(self, cbFunction):
    """Register a callback invoked when the connection drops."""
    if not callable(cbFunction):
      return S_ERROR("%s is not callable" % cbFunction)
    self.__specialCallbacks['drop'].append(cbFunction)
    return S_OK()

  def clearSubscription(self, msgName):
    """Drop the per-name callback for msgName.

    :return: True if a callback was registered, False otherwise.
    """
    try:
      del(self.__callbacks[msgName])
    except KeyError:
      return False
    return True

  def disconnect(self):
    """Tear down the connection via the broker."""
    trid = self.__trid
    self.__trid = False
    self.__msgBroker.removeTransport(trid)
|
yujikato/DIRAC
|
src/DIRAC/Core/DISET/MessageClient.py
|
Python
|
gpl-3.0
| 5,197
|
[
"DIRAC"
] |
1c1569d1ba082387d96c433949911be857c52869ab08954d2da545f709a3417d
|
# -*- coding: utf-8 -*-
"""
Sub-package with code handling netcdf datasets.
"""
# NOTE(review): these are Python-2 implicit relative imports; Python 3 would
# require `from . import dataset`, `from .operations import *`, etc. --
# confirm the target interpreter before changing them.
import dataset
# Re-export the public names of the sub-modules at package level.
from operations import *
from dataset import *
from date_time import *
|
jfrygeo/solutions-geoprocessing-toolbox
|
suitability/toolboxes/scripts/MultidimensionSupplementalTools/MultidimensionSupplementalTools/Scripts/mds/netcdf/__init__.py
|
Python
|
apache-2.0
| 166
|
[
"NetCDF"
] |
5e495fbcd7dc4dcb84b522d0add4ebaaa743f343fa134f6d7470ce1af876f908
|
import numpy as np
import os
import time


# Gaussian function
def gaussian(A, tauGaussian):
    """Apply the Gaussian (RBF) map elementwise: exp(-tauGaussian * A)."""
    return np.exp(-tauGaussian * A)


# Remove matrices for saving space
def removeOldMatrices():
    """Delete every file inside ./Matrices to reclaim disk space."""
    dirpath = "./Matrices"
    filelist = [f for f in os.listdir(dirpath)]
    for f in filelist:
        os.remove(dirpath + "/" + f)


# Initialize the kernel matrix
def initK(X, Y=None, tauGaussian=0.1):
    """Build a Gaussian kernel matrix.

    With only X (n x d), returns the n x n kernel K[i, j] =
    exp(-tauGaussian * ||x_i - x_j||^2 / 2).  With X and Y (m x d), returns
    the n x m cross-kernel between rows of X and rows of Y.

    :param X: training data, rows are samples (array-like).
    :param Y: optional second data set for a cross-kernel.
    :param tauGaussian: kernel bandwidth parameter.
    :return: numpy matrix of kernel values.
    """
    # FIX: the original used `if Y == None:` -- when Y is a numpy array or
    # matrix that comparison is elementwise and raises "truth value is
    # ambiguous" (or silently misbehaves on older numpy).  Identity test
    # against None is the correct check.
    if Y is None:
        X = np.mat(X)
        XXT = X * X.T
        # Column vector of squared row norms ||x_i||^2.
        dxx = XXT.diagonal().T
        dxx_mat = np.tile(dxx, [1, X.shape[0]])
        # Half squared pairwise distances:
        # (||x_i||^2 + ||x_j||^2)/2 - <x_i, x_j> = ||x_i - x_j||^2 / 2.
        A = 0.5 * dxx_mat + 0.5 * dxx_mat.T - XXT
    else:
        X = np.mat(X)
        Y = np.mat(Y)
        XYT = X * Y.T
        XXT = X * X.T
        YYT = Y * Y.T
        dxx = XXT.diagonal().T
        dyy = YYT.diagonal().T
        dxx_mat = np.tile(dxx, [1, Y.shape[0]])
        dyy_mat = np.tile(dyy, [1, X.shape[0]])
        A = 0.5 * dxx_mat + 0.5 * dyy_mat.T - XYT
    return gaussian(A, tauGaussian)
# Generate all Kernel matrix before experiment
def generateAllMatrices(T_d, V_d, tauGaussian, nFolds=10):
    """Precompute and save the per-fold Gaussian kernel matrices.

    For each fold i, saves the training kernel K and the train-vs-validation
    kernel Ktest under ./Matrices, keyed by bandwidth and fold index.

    :param T_d: sequence of per-fold training data sets.
    :param V_d: sequence of per-fold validation data sets.
    :param tauGaussian: kernel bandwidth passed through to initK.
    :param nFolds: number of folds to process (default 10, the original
                   hard-coded value, so existing callers are unaffected).
    """
    for i in range(nFolds):
        K = initK(T_d[i], tauGaussian=tauGaussian)
        Ktest = initK(T_d[i], V_d[i], tauGaussian=tauGaussian)
        np.save("./Matrices/K_" + str(tauGaussian) + "_" + str(i), K)
        np.save("./Matrices/Ktest_" + str(tauGaussian) + "_" + str(i), Ktest)
        # Single-argument print() behaves identically on Python 2 and 3;
        # the original `print 'x',  i` statement is Python-2-only syntax.
        print('generated matrices for fold # %s' % i)
|
bernardgut/MLTools
|
SMO/KernelComputation.py
|
Python
|
gpl-2.0
| 1,363
|
[
"Gaussian"
] |
69c6a1078531ebee42655c330bb7a5d83a92bc9822108efb4f24be381a9939c9
|
#!/usr/bin/env python
""" This script is used to submit the jobs on the grid.
It uses an executable (first argument), creates
a directory in which it will store all the job ids (<jobName> args),
and submit a configurable amount of jobs.
"""
from __future__ import print_function
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
from DIRAC.Interfaces.API.Dirac import Dirac
from DIRAC.Interfaces.API.Job import Job
import sys
import os
if len(sys.argv) < 4:
print("Usage %s <scriptName> <jobName> <nbJobs>" % sys.argv[0])
sys.exit(1)
scriptName = sys.argv[1]
jobName = sys.argv[2]
nbJobs = int(sys.argv[3])
if not os.path.exists(jobName):
os.makedirs(jobName)
os.makedirs("%s/Done"%jobName)
os.makedirs("%s/Failed"%jobName)
else:
print("Folder %s exists" % jobName)
sys.exit(1)
f = open("%s/jobIdList.txt"%jobName, 'w')
for i in xrange(nbJobs):
j = Job()
j.setCPUTime(10000)
j.setExecutable(scriptName)
j.addToOutputSandbox.append('myLog.txt')
j.addToOutputSandbox.append('clock.txt')
j.addToOutputSandbox.append('time.txt')
dirac = Dirac()
jobID = dirac.submitJob(j)
realId = jobID.get('JobID')
f.write("%s\n"%realId)
f.close()
|
fstagni/DIRAC
|
tests/Performance/DFCPerformance/submitJobs.py
|
Python
|
gpl-3.0
| 1,214
|
[
"DIRAC"
] |
940e2aba70b541045cc020d64993e72f469e57d45cd0a05598567b4ac7051e9a
|
# Copyright 2012 Jose Blanca, Peio Ziarsolo, COMAV-Univ. Politecnica Valencia
# This file is part of ngs_crumbs.
# ngs_crumbs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# ngs_crumbs is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with ngs_crumbs. If not, see <http://www.gnu.org/licenses/>.
import unittest
import os.path
from subprocess import check_output
from tempfile import NamedTemporaryFile
from crumbs.seq.mate_chimeras import (classify_mapped_reads, classify_chimeras,
calculate_distance_distribution)
from crumbs.utils.bin_utils import SEQ_BIN_DIR
from crumbs.utils.test_utils import TEST_DATA_DIR
from crumbs.utils.tags import NON_CHIMERIC, CHIMERA, UNKNOWN
from crumbs.seq.seq import get_name
from crumbs.mapping import map_with_bwamem, map_process_to_sortedbam
class FilterByMappingType(unittest.TestCase):
    """Integration tests for mate-pair chimera classification.

    NOTE(review): these tests map real reads with bwa against the bundled
    'ref_example.fasta' fixture -- they require bwa and the test data to be
    available; confirm the environment before running.
    """

    def test_classify_paired_reads(self):
        # Interleaved FASTA with one non-chimeric, one unknown and one
        # chimeric pair; each classified pair must carry the matching name.
        index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta')
        #Non chimeric
        query1 = '>seq1 1:N:0:GATCAG\nGGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT\n'
        query2 = '>seq1 2:N:0:GATCAG\nAGGAGGGATCGGGCACCCACGGCGCGGTAGACTGAGGCCTTCTCGAACT\n'
        #Chimeric
        query3 = '>seq2 1:N:0:GATCAG\nAAGTTCAATGTAGCAAGGGTACATGCTGACGAGATGGGTCTGCGATCCC\n'
        query4 = '>seq2 2:N:0:GATCAG\nACGTGGATGCGGCGACGGCCCTACGGCACATACTGTTATTAGGGTCACT\n'
        #unknown
        query5 = '>seq3 1:N:0:GATCAG\nAGTGACCCTAATAACAGTATGTGCCGTAGGGCCGTCGCCGCATCCACGT\n'
        query6 = '>seq3 2:N:0:GATCAG\nGTCGTGCGCAGCCATTGAGACCTTCCTAGGGTTTTCCCCATGGAATCGG\n'
        query = query1 + query2 + query5 + query6 + query3 + query4
        in_fhand = NamedTemporaryFile()
        in_fhand.write(query)
        in_fhand.flush()
        bam_fhand = NamedTemporaryFile(suffix='.bam')
        extra_params = ['-a', '-M']
        bwa = map_with_bwamem(index_fpath, interleave_fpath=in_fhand.name,
                              extra_params=extra_params)
        map_process_to_sortedbam(bwa, bam_fhand.name, key='queryname')
        result = classify_mapped_reads(bam_fhand, mate_distance=2000)
        for pair, kind in result:
            if kind == NON_CHIMERIC:
                assert 'seq1' in get_name(pair[0])
            elif kind == UNKNOWN:
                assert 'seq3' in get_name(pair[0])
            elif kind == CHIMERA:
                assert 'seq2' in get_name(pair[0])
            else:
                self.fail()

    def test_filter_chimeras(self):
        # Same fixture reads, exercised through classify_chimeras(), which
        # splits the input into non-chimeric/chimeric/unknown output files.
        index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta')
        # Non chimeric
        query1 = '>seq1 1:N:0:GATCAG\nGGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT\n'
        query2 = '>seq1 2:N:0:GATCAG\nAGGAGGGATCGGGCACCCACGGCGCGGTAGACTGAGGCCTTCTCGAACT\n'
        # Chimeric
        query3 = '>seq2 1:N:0:GATCAG\nAAGTTCAATGTAGCAAGGGTACATGCTGACGAGATGGGTCTGCGATCCC\n'
        query4 = '>seq2 2:N:0:GATCAG\nACGTGGATGCGGCGACGGCCCTACGGCACATACTGTTATTAGGGTCACT\n'
        # unknown
        query5 = '>seq3 1:N:0:GATCAG\nAGTGACCCTAATAACAGTATGTGCCGTAGGGCCGTCGCCGCATCCACGT\n'
        query6 = '>seq3 2:N:0:GATCAG\nGTCGTGCGCAGCCATTGAGACCTTCCTAGGGTTTTCCCCATGGAATCGG\n'
        query = query1 + query2 + query5 + query6 + query3 + query4
        in_fhand = NamedTemporaryFile()
        in_fhand.write(query)
        in_fhand.flush()
        # classify_chimeras function
        out_fhand = NamedTemporaryFile()
        chimeras_fhand = NamedTemporaryFile()
        unknown_fhand = NamedTemporaryFile()
        classify_chimeras(in_fhand, index_fpath, mate_distance=2000,
                          out_fhand=out_fhand, chimeras_fhand=chimeras_fhand,
                          unknown_fhand=unknown_fhand)
        out_fhand.flush()
        chimeras_fhand.flush()
        unknown_fhand.flush()
        # Python 2 idiom: file.next() yields the first line of each output.
        assert 'seq1' in open(out_fhand.name).next()
        assert 'seq2' in open(chimeras_fhand.name).next()
        assert 'seq3' in open(unknown_fhand.name).next()

    def test_filter_chimeras_bin(self):
        # Same behavior exercised through the installed command-line binary.
        index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta')
        # Non chimeric
        query1 = '>seq1 1:N:0:GATCAG\nGGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT\n'
        query2 = '>seq1 2:N:0:GATCAG\nAGGAGGGATCGGGCACCCACGGCGCGGTAGACTGAGGCCTTCTCGAACT\n'
        # Chimeric
        query3 = '>seq2 1:N:0:GATCAG\nAAGTTCAATGTAGCAAGGGTACATGCTGACGAGATGGGTCTGCGATCCC\n'
        query4 = '>seq2 2:N:0:GATCAG\nACGTGGATGCGGCGACGGCCCTACGGCACATACTGTTATTAGGGTCACT\n'
        # unknown
        query5 = '>seq3 1:N:0:GATCAG\nAGTGACCCTAATAACAGTATGTGCCGTAGGGCCGTCGCCGCATCCACGT\n'
        query6 = '>seq3 2:N:0:GATCAG\nGTCGTGCGCAGCCATTGAGACCTTCCTAGGGTTTTCCCCATGGAATCGG\n'
        query = query1 + query2 + query5 + query6 + query3 + query4
        in_fhand = NamedTemporaryFile()
        in_fhand.write(query)
        in_fhand.flush()
        filter_chimeras_bin = os.path.join(SEQ_BIN_DIR, 'classify_chimeras')
        assert 'usage' in check_output([filter_chimeras_bin, '-h'])
        chimeras_fhand = NamedTemporaryFile()
        unknown_fhand = NamedTemporaryFile()
        out_fhand = NamedTemporaryFile()
        cmd = [filter_chimeras_bin, in_fhand.name, '-r', index_fpath]
        cmd.extend(['-c', chimeras_fhand.name, '-u', unknown_fhand.name,
                    '-s', '2000', '-o', out_fhand.name])
        check_output(cmd, stdin=in_fhand)
        assert 'seq1' in open(out_fhand.name).next()
        assert 'seq2' in open(chimeras_fhand.name).next()
        assert 'seq3' in open(unknown_fhand.name).next()
class DrawDistanceDistribution(unittest.TestCase):
    """Tests for the mate-pair insert-distance distribution code.

    NOTE(review): like the tests above, these require bwa and the bundled
    reference fixture; the expected counts below encode the known mapping
    of the fixture reads.
    """

    def test_calculate_mp_distance_distribution(self):
        index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta')
        query1 = '>seq1 1:N:0:GATCAG\n'
        query1 += 'GGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT\n'
        query2 = '>seq1 2:N:0:GATCAG\n'
        query2 += 'AGGAGGGATCGGGCACCCACGGCGCGGTAGACTGAGGCCTTCTCGAACT\n'
        # Chimeric
        query3 = '>seq2 1:N:0:GATCAG\n'
        query3 += 'AAGTTCAATGTAGCAAGGGTACATGCTGACGAGATGGGTCTGCGATCCC\n'
        query4 = '>seq2 2:N:0:GATCAG\n'
        query4 += 'ACGTGGATGCGGCGACGGCCCTACGGCACATACTGTTATTAGGGTCACT\n'
        # unknown
        query5 = '>seq3 1:N:0:GATCAG\n'
        query5 += 'AGTGACCCTAATAACAGTATGTGCCGTAGGGCCGTCGCCGCATCCACGT\n'
        query6 = '>seq3 2:N:0:GATCAG\n'
        query6 += 'GTCGTGCGCAGCCATTGAGACCTTCCTAGGGTTTTCCCCATGGAATCGG\n'
        query = query1 + query2 + query5 + query6 + query3 + query4
        in_fhand = NamedTemporaryFile()
        in_fhand.write(query)
        in_fhand.flush()
        stats = calculate_distance_distribution(in_fhand, index_fpath,
                                                max_clipping=0.05)
        # One pair each in the outie/innie/other orientation buckets, at
        # the distances produced by mapping the fixture reads.
        assert stats['outies'][1776] == 1
        assert stats['innies'][82] == 1
        assert stats['others'][1417] == 1

    def test_draw_distance_distribution_bin(self):
        # Smoke-test the plotting binary end to end.
        index_fpath = os.path.join(TEST_DATA_DIR, 'ref_example.fasta')
        # Non chimeric
        query1 = '>seq1 1:N:0:GATCAG\n'
        query1 += 'GGGATCGCAGACCCATCTCGTCAGCATGTACCCTTGCTACATTGAACTT\n'
        query2 = '>seq1 2:N:0:GATCAG\n'
        query2 += 'AGGAGGGATCGGGCACCCACGGCGCGGTAGACTGAGGCCTTCTCGAACT\n'
        # Chimeric
        query3 = '>seq2 1:N:0:GATCAG\n'
        query3 += 'AAGTTCAATGTAGCAAGGGTACATGCTGACGAGATGGGTCTGCGATCCC\n'
        query4 = '>seq2 2:N:0:GATCAG\n'
        query4 += 'ACGTGGATGCGGCGACGGCCCTACGGCACATACTGTTATTAGGGTCACT\n'
        # unknown
        query5 = '>seq3 1:N:0:GATCAG\n'
        query5 += 'AGTGACCCTAATAACAGTATGTGCCGTAGGGCCGTCGCCGCATCCACGT\n'
        query6 = '>seq3 2:N:0:GATCAG\n'
        query6 += 'GTCGTGCGCAGCCATTGAGACCTTCCTAGGGTTTTCCCCATGGAATCGG\n'
        query = query1 + query2 + query5 + query6 + query3 + query4
        in_fhand = NamedTemporaryFile()
        in_fhand.write(query)
        in_fhand.flush()
        distribution_fhand = NamedTemporaryFile()
        draw_bin = os.path.join(SEQ_BIN_DIR, 'draw_pair_distance_distribution')
        assert 'usage' in check_output([draw_bin, '-h'])
        cmd = [draw_bin, '-r', index_fpath, '-o', distribution_fhand.name,
               in_fhand.name]
        # Python 2 print statement: echoes the binary's stdout.
        print check_output(cmd)
        # raw_input(distribution_fhand.name)
if __name__ == "__main__":
    # Uncomment the line below to restrict the run to one test class.
    # import sys; sys.argv = ['', 'DrawDistanceDistribution']
    unittest.main()
|
JoseBlanca/ngs_crumbs
|
test/seq/test_mate_chimeras.py
|
Python
|
gpl-3.0
| 8,781
|
[
"BWA"
] |
5047edf860bb9a68df930dce0443e8906c3c80ef7fc9f1c84d5cff83056c88d0
|
import discord
from discord.ext import commands
import os
from .utils.dataIO import dataIO, fileIO
from __main__ import send_cmd_help
import asyncio
import random
from random import choice as rand_choice
import string
import datetime
import time
from collections import OrderedDict
import clashroyale
import requests
# --- Static assets and canned announcement texts used by the Legend cog ---

creditIcon = "https://i.imgur.com/TP8GXZb.png"
# NOTE(review): this name shadows the `credits` builtin -- harmless in this
# module, but worth knowing when debugging interactively.
credits = "Bot by GR8 | Titan"

# Roles whose members may use privileged bot commands.
BOTCOMMANDER_ROLES = ["Family Representative", "Clan Manager",
                      "Clan Deputy", "Co-Leader", "Hub Officer", "admin"]

# Server rules announcement text.
rules_text = """**Here are some Legend Family Discord server rules.**\n
• No Hateful, obscene, offensive, racist, sexual or violent words allowed in chat or images.
• Respect others' opinions. If you disagree, please do so in a constructive manner.
• This is an English only server, please use any other languages in a private message.
• Do not spam, and avoid ever using @myclanname without permission from clan managers or deputies.
• No advertisement of any kind, e.g. clans, websites, discord invites.
• Use #bot-spam for bot features, e.g. !deck or !payday
• Obtaining credits or reputations using unethical ways like cheating or trading is strictly forbidden
• Respect and do not subvert moderators and managers.
• A good rule is to talk to people as if you were talking to them face to face.
• There are more rules that vary from clan to clan. Ask your clan leader for the rules of your clan.\n
**Clan Transfer**\n
• If you are transferring from one Legend Family clan to another, please contact your destination clan's clan leader first,
and wait for the all clear from that clan leader. We are all for members being wherever they want to be, but it helps us keep track of what is going on, and helps us make sure you get accepted.
• If you are leaving the clan for another reason, please talk with your leader first when possible. As a clan leader it helps to know if you're leaving for good, if you're leaving to do 2v2 with a few friends for a while, or if you're leaving for an eSport event.\n
**Violation of these roles will lead to punishment including temporary guest role reduced access, temporary kick from server, or permanent kick from server, depending on the severity and/or frequency of the offense**"""

# Overview of the main bot commands.
commands_text = """Here are some of the Legend Family Bot commands, you can use them in the #bot-spam channel.\n
**!clashProfile** - to view your Clash Royale stats.
**!clashDeck** - to view your Clash Royale current deck.
**!chests** - to view your upcoming chests you will receive.
**!cwr** - to view your clan war readiness for your card levels.
**!tourney** - to instantly recieve an open tournament that is available to join.
**!topmembers** - shows the top ranked players in our family.
**!payday** - receive your 300 credits every 30 minutes.
**!heist** - Play a heist with a crew in #heist channel.
**!duel** - Challenge someone for a duel and win credits in #duels channel.
**!buy** - Take a look at what you can purchase with your credits.
**!balance** - To check your current bank balance.
**!profile** - view your server profile.
**!deck** - make and save your deck.
**!legend** - to see status of all Legend Family clans.
**!rep @user** - give reputation points to users.
**!remindme** - Use this command to make the bot remind you of something in the future.
**!trivia** - start a trivia of your choice. Bot will ask you questions, you will get points of answering them.
**!play** - Listen to songs, type with command with the song name inside a voice channel. (!skip, !pause, !resume, !playlist).
**!invite** - Get the invite link for the server to share with your friends.
**!report** - Report a user to the moderators for breaking the rules.
**!coaching** - To request a coaching session.\n
**You can type !help here to see the full commands list**"""

# Description of the server's channels.
info_text = """You will find several channels on our Discord Server\n
**#global-chat**: to discuss about the game.
**#tourneys**: Dozens of tournaments posted everyday.
**#news**: important info about family.
**#request-role**: Easily get your notification and archetype roles.
**#giveaways**: Win Discord credits and game keys every day.
**#deck-recommendation**: decks discussion.
**#off-topic**: you can chat about anything unrelated to clash royale here.
**#bots-spam**: Use bot commands, You can mute the channels you don't need in DISCORD settings.
**#heist**: Play Heist mini game with a crew and get lots of credits.
**#duels**: Challenge or accept duel offers for a Clash Royale Battle.
**#challenges**: Word and number challenge games with other members. Answer all the questions before any one else to win.
**#friends-forever**: Post your Clash friend invite link or add others.
"""

# Explanation of the weekly "Legend Wars" event.
cw_info = """We organize **Legend Wars** every weekend, which aims to determine **which clan is the strongest**.
The **idea** is simple: A private tournament that anyone may join **within Legend Family and the alliance.**
Score is calculated in a way that allows every participant to contribute to help their clan win. We sum the earned tournament trophies of the members of each clan to calculate a clan score, clan with highest clan score is declared the **winner**!
There are 2 factors to win: convince more players to participate within your clan and earn more tournament trophies. Both are **equally important**. We publish tourneys and passwords at same time, so we give equal chances to each clan and player.
The Top player in each war will recieve $10. However, each and every participant will recieve discord credits for getting trophies for their clan. The more trophies you can collect, the more credits you will get. Credits can used in LeGeND shop to buy various items.
**All clans** will be closed/full to avoid any leaks, nobody will be allowed to join.
**3 golden Rules for Legend Wars:** We respect the Opponent (no BMing if you win), we play to have fun (no obligation to participate), and don't join if you think you cannot play.
"""

# Explanation of the credits economy.
credits_info = """**WHAT ARE CREDITS?**
Credits are a virtual currency in LeGeND Discord, you earn credits by playing in Legend Wars, donating, and playing mini games in discord. To use your credits, you can buy items using !buy.
• Every 30 minutes, you can get free credits by typing !payday in #bot-spam channel.
• Every Sunday, you receive something called a "Weekly Payout". Which converts all your week's clan donations, War Cards collected and War wins into credits. So the more active you are in a clan, the more credits you get.
• We have Legend Wars every weekend, participating in these wars also give you tons of credits according to your tournament trophies.
• You can also win credits by playing #heist and #challenges.
• You can play Clash Royale #duels to bet on your skills in friend battles.
• Last but not least, you can get easy credits by just chatting on discord. The more you chat, the more credits you accumulate.
You can type !buy here to look at different ways you can spend these credits.
"""

# eSports team recruitment blurb.
esports_info = """**The LeGeND eSports Team** is recruiting all active and aspiring players!
With the goal of encouraging competitive play in the family, there is a LeGeND eSports **Americas** and **Eurasia** team to represent the family in various events. Our strongest players will compete side by side with the very best in leagues such as **CCTS, CPL, and even RPL**!
While we have a clan called LeGeND eSports!, the team operates separately from the clan, and sends members from our family to events.
But please remember that this is a more professional setting than the rest of the family and **poor behaviour will not be tolerated**.
Join now: https://discord.gg/ck8nGEN
Please note that if you just lurk in the server and not participate for a long period of time you will be kicked off the server.
"""

# Sister communities for other Supercell games.
coc_bs = """We also play **Clash of Clans** and **Brawl Stars**, we would like to invite to you join them if you play either of these supercell games.
• Clash of Clans - **LeGeND Raiders! (#JQJRGVJU)** - https://discord.gg/BG7wMFw
• Brawl Stars - https://discord.gg/5ww5D3q
You can join either servers and talk to our friendly staff to get you set up with a club of your choice.
"""

# Social media links.
social_info = """Stay Social! Come and follow us on these platforms to stay up to date on the latest news and announcements.
https://twitter.com/TheLegendClans
https://www.facebook.com/LegendClans
https://www.instagram.com/legendclans
Visit our website to see live clan statistics:
https://legendclans.com
"""

# Rules shown to guests on joining.
guest_rules = """Welcome to the **Legend Family** Discord server. As a guest, you agree to the following rules:
• Respect others' opinions. If you disagree, please do so in a constructive manner.
• This is an English only server, please use any other languages in a private message.
• Do not spam, and avoid ever using @clanname without permission from clan managers or deputies.
• No advertisement of any kind, e.g. clans, websites, discord invites, etc.
• Use #bot-spam for bot features, e.g. !deck or !payday.
• Respect and do not subvert moderators or managers.
• A good rule is to talk to people as if you were talking to them face to face.
Failure to follow these rules will get you kicked from the server. Repeat offenders will be banned.
You can chat with family members and guests in `#global-chat`. For games, you can check out `#heist` `#duels` and `#challenges`.
If you would like to invite your friends to join this server, you may use this Discord invite: <http://discord.gg/T7XdjFS>
Additional help and information: https://legendclans.com
Thanks + enjoy!
"""
class legend:
    """Discord cog with Legend-family helpers.

    NOTE(review): written against the old discord.py 0.16 / Red-bot API
    (`member.server`, `bot.say`, async cog methods) -- confirm the runtime
    before modernizing. Depends on the 'crtools' cog being loaded first.
    """

    def __init__(self, bot):
        self.bot = bot
        self.settings = dataIO.load_json('data/legend/settings.json')
        # Shared helpers exposed by the 'crtools' cog.
        self.auth = self.bot.get_cog('crtools').auth
        self.constants = self.bot.get_cog('crtools').constants
        self.tags = self.bot.get_cog('crtools').tags
        self.clans = self.bot.get_cog('crtools').clans
        # Official Clash Royale API client (async mode).
        self.clash = clashroyale.OfficialAPI(self.auth.getOfficialToken(), is_async=True)
        self.welcome = dataIO.load_json('data/legend/welcome.json')
        self.bank = dataIO.load_json('data/economy/bank.json')
        self.seen = dataIO.load_json('data/seen/seen.json')

    async def updateSeen(self):
        """Reload the 'seen' tracking data from disk."""
        self.seen = dataIO.load_json('data/seen/seen.json')

    def save_settings(self):
        """Saves the json"""
        dataIO.save_json('data/legend/settings.json', self.settings)

    # NOTE(review): missing `self` parameter -- calling self.id_generator()
    # would pass the instance as `size`; confirm how this is invoked.
    async def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
        return ''.join(random.choice(chars) for _ in range(size))

    async def _add_roles(self, member, role_names):
        """Add roles"""
        server = member.server
        roles = [discord.utils.get(server.roles, name=role_name) for role_name in role_names]
        try:
            await self.bot.add_roles(member, *roles)
        except discord.Forbidden:
            raise
        except discord.HTTPException:
            raise

    async def _remove_roles(self, member, role_names):
        """Remove roles"""
        server = member.server
        roles = [discord.utils.get(server.roles, name=role_name) for role_name in role_names]
        try:
            await self.bot.remove_roles(member, *roles)
        except:
            # Best effort: role removal failures are deliberately ignored.
            pass

    async def _is_commander(self, member):
        # True if the member holds any role listed in BOTCOMMANDER_ROLES.
        server = member.server
        botcommander_roles = [discord.utils.get(server.roles, name=r) for r in BOTCOMMANDER_ROLES]
        botcommander_roles = set(botcommander_roles)
        author_roles = set(member.roles)
        if len(author_roles.intersection(botcommander_roles)):
            return True
        else:
            return False

    async def _is_member(self, member):
        # True if the member holds any family membership role.
        server = member.server
        botcommander_roles = [discord.utils.get(server.roles, name=r) for r in ["Member",
                                                                                "Co-Leader",
                                                                                "Hub Officer",
                                                                                "Clan Deputy",
                                                                                "Clan Manager"]]
        botcommander_roles = set(botcommander_roles)
        author_roles = set(member.roles)
        if len(author_roles.intersection(botcommander_roles)):
            return True
        else:
            return False

    async def getUserCount(self, server, name):
        """Returns the numbers of people with the member role"""
        members = server.members
        count = 0
        for member in members:
            for role in member.roles:
                if role.name == name:
                    count += 1
        return count

    def emoji(self, name):
        """Emoji by name."""
        # Spaces, dashes and dots are stripped from the requested name to
        # match the server's emoji naming; '' is returned when not found.
        for emoji in self.bot.get_all_emojis():
            if emoji.name == name.replace(" ", "").replace("-", "").replace(".", ""):
                return '<:{}:{}>'.format(emoji.name, emoji.id)
        return ''

    def getLeagueEmoji(self, trophies):
        """Get clan war League Emoji"""
        # Inclusive trophy ranges per league; covers 0..99999, so any value
        # above that falls through and returns None.
        mapLeagues = {
            "legendleague": [3000, 99999],
            "gold3league": [2500, 2999],
            "gold2league": [2000, 2499],
            "goldleague": [1500, 1999],
            "silver3league": [1200, 1499],
            "silver2league": [900, 1199],
            "silverleague": [600, 899],
            "bronze3league": [400, 599],
            "bronze2league": [200, 399],
            "bronzeleague": [0, 199]
        }
        for league in mapLeagues.keys():
            if mapLeagues[league][0] <= trophies <= mapLeagues[league][1]:
                return self.emoji(league)

    async def getLeague(self, trophies):
        """Map a trophy count to its coarse league name."""
        if trophies >= 3000:
            return "legend"
        elif trophies >= 1500:
            return "gold"
        elif trophies >= 600:
            return "silver"
        else:
            return "bronze"

    async def getBestLeague(self, cards):
        """Get best leagues using readiness"""
        readiness = await self.clanwarReadiness(cards)
        # Convert cumulative readiness percentages into per-league deltas,
        # then pick the league with the largest share.
        legend = readiness["legend"]
        gold = readiness["gold"] - legend
        silver = readiness["silver"] - gold - legend
        bronze = readiness["bronze"] - silver - gold - legend
        readinessCount = {"legend": legend, "gold": gold, "silver": silver, "bronze": bronze}
        max_key = max(readinessCount, key=lambda k: readinessCount[k])
        return "{} League ({}%)".format(max_key.capitalize(), readiness[max_key])

    async def getBestPerc(self, cards, league):
        """Get best leagues level perc using readiness"""
        readiness = await self.clanwarReadiness(cards)
        return readiness[league]

    async def clanwarReadiness(self, cards):
        """Calculate clanwar readiness"""
        # For each league, percentage of the player's cards at or above
        # that league's required card level.
        readiness = {}
        leagueLevels = {
            "legend": 12,
            "gold": 11,
            "silver": 10,
            "bronze": 9
        }
        for league in leagueLevels.keys():
            readiness[league] = 0
            for card in cards:
                if await self.constants.get_new_level(card) >= leagueLevels[league]:
                    readiness[league] += 1
            readiness[league] = int((readiness[league] / len(cards)) * 100)
        return readiness

    @commands.group(pass_context=True, no_pm=True, name="clash")
    async def _clash(self, ctx):
        """Legend BS cog's group command"""
        if ctx.invoked_subcommand is None:
            await send_cmd_help(ctx)
@_clash.command(pass_context=True)
async def legend(self, ctx, member: discord.Member=None):
    """ Show Legend clans, can also show clans based on a member's trophies"""
    await self.bot.type()
    if member is None:
        # No member given: use maxed-out placeholder stats so every clan
        # passes the eligibility filter below.
        trophies = 9999
        maxtrophies = 9999
        plyrLeagueCWR = {"legend": 0, "gold": 0, "silver": 0, "bronze": 0}
    else:
        try:
            await self.bot.type()
            # Resolve the member's saved tag, then pull their live profile.
            profiletag = await self.tags.getTagCR(member.id)
            profiledata = await self.clash.get_player(profiletag)
            trophies = profiledata.trophies
            cards = profiledata.cards
            maxtrophies = profiledata.best_trophies
            maxwins = profiledata.challenge_max_wins
            plyrLeagueCWR = await self.clanwarReadiness(cards)
            if profiledata.clan is None:
                clanname = "*None*"
            else:
                clanname = profiledata.clan.name
            ign = profiledata.name
        except clashroyale.RequestError:
            return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
        except KeyError:
            # No tag stored for this member.
            return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    # Fetch live data for every family clan.
    clandata = []
    for clankey in self.clans.keysClans():
        try:
            clan = await self.clash.get_clan(await self.clans.getClanData(clankey, 'tag'))
            clandata.append(clan)
        except clashroyale.RequestError:
            return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    # Sort descending by war trophies, then required trophies, then score.
    clandata = sorted(clandata, key=lambda x: (x.clan_war_trophies, x.required_trophies, x.clan_score), reverse=True)
    embed = discord.Embed(color=0xFAA61A)
    if "url" in self.settings and "family" in self.settings:
        embed.set_author(name=self.settings['family'], url=self.settings['url'],
                         icon_url="https://i.imgur.com/dtSMITE.jpg")
    else:
        embed.set_author(name="Legend Family Clans",
                         url="http://royaleapi.com/clan/family/legend",
                         icon_url="https://i.imgur.com/dtSMITE.jpg")
    embed.set_footer(text=credits, icon_url=creditIcon)
    foundClan = False
    totalMembers = 0
    totalWaiting = 0
    for clan in clandata:
        numWaiting = 0
        personalbest = 0
        bonustitle = None
        plyrCWRGood = True
        # Stored per-clan settings keyed by the clan's tag.
        clankey = await self.clans.getClanKey(clan.tag.strip("#"))
        numWaiting = await self.clans.numWaiting(clankey)
        personalbest = await self.clans.getClanData(clankey, 'personalbest')
        cwr = await self.clans.getClanData(clankey, 'cwr')
        bonustitle = await self.clans.getClanData(clankey, 'bonustitle')
        emoji = await self.clans.getClanData(clankey, 'emoji')
        totalWaiting += numWaiting
        if numWaiting > 0:
            title = "["+str(numWaiting)+" Waiting] "
        else:
            title = ""
        member_count = clan.get("members")
        totalMembers += member_count
        if member_count < 50:
            showMembers = str(member_count) + "/50"
        else:
            showMembers = "**FULL** "
        # Only surface the clan type when it is not the default invite-only.
        if str(clan.type) != 'inviteOnly':
            title += "["+str(clan.type).title()+"] "
        title += clan.name + " (" + clan.tag + ") "
        if personalbest > 0:
            title += "PB: "+str(personalbest)+"+ "
        # Append each non-zero CW-readiness requirement and check whether
        # the member meets all of them.
        for league in cwr:
            if cwr[league] > 0:
                title += "{}: {}% ".format(league[:1].capitalize(), cwr[league])
                if plyrLeagueCWR[league] < cwr[league]:
                    plyrCWRGood = False
        if bonustitle is not None:
            title += bonustitle
        desc = ("{} {} <:crtrophy:448609948008579073> "
                "{}+ {} {}".format(emoji,
                                   showMembers,
                                   clan.required_trophies,
                                   self.getLeagueEmoji(clan.clan_war_trophies),
                                   clan.clan_war_trophies))
        # Eligibility: show the clan if no member was given, OR the member
        # meets the trophy/PB/CWR requirements and is within 1200 trophies
        # of the clan requirement, OR a mid-ladder exception applies
        # (low-requirement clan with open spots, member at 2000-4000).
        if (member is None) or ((clan.required_trophies <= trophies) and
                                (maxtrophies > personalbest) and
                                (plyrCWRGood) and
                                (trophies - clan.required_trophies < 1200) and
                                (clan.type != 'closed')) or ((clan.required_trophies < 2000) and
                                                             (member_count != 50) and
                                                             (2000 < trophies < 4000) and
                                                             (clan.type != 'closed')):
            foundClan = True
            embed.add_field(name=title, value=desc, inline=False)
    if not foundClan:
        embed.add_field(name="uh oh!",
                        value="There are no clans available for you at the moment, "
                              "please type !legend to see all clans.",
                        inline=False)
    embed.description = ("Our Family is made up of {} "
                         "clans with a total of {} "
                         "members. We have {} spots left "
                         "and {} members in waiting lists.".format(await self.clans.numClans(),
                                                                   totalMembers,
                                                                   (await self.clans.numClans()*50)-totalMembers,
                                                                   totalWaiting))
    await self.bot.say(embed=embed)
    if member is not None:
        # Follow up with the member's own stats summary.
        await self.bot.say(("Hello **{}**, above are all the clans "
                            "you are allowed to join, based on your statistics. "
                            "Which clan would you like to join? \n\n"
                            "**Name:** {} (#{})\n**Trophies:** {}/{}\n"
                            "**CW Readiness:** {}\n"
                            "**Max Challenge Wins:** {}\n"
                            "**Clan:** {}\n\n"
                            ":warning: **YOU WILL BE REJECTED "
                            "IF YOU JOIN ANY CLAN WITHOUT "
                            "APPROVAL**".format(ign,
                                                ign,
                                                profiletag,
                                                trophies,
                                                maxtrophies,
                                                await self.getBestLeague(cards),
                                                maxwins,
                                                clanname)))
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def approve(self, ctx, member: discord.Member, clankey):
    """Send instructions to people joining a clan"""
    server = ctx.message.server
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        return await self.bot.say("This command can only be executed in the Legend Family Server")
    clankey = clankey.lower()
    # Load the target clan's stored settings.
    try:
        clan_tag = await self.clans.getClanData(clankey, 'tag')
        clan_name = await self.clans.getClanData(clankey, 'name')
        clan_role = await self.clans.getClanData(clankey, 'role')
        clan_pb = await self.clans.getClanData(clankey, 'personalbest')
        clan_cwr = await self.clans.getClanData(clankey, 'cwr')
        clan_approval = await self.clans.getClanData(clankey, 'approval')
    except KeyError:
        return await self.bot.say("Please use a valid clanname: {}".format(await self.clans.namesClans()))
    leftClan = False
    try:
        await self.bot.type()
        profiletag = await self.tags.getTagCR(member.id)
        profiledata = await self.clash.get_player(profiletag)
        clandata = await self.clash.get_clan(clan_tag)
        ign = profiledata.name
        if profiledata.clan is None:
            # Already clanless: no need to wait for them to leave.
            leftClan = True
            clantag = ""
        else:
            clantag = profiledata.clan.tag.strip("#")
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    except KeyError:
        return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    # NOTE(review): despite the name, ``membership`` is True when the member
    # is NOT already in a family clan (the check is negated).
    membership = not await self.clans.verifyMembership(clantag)
    if membership:
        trophies = profiledata.trophies
        cards = profiledata.cards
        maxtrophies = profiledata.best_trophies
        plyrLeagueCWR = await self.clanwarReadiness(cards)
        if (clandata.get("members") == 50):
            return await self.bot.say("Approval failed, the clan is Full.")
        # Reject if either the current trophies or the personal best
        # requirement is not met.
        if ((trophies < clandata.required_trophies) or (maxtrophies < clan_pb)):
            return await self.bot.say("Approval failed, you don't meet the trophy requirements.")
        plyrCWRGood = True
        for league in clan_cwr:
            if clan_cwr[league] > 0:
                if plyrLeagueCWR[league] < clan_cwr[league]:
                    plyrCWRGood = False
        if (not plyrCWRGood):
            return await self.bot.say("Approval failed, you don't meet the CW Readiness requirements.")
        if (clandata.type == "closed"):
            return await self.bot.say("Approval failed, the clan is currently closed.")
        if clan_approval:
            # This clan requires its own staff role to approve recruits.
            if clan_role not in [y.name for y in ctx.message.author.roles]:
                return await self.bot.say("Approval failed, only {} staff can approve new recruits for this clan.".format(clan_name))
        if await self.clans.numWaiting(clankey) > 0:
            if await self.clans.checkWaitingMember(clankey, member.id):
                # Member is queued; approve only if their queue position
                # fits in the clan's remaining open spots.
                canWait = (50 - clandata.get("members")) - 1
                if await self.clans.getWaitingIndex(clankey, member.id) > canWait:
                    return await self.bot.say("Approval failed, you are not first in queue for the waiting list on this server.")
                await self.clans.delWaitingMember(clankey, member.id)
                role = discord.utils.get(server.roles, name="Waiting")
                try:
                    await self.bot.remove_roles(member, role)
                except discord.Forbidden:
                    raise
                except discord.HTTPException:
                    raise
            else:
                return await self.bot.say("Approval failed, there is a waiting queue for this clan. Please first join the waiting list.")
        if not leftClan:
            # Give the member three minutes to leave their current clan
            # before the recruit code is generated.
            warning = ("\n\n:warning: **YOU WILL BE REJECTED "
                       "IF YOU JOIN ANY CLAN WITHOUT "
                       "APPROVAL**")
            await self.bot.say(("{} Please leave your current clan now. "
                                "Your recruit code will arrive in 3 minutes.{}".format(member.mention, warning)))
            await asyncio.sleep(180)
        try:
            # One-time code the recruit must include in their in-game
            # join request so clan staff can match them.
            recruitCode = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
            await self.bot.send_message(member, "Congratulations, You have been approved to join **" + clan_name +
                                        " (#" + clan_tag + ")**.\n\n\n" +
                                        "Your **RECRUIT CODE** is: ``" + recruitCode + "`` \n" +
                                        "Send this code in the join request message.\n\n" +
                                        "Click this link to join the clan: https://legendclans.com/clanInfo/" +
                                        clan_tag + "\n\n" +
                                        "That's it! Now wait for your clan leadership to accept you. " +
                                        "It usually takes a few minutes to get accepted, but it may take up to a few hours. \n\n" +
                                        "**IMPORTANT**: Once your clan leadership has accepted your request, " +
                                        "let a staff member in discord know that you have been accepted. " +
                                        "They will then unlock all the member channels for you.")
            await self.bot.say(member.mention + " has been approved for **" + clan_name + "**. Please check your DM for instructions on how to join.")
            try:
                newname = ign + " (Approved)"
                await self.bot.change_nickname(member, newname)
            except discord.HTTPException:
                await self.bot.say("I don’t have permission to change nick for this user.")
            # Announce the approval in the staff channel.
            roleName = discord.utils.get(server.roles, name=clan_role)
            embed = discord.Embed(color=0x0080ff)
            embed.set_author(name="New Recruit", icon_url="https://i.imgur.com/dtSMITE.jpg")
            embed.add_field(name="Name", value=ign, inline=True)
            embed.add_field(name="Recruit Code", value=recruitCode, inline=True)
            embed.add_field(name="Clan", value=clan_name, inline=True)
            embed.set_footer(text=credits, icon_url=creditIcon)
            await self.bot.send_message(discord.Object(id='375839851955748874'), content=roleName.mention, embed=embed)
        except discord.errors.Forbidden:
            # DMs disabled by the member.
            await self.bot.say("Approval failed, {} please fix your privacy settings, we are unable to send you Direct Messages.".format(member.mention))
    else:
        await self.bot.say("Approval failed, You are already a part of a clan in the family.")
@_clash.command(pass_context=True, no_pm=True)
async def newmember(self, ctx, member: discord.Member):
    """Setup nickname, roles and invite links for a new member"""
    server = ctx.message.server
    author = ctx.message.author
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        return await self.bot.say("This command can only be executed in the Legend Family Server")
    # Only act on members who do not already hold family member roles.
    isMember = await self._is_member(member)
    if isMember:
        return await self.bot.say("Error, " + member.mention + " is not a new member.")
    try:
        await self.bot.type()
        profiletag = await self.tags.getTagCR(member.id)
        profiledata = await self.clash.get_player(profiletag)
        if profiledata.clan is None:
            clantag = ""
            clanname = ""
        else:
            clantag = profiledata.clan.tag.strip("#")
            clanname = profiledata.clan.name
        ign = profiledata.name
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    except KeyError:
        return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    # Anyone may run this on themselves; running it on others requires
    # commander permissions.
    allowed = False
    if member is None:
        allowed = True
    elif member.id == author.id:
        allowed = True
    else:
        allowed = await self._is_commander(author)
    if not allowed:
        return await self.bot.say("You dont have enough permissions to use this command on others.")
    membership = await self.clans.verifyMembership(clantag)
    if membership:
        try:
            savekey = await self.clans.getClanKey(clantag)
            invite = await self.clans.getClanData(savekey, 'discord')
            role = await self.clans.getClanData(savekey, 'role')
            current_members = await self.getUserCount(server, role)
            # NOTE(review): ``> 50`` lets exactly the 51st member through;
            # possibly should be ``>= 50`` — confirm intended cap.
            if current_members > 50:
                return await self.bot.say("Audit Error: Maximum number of {} discord members reached, type ``!audit {}`` to resolve this issue.".format(clanname, savekey))
            if invite is not None:
                joinLink = "https://discord.gg/" + str(invite)
                await self.bot.send_message(member, "Hi There! Congratulations on getting accepted into our family. " +
                                            "We have unlocked all the member channels for you in LeGeND Discord Server. " +
                                            "Now you have to carefuly read this message and follow the steps mentioned below: \n\n" +
                                            "Please click on the link below to join your clan Discord server. \n\n" +
                                            clanname + ": " + joinLink + "\n\n" +
                                            "Please do not leave our main or clan servers while you are in the clan. Thank you.")
            else:
                # Clan has no dedicated Discord server configured.
                await self.bot.send_message(member, "Hi There! Congratulations on getting accepted into our family. "
                                            "We have unlocked all the member channels for you in LeGeND Discord Server. \n\n" +
                                            "Please do not leave our Discord server while you are in the clan. Thank you.")
        except discord.errors.Forbidden:
            return await self.bot.say(("Membership failed, {} please fix your privacy settings, "
                                       "we are unable to send you Direct Messages.".format(member.mention)))
        # They have joined in-game; drop them from any waiting list.
        await self.clans.delWaitingMember(savekey, member.id)
        mymessage = ""
        if ign is None:
            await self.bot.say("Cannot find IGN.")
        else:
            try:
                # Nickname convention: "IGN | ClanNickname".
                newclanname = await self.clans.getClanData(savekey, 'nickname')
                newname = ign + " | " + newclanname
                await self.bot.change_nickname(member, newname)
            except discord.HTTPException:
                await self.bot.say("I don’t have permission to change nick for this user.")
            else:
                mymessage += "Nickname changed to **{}**\n".format(newname)
        role_names = [role, 'Member']
        try:
            await self._add_roles(member, role_names)
            mymessage += "**" + await self.clans.getClanData(savekey, 'role') + "** and **Member** roles added."
        except discord.Forbidden:
            await self.bot.say(
                "{} does not have permission to edit {}’s roles.".format(
                    author.display_name, member.display_name))
        except discord.HTTPException:
            await self.bot.say("failed to add {}.".format(', '.join(role_names)))
        await self.bot.say(mymessage)
        # Greet publicly, strip Guest, announce to staff, then drip-feed
        # onboarding info sheets by DM five minutes apart.
        welcomeMsg = rand_choice(self.welcome["GREETING"])
        await self.bot.send_message(discord.Object(id='374596069989810178'), welcomeMsg.format(member, server))
        await self._remove_roles(member, ['Guest'])
        roleName = discord.utils.get(server.roles, name=role_names[0])
        await self.bot.send_message(discord.Object(id='375839851955748874'),
                                    "**{}** recruited **{} (#{})** to {}".format(ctx.message.author.display_name,
                                                                                 ign,
                                                                                 profiletag,
                                                                                 roleName.mention))
        await asyncio.sleep(300)
        await self.bot.send_message(member, rules_text)
        await asyncio.sleep(300)
        await self.bot.send_message(member, commands_text)
        await asyncio.sleep(300)
        await self.bot.send_message(member, info_text)
        await asyncio.sleep(300)
        await self.bot.send_message(member, cw_info)
        await asyncio.sleep(300)
        await self.bot.send_message(member, credits_info)
        await asyncio.sleep(300)
        await self.bot.send_message(member, coc_bs)
        await asyncio.sleep(300)
        await self.bot.send_message(member, esports_info)
        await asyncio.sleep(300)
        await self.bot.send_message(member, social_info)
    else:
        # Not in a family clan yet: offer to re-run this command shortly.
        await self.bot.say("You must be accepted into a clan before I can give you clan roles. "
                           "Would you like me to check again in 2 minutes? (Yes/No)")
        answer = await self.bot.wait_for_message(timeout=15, author=ctx.message.author)
        if answer is None:
            return
        elif "yes" not in answer.content.lower():
            return
        await self.bot.say("Okay, I will retry this command in 2 minutes.")
        await asyncio.sleep(120)
        # Re-dispatch this same command for the member.
        message = ctx.message
        message.content = ctx.prefix + "newmember {}".format(member.mention)
        await self.bot.process_commands(message)
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def waiting(self, ctx, member: discord.Member, clankey):
    """Add people to the waiting list for a clan

    Verifies the member's saved tag, checks the clan's trophy, personal
    best and clan war readiness requirements, then queues the member and
    gives them the "Waiting" role.
    """
    server = ctx.message.server
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        return await self.bot.say("This command can only be executed in the Legend Family Server")
    clankey = clankey.lower()
    # Load the target clan's stored settings.
    try:
        clan_tag = await self.clans.getClanData(clankey, 'tag')
        clan_name = await self.clans.getClanData(clankey, 'name')
        clan_pb = await self.clans.getClanData(clankey, 'personalbest')
        clan_cwr = await self.clans.getClanData(clankey, 'cwr')
    except KeyError:
        return await self.bot.say("Please use a valid clanname: {}".format(await self.clans.namesClans()))
    try:
        await self.bot.type()
        profiletag = await self.tags.getTagCR(member.id)
        profiledata = await self.clash.get_player(profiletag)
        clandata = await self.clash.get_clan(clan_tag)
        ign = profiledata.name
        trophies = profiledata.trophies
        cards = profiledata.cards
        maxtrophies = profiledata.best_trophies
        plyrLeagueCWR = await self.clanwarReadiness(cards)
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    except KeyError:
        return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    # BUGFIX: this used ``and``, so a player had to fail BOTH the current
    # trophy and the personal-best requirement to be rejected.  ``or``
    # matches the equivalent check in ``approve``.
    if ((trophies < clandata.required_trophies) or (maxtrophies < clan_pb)):
        return await self.bot.say("Cannot add you to the waiting list, you don't meet the trophy requirements.")
    plyrCWRGood = True
    for league in clan_cwr:
        if clan_cwr[league] > 0:
            if plyrLeagueCWR[league] < clan_cwr[league]:
                plyrCWRGood = False
    if (not plyrCWRGood):
        return await self.bot.say("Cannot add you to the waiting lists, you don't meet the CW Readiness requirements.")
    if not await self.clans.addWaitingMember(clankey, member.id):
        return await self.bot.say("You are already in a waiting list for this clan.")
    role = discord.utils.get(server.roles, name="Waiting")
    # The previous ``except Forbidden/HTTPException: raise`` handlers were
    # no-ops; let role-assignment errors propagate naturally.
    await self.bot.add_roles(member, role)
    await self.bot.say(member.mention + " You have been added to the waiting list for **" +
                       clan_name +
                       "**. We will mention you when a spot is available.")
    # Notify clan staff in the staff channel.
    roleName = discord.utils.get(server.roles, name=await self.clans.getClanData(clankey, 'role'))
    await self.bot.send_message(discord.Object(id='375839851955748874'), "**{} (#{})** added to the waiting list for {}".format(ign, profiletag, roleName.mention))
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def remove(self, ctx, member: discord.Member, clankey):
    """Delete people from the waiting list for a clan"""
    server = ctx.message.server
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        return await self.bot.say("This command can only be executed in the Legend Family Server")
    clankey = clankey.lower()
    try:
        clan_name = await self.clans.getClanData(clankey, 'name')
    except KeyError:
        return await self.bot.say("Please use a valid clanname: {}".format(await self.clans.namesClans()))
    # delWaitingMember raises ValueError when the member is not queued.
    try:
        await self.clans.delWaitingMember(clankey, member.id)
        role = discord.utils.get(server.roles, name="Waiting")
        # The previous ``except Forbidden/HTTPException: raise`` handlers
        # were no-ops; let role-removal errors propagate naturally.
        await self.bot.remove_roles(member, role)
        await self.bot.say(member.mention + " has been removed from the waiting list for **" + clan_name + "**.")
    except ValueError:
        await self.bot.say("Recruit not found in the waiting list.")
@_clash.command(pass_context=True, no_pm=True, aliases=["waitlist", "wait"])
async def waitinglist(self, ctx):
    """Show status of the waiting list."""
    message = ""
    counterClans = 0
    counterPlayers = 0
    server = ctx.message.server
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        await self.bot.say("This command can only be executed in the Legend Family Server")
        return
    await self.bot.type()
    embed = discord.Embed(color=0xFAA61A)
    for clan in self.clans.keysClans():
        if await self.clans.numWaiting(clan) > 0:
            counterClans += 1
            # Reset per clan; ``message`` also doubles below as the
            # "did any clan have waiters at all" flag.
            message = ""
            for index, userID in enumerate(await self.clans.getClanData(clan, 'waiting')):
                user = discord.utils.get(ctx.message.server.members, id=userID)
                try:
                    message += str(index+1) + ". " + user.display_name + "\n"
                    counterPlayers += 1
                except AttributeError:
                    # ``user`` is None (left the server): prune them from
                    # the stored waiting list.
                    await self.clans.delWaitingMember(clan, userID)
                    message += str(index+1) + ". " + "*user not found*" + "\n"
            embed.add_field(name=await self.clans.getClanData(clan, 'name'), value=message, inline=False)
    # ``message`` is only non-empty here if at least one clan had waiters
    # (each such clan always appends at least one line).
    if not message:
        await self.bot.say("The waiting list is empty")
    else:
        embed.description = "We have " + str(counterPlayers) + " people waiting for " + str(counterClans) + " clans."
        embed.set_author(name="Legend Family Waiting List", icon_url="https://i.imgur.com/dtSMITE.jpg")
        embed.set_footer(text=credits, icon_url=creditIcon)
        await self.bot.say(embed=embed)
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def changenick(self, ctx, member: discord.Member=None):
    """ Change nickname of a user of their IGN + Clan"""
    # Default to the command author when no member is given.
    member = member or ctx.message.author
    try:
        await self.bot.type()
        profiletag = await self.tags.getTagCR(member.id)
        profiledata = await self.clash.get_player(profiletag)
        clantag = "none" if profiledata.clan is None else profiledata.clan.tag.strip("#")
        ign = profiledata.name
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    except KeyError:
        return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    # Guard clauses: family membership required, and the game profile
    # must expose a name.
    if not await self.clans.verifyMembership(clantag):
        await self.bot.say("This command is only available for family members.")
        return
    if ign is None:
        await self.bot.say("Cannot find IGN.")
        return
    try:
        # Nickname convention: "IGN | ClanNickname".
        savekey = await self.clans.getClanKey(clantag)
        newclanname = await self.clans.getClanData(savekey, 'nickname')
        newname = ign + " | " + newclanname
        await self.bot.change_nickname(member, newname)
    except discord.HTTPException:
        await self.bot.say("I don’t have permission to change nick for this user.")
    else:
        await self.bot.say("Nickname changed to ** {} **\n".format(newname))
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def changeclan(self, ctx, member: discord.Member=None):
    """ Change clan of a user of their IGN + Clan"""
    # Default to the command author when no member is given.
    member = member or ctx.message.author
    try:
        await self.bot.type()
        profiletag = await self.tags.getTagCR(member.id)
        profiledata = await self.clash.get_player(profiletag)
        if profiledata.clan is None:
            clantag = "none"
        else:
            clantag = profiledata.clan.tag.strip("#")
        ign = profiledata.name
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    except KeyError:
        return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
    membership = await self.clans.verifyMembership(clantag)
    if membership:
        mymessage = ""
        savekey = await self.clans.getClanKey(clantag)
        # Strip every family clan role before assigning the new one.
        rolesToRemove = await self.clans.rolesClans()
        await self._remove_roles(member, rolesToRemove)
        if ign is None:
            await self.bot.say("Cannot find IGN.")
        else:
            try:
                # Nickname convention: "IGN | ClanNickname".
                newclanname = await self.clans.getClanData(savekey, 'nickname')
                newname = ign + " | " + newclanname
                await self.bot.change_nickname(member, newname)
            except discord.HTTPException:
                await self.bot.say("I don’t have permission to change nick for this user.")
            else:
                mymessage += "Nickname changed to **{}**\n".format(newname)
        role_names = [await self.clans.getClanData(savekey, 'role'), 'Member']
        try:
            await self._add_roles(member, role_names)
            mymessage += "**" + await self.clans.getClanData(savekey, 'role') + "** and **Member** roles added."
        except discord.Forbidden:
            # BUGFIX: this message previously named the target member as the
            # one lacking permission ("X does not have permission to edit
            # X's roles"); the actor is the command invoker, matching the
            # identical handler in ``newmember``.
            await self.bot.say(
                "{} does not have permission to edit {}’s roles.".format(
                    ctx.message.author.display_name, member.display_name))
        except discord.HTTPException:
            await self.bot.say("failed to add {}.".format(', '.join(role_names)))
        await self.bot.say(mymessage)
    else:
        await self.bot.say("This command is only available for family members.")
@_clash.command(pass_context=True, no_pm=True)
@commands.has_any_role(*BOTCOMMANDER_ROLES)
async def audit(self, ctx, clankey):
    """ Check to see if your clan members are setup properly in discord.

    Cross-checks the in-game roster against discord (saved tags, clan
    role, nickname convention, activity, trophy requirement) and
    validates the clan's in-game settings, then reports all problems.
    """
    server = ctx.message.server
    legendServer = ["374596069989810176"]
    if server.id not in legendServer:
        return await self.bot.say("This command can only be executed in the Legend Family Server")
    clankey = clankey.lower()
    try:
        clan_tag = await self.clans.getClanData(clankey, 'tag')
        clan_name = await self.clans.getClanData(clankey, 'name')
        clan_nickname = await self.clans.getClanData(clankey, 'nickname')
        # 'role' was fetched twice in the original; once is enough.
        clan_role = await self.clans.getClanData(clankey, 'role')
    except KeyError:
        return await self.bot.say("Please use a valid clanname: {}".format(await self.clans.namesClans()))
    await self.bot.type()
    try:
        clandata = await self.clash.get_clan(clan_tag)
    except clashroyale.RequestError:
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    await self.updateSeen()
    # Snapshot the in-game roster (parallel lists by member index).
    cr_members_name = []
    cr_members_tag = []
    cr_members_trophy = []
    for member in clandata.member_list:
        cr_members_name.append(member.name)
        cr_members_tag.append(member.tag.strip("#"))
        cr_members_trophy.append(member.trophies)
    role = discord.utils.get(server.roles, name=clan_role)
    d_members = [m for m in server.members if role in m.roles]
    d_members = sorted(d_members, key=lambda x: x.display_name.lower())
    cr_members_with_no_player_tag = []
    cr_members_with_less_trophies = []
    d_members_with_no_player_tag = []
    d_members_not_in_clan = []
    d_members_without_role = []
    d_members_without_name = []
    d_members_inactive = []
    cr_clanSettings = []
    # Discord -> game: everyone holding the clan role should have a saved
    # tag and actually be in the clan.
    for d_member in d_members:
        try:
            player_tag = await self.tags.getTagCR(d_member.id)
            if player_tag not in cr_members_tag:
                d_members_not_in_clan.append(d_member.display_name)
                # Also flag 8+ days (691200 s) without discord activity.
                # NOTE(review): original indentation was ambiguous; this
                # keeps the check nested under the not-in-clan branch.
                try:
                    if self.seen[legendServer[0]][d_member.id]['TIMESTAMP'] < time.time() - 691200:
                        d_members_inactive.append(d_member.display_name)
                except Exception:
                    # was a bare ``except:``; missing seen-data is fine
                    pass
        except KeyError:
            d_members_with_no_player_tag.append(d_member.display_name)
            continue
    # Game -> discord: everyone in the clan should be resolvable on
    # discord, carry the clan role, and follow "IGN | nickname" naming.
    for index, player_tag in enumerate(cr_members_tag):
        try:
            dc_member = await self.tags.getUserCR(server.members, player_tag)
            if role not in dc_member.roles:
                d_members_without_role.append(dc_member.display_name)
            if (cr_members_name[index] not in dc_member.display_name) or (clan_nickname not in dc_member.display_name):
                d_members_without_name.append(dc_member.display_name)
        except AttributeError:
            # getUserCR returned None: player has no linked discord user.
            cr_members_with_no_player_tag.append(cr_members_name[index])
            continue
    clanReq = clandata.required_trophies
    for index, player_trophy in enumerate(cr_members_trophy):
        if player_trophy < clanReq:
            cr_members_with_less_trophies.append(cr_members_name[index])
    # Expected in-game clan settings: family badge, International
    # location, standard description, not closed.
    cr_clanSettings.append(clandata.badge_id == 16000002)
    cr_clanSettings.append(clandata.location.name == "International")
    cr_clanSettings.append("Legend Family🔥14 Clans🔥LegendClans.com🔥Events & Prizes🔥Apply at legendclans.com/discord🔥" in clandata.description)
    cr_clanSettings.append(clandata.type != "closed")
    message = ""
    if False in cr_clanSettings:
        message += "\n\n:warning: Problems in clan settings for **" + clan_name + "**:```"
        if not cr_clanSettings[0]: message += "\n• Clan Badge is incorrect."
        if not cr_clanSettings[1]: message += "\n• Clan Location is incorrect."
        if not cr_clanSettings[2]: message += "\n• Clan description is incorrect."
        if not cr_clanSettings[3]: message += "\n• Clan is closed."
        message += "```\n\n"
    if cr_members_with_no_player_tag:
        message += ":warning: **({})** Players in **{}**, but have **NOT** joined discord: ```• ".format(len(cr_members_with_no_player_tag), clan_name)
        message += "\n• ".join(cr_members_with_no_player_tag)
        message += "```\n\n"
    if d_members_with_no_player_tag:
        message += ":warning: **({})** Players with **{}** role, but have **NO** tags saved: ```• ".format(len(d_members_with_no_player_tag), clan_name)
        message += "\n• ".join(d_members_with_no_player_tag)
        message += "```\n\n"
    if d_members_not_in_clan:
        message += ":warning: **({})** Players with **{}** role, but have **NOT** joined the clan: ```• ".format(len(d_members_not_in_clan), clan_name)
        message += "\n• ".join(d_members_not_in_clan)
        message += "```\n\n"
    if d_members_without_role:
        message += ":warning: **({})** Players in **{}**, but **DO NOT** have the clan role: ```• ".format(len(d_members_without_role), clan_name)
        message += "\n• ".join(d_members_without_role)
        message += "```\n\n"
    if d_members_without_name:
        message += ":warning: **({})** Players in **{}**, but have an **INCORRECT** nickname: ```• ".format(len(d_members_without_name), clan_name)
        message += "\n• ".join(d_members_without_name)
        message += "```\n\n"
    if cr_members_with_less_trophies:
        message += ":warning: **({})** Players in **{}**, but **DO NOT** meet the trophy requirements: ```• ".format(len(cr_members_with_less_trophies), clan_name)
        message += "\n• ".join(cr_members_with_less_trophies)
        message += "```\n\n"
    if d_members_inactive:
        message += ":warning: **({})** Players in **{}**, but **NOT** active on Discord: ```• ".format(len(d_members_inactive), clan_name)
        message += "\n• ".join(d_members_inactive)
        message += "```"
    if message == "":
        message += "Congratulations, your clan has no problems found so far. Kudos!"
    await self.bot.say(message)
@commands.group(pass_context=True)
async def topmembers(self, ctx):
    """Base command for showing top members"""
    # A subcommand was dispatched; nothing to do here.
    if ctx.invoked_subcommand is not None:
        return
    # Bare invocation: show usage for this command group.
    await self.bot.send_cmd_help(ctx)
@topmembers.command(name="trophies")
async def topmembers_trophies(self, role: str=None):
    """Show Family Ladder LeaderBoard"""
    # Number of rows to display.  (The old ``if number > 100`` guard was
    # dead code — ``number`` is a constant — and has been removed.)
    number = 10
    if "family" in self.settings:
        familyname = self.settings['family']
    else:
        familyname = "Legend Family"
    if role is None:
        title = "{} leaderboard - Trophies".format(familyname)
    else:
        # Normalize e.g. "co-leaders" -> "coleader".
        role = role.replace("-", "").strip('s').lower()
        title = "{} {} leaderboard - Trophies".format(familyname, role.capitalize())
    if role not in ["leader", "coleader", "elder", "member", None]:
        return await self.bot.say("Invalid role! Please chose between: leader, coleader, and elder.")
    embed = discord.Embed(color=0xFAA61A)
    embed.set_author(name=title,
                     icon_url="https://i.imgur.com/dtSMITE.jpg")
    await self.bot.type()
    try:
        if "url" in self.settings:
            familyurl = '{}/members/datatable'.format(self.settings['url'])
            allplayers = requests.get(familyurl, timeout=15).json()
        else:
            allplayers = requests.get('http://royaleapi.com/clan/family/legend/members/datatable', timeout=15).json()
    except Exception:  # network/JSON failure — was a bare ``except:``
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    players = dict(allplayers)
    players['data'] = sorted(allplayers['data'], key=lambda x: x['family_rank_score'])
    # Resolve clan tag -> nickname once, instead of awaiting the clans
    # store for every player (was O(players x clans) awaited lookups and
    # could show a stale or unbound ``clanname``).
    nick_by_tag = {}
    for key in self.clans.keysClans():
        nick_by_tag[await self.clans.getClanData(key, 'tag')] = await self.clans.getClanData(key, 'nickname')
    message = ""
    amount = 0
    for entry in players['data']:
        clanrole = entry['role'].replace("-", "").lower()
        if role and role != clanrole:
            continue
        clanname = nick_by_tag.get(entry['clan_tag'], "")
        message += "``{} [{}]`` {} ({})\n".format((str(amount + 1) + ".").ljust(3),
                                                  entry['trophies'],
                                                  entry['name'],
                                                  clanname)
        amount += 1
        if amount == number:
            break
    embed.description = message
    await self.bot.say(embed=embed)
@topmembers.command(name="donations")
async def topmembers_donations(self, role: str=None):
    """Show Family Donations LeaderBoard"""
    # Number of rows to display.  (The old ``if number > 100`` guard was
    # dead code — ``number`` is a constant — and has been removed.)
    number = 10
    if "family" in self.settings:
        familyname = self.settings['family']
    else:
        familyname = "Legend Family"
    if role is None:
        title = "{} leaderboard - Donations".format(familyname)
    else:
        # Normalize e.g. "co-leaders" -> "coleader".
        role = role.replace("-", "").strip('s').lower()
        title = "{} {} leaderboard - Donations".format(familyname, role.capitalize())
    if role not in ["leader", "coleader", "elder", "member", None]:
        return await self.bot.say("Invalid role! Please chose between: leader, coleader, and elder.")
    embed = discord.Embed(color=0xFAA61A)
    embed.set_author(name=title,
                     icon_url="https://i.imgur.com/dtSMITE.jpg")
    await self.bot.type()
    try:
        if "url" in self.settings:
            familyurl = '{}/members/datatable'.format(self.settings['url'])
            allplayers = requests.get(familyurl, timeout=15).json()
        else:
            allplayers = requests.get('http://royaleapi.com/clan/family/legend/members/datatable', timeout=15).json()
    except Exception:  # network/JSON failure — was a bare ``except:``
        return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
    players = dict(allplayers)
    players['data'] = sorted(allplayers['data'], key=lambda x: x['family_rank_donations'])
    # Resolve clan tag -> nickname once, instead of awaiting the clans
    # store for every player (was O(players x clans) awaited lookups and
    # could show a stale or unbound ``clanname``).
    nick_by_tag = {}
    for key in self.clans.keysClans():
        nick_by_tag[await self.clans.getClanData(key, 'tag')] = await self.clans.getClanData(key, 'nickname')
    message = ""
    amount = 0
    for entry in players['data']:
        clanrole = entry['role'].replace("-", "").lower()
        if role and role != clanrole:
            continue
        clanname = nick_by_tag.get(entry['clan_tag'], "")
        message += "``{} [{}]`` {} ({})\n".format((str(amount + 1) + ".").ljust(3),
                                                  entry['donations'],
                                                  entry['name'],
                                                  clanname)
        amount += 1
        if amount == number:
            break
    embed.description = message
    await self.bot.say(embed=embed)
    @commands.command()
    async def topclans(self):
        """Show top 10 international clans"""
        await self.bot.type()
        try:
            # '57000006' is the international location ID.
            topclans = (await self.clash.get_top_clans('57000006')).get("items")
        except clashroyale.RequestError:
            return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
        msg = ""
        # Always list the global top 10.
        for x in range(10):
            msg += "``" + str(topclans[x].rank).zfill(3) + "." + "`` " + topclans[x].name + "\n"
        # Below rank 10, only include clans that belong to the family.
        for i in range(10, len(topclans)):
            for j in self.clans.keysClans():
                if topclans[i].tag.strip("#") == await self.clans.getClanData(j, 'tag'):
                    msg += "``" + str(topclans[i].rank).zfill(3) + "." + "`` " + topclans[i].name + "\n"
        embed = discord.Embed(description=msg, color=0xFAA61A)
        embed.set_author(name="Local International Leaderboard",
                         url="http://royaleapi.com/top/clans/_int",
                         icon_url="https://i.imgur.com/dtSMITE.jpg")
        embed.set_footer(text=credits, icon_url=creditIcon)
        await self.bot.say(embed=embed)
    @_clash.command(pass_context=True, no_pm=True)
    @commands.has_any_role(*BOTCOMMANDER_ROLES)
    async def guest(self, ctx, member: discord.Member):
        """Add guest role and change nickname to CR"""
        server = ctx.message.server
        # Guard: guests are only managed on the Legend Family server.
        legendServer = ["374596069989810176"]
        if server.id not in legendServer:
            return await self.bot.say("This command can only be executed in the Legend Family Server")
        try:
            await self.bot.type()
            # Look up the member's saved Clash Royale tag, then their in-game name.
            profiletag = await self.tags.getTagCR(member.id)
            profiledata = await self.clash.get_player(profiletag)
            ign = profiledata.name
        except clashroyale.RequestError:
            return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
        except KeyError:
            # No tag stored for this member yet.
            return await self.bot.say("You must associate a tag with this member first using ``{}save #tag @member``".format(ctx.prefix))
        try:
            # Nickname format: "<in-game name> | Guest".
            newname = ign + " | Guest"
            await self.bot.change_nickname(member, newname)
        except discord.HTTPException:
            return await self.bot.say("I don’t have permission to change nick for this user.")
        role = discord.utils.get(server.roles, name="Guest")
        try:
            # DM the rules first; abort before granting the role if DMs are closed.
            await self.bot.send_message(member, guest_rules)
            await self.bot.say("{} Role Added to {}".format(role.name, member.display_name))
        except discord.errors.Forbidden:
            return await self.bot.say("Command failed, {} please fix your privacy settings, we are unable to send you Guest Rules.".format(member.mention))
        try:
            await self.bot.add_roles(member, role)
        except discord.Forbidden:
            # Surface permission problems to the caller instead of hiding them.
            raise
        except discord.HTTPException:
            raise
    @_clash.command(pass_context=True, no_pm=True)
    @commands.has_any_role(*BOTCOMMANDER_ROLES)
    async def inactive(self, ctx, member: discord.Member):
        """Use this command after kicking people from clan"""
        server = ctx.message.server
        # Only usable on the Legend Family server.
        legendServer = ["374596069989810176"]
        if server.id not in legendServer:
            return await self.bot.say("This command can only be executed in the Legend Family Server")
        # Strip every configured clan role plus the archetype/event roles.
        rolesToRemove = await self.clans.rolesClans()
        rolesToRemove += ["Bait", "Siege", "Cycle", "Control",
                          "Beatdown", "Tournaments", "Giveaways"]
        await self._remove_roles(member, rolesToRemove)
        # Passing None resets the nickname back to the account username.
        await self.bot.change_nickname(member, None)
        await self.bot.say("Member and clan roles removed.\nNickname has been reset.")
@commands.command()
async def gmt(self):
"""Get the currect GMT time"""
await self.bot.say(datetime.datetime.now(datetime.timezone.utc).strftime("%H:%M GMT"))
    @commands.command(pass_context=True, no_pm=True)
    async def cwstats(self, ctx, tag):
        """Tournament/Clanwar Statistics generator"""
        await self.bot.type()
        tag = await self.tags.formatTag(tag)
        if not await self.tags.verifyTag(tag):
            return await self.bot.say("The ID you provided has invalid characters. Please try again.")
        try:
            tourney = await self.clash.get_tournament(tag)
        except clashroyale.NotFoundError:
            return await self.bot.say("Error: Tournament not found. Please double check your #TAG")
        except clashroyale.RequestError:
            return await self.bot.say("Error: cannot reach Clash Royale Servers. Please try again later.")
        # Aggregate total score and participant count per clan.
        clanwar_dict = {}
        for member in tourney.members_list:
            tourney_score = member.score
            # Players without a clan are pooled under "OTHERS".
            if not hasattr(member, 'clan'):
                tourney_clan = "OTHERS"
            else:
                tourney_clan = member.clan.name
            if tourney_clan not in clanwar_dict:
                clanwar_dict[tourney_clan] = {}
                clanwar_dict[tourney_clan]['score'] = 0
                clanwar_dict[tourney_clan]['participants'] = 0
            clanwar_dict[tourney_clan]['score'] += tourney_score
            clanwar_dict[tourney_clan]['participants'] += 1
        # Fixed-width columns inside a Discord code block keep the table aligned.
        message = "\n**{}**```{}\t{}\t{}\n".format(tourney.name, "CLAN".ljust(17), "SCORE".ljust(9), "PARTICIPANTS")
        # Sort clans by total score, highest first.
        clanwar_dict = OrderedDict(sorted(clanwar_dict.items(), key=lambda x: x[1]['score'], reverse=True))
        for x in clanwar_dict:
            message += "{}\t{}\t{}\n".format(x.ljust(17), str(clanwar_dict[x]['score']).ljust(9), clanwar_dict[x]['participants'])
        message += "```"
        await self.bot.say(message)
def check_folders():
    """Ensure the on-disk data folders used by this cog exist."""
    # Create each required folder on first run; no-op when already present.
    for folder in ("data/legend", "data/seen"):
        if not os.path.exists(folder):
            print("Creating {} folder...".format(folder))
            os.makedirs(folder)
def check_files():
    """Seed the cog's JSON settings files with empty dicts when missing."""
    defaults = (
        ("data/legend/settings.json", "settings.json"),
        ("data/seen/seen.json", "seen.json"),
    )
    for path, label in defaults:
        # fileIO(..., "check") is falsy when the file does not exist yet.
        if not fileIO(path, "check"):
            print("Creating empty {}...".format(label))
            fileIO(path, "save", {})
def setup(bot):
    """Red cog entry point: ensure data folders/files exist, then register the cog."""
    check_folders()
    check_files()
    bot.add_cog(legend(bot))
|
Gr8z/Legend-Cogs
|
legend/legend.py
|
Python
|
mit
| 66,830
|
[
"VisIt"
] |
ec4d11acd2fcdb1929f4e3245b8d8676be895df4a2aa97f4668d2b5480f49ffc
|
import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
from TheCannon_2 import dataset,apogee
from TheCannon_2 import model
def _load_pickle(path):
    """Load and return one pickled object, closing the file even if
    pickle.load raises (the original open/close pattern leaked the file
    handle on error)."""
    with open(path, 'rb') as pkl_file:
        return pickle.load(pkl_file)


# Wavelength grid shared by all spectra.
wl = _load_pickle('wl.pkl')
# load path: per-star FITS file paths and flux file paths.
path_fits = _load_pickle('n_900_path_fits.pkl')
path_flux = _load_pickle('n_900_path_flux.pkl')
# mean_ivar: per-star mean inverse variance.
mi = _load_pickle('n_900_mean_ivar.pkl')

N = len(path_fits)
print(N)
class plot():
    def read_data(self):
        """Load per-visit measurements from every FITS file in path_fits and
        store them as flat numpy arrays on self.

        For each star, rows 0-1 of the tables are skipped and only rows
        2..ni-1 are kept (NOTE(review): presumably rows 0-1 hold the two
        combined spectra rather than individual visits — confirm against the
        pipeline that wrote these files).  HDU indices used: 4/14 =
        parameters (old/new), 6 = delta chi^2, 7 = fiber IDs, 10/15 =
        velocities (old/new), 11 = MJD, 16 = HJD, 17 = airmass.
        """
        N = len(path_fits)
        print(N)
        velocity = []
        velocity_new = []
        fiber_id = []
        mean_ivar = []
        # Dummy first row so np.vstack in the loop has something to stack
        # onto; it is sliced off again after the loop.
        parameters = np.array([0,1,0])
        parameters_new = np.array([0,1,0])
        inf_label = []
        dchi = []
        MJD = []
        HJD = []
        meanivar = []
        RA = []
        DEC = []
        SNR = []
        airmass = []
        # star name and the number of visit
        # dimension N*2
        star_visit = []
        star_name = []
        for i in range(0, N):
            print("loading star %d" % (i + 1))
            star_name_i = path_fits[i]
            star_i = fits.open(path_fits[i])
            ni = len(star_i[4].data[:, 0])
            # mean ivar
            # Vector of ones used to broadcast per-star scalars (SNR, RA,
            # DEC, mean ivar) over this star's ni - 2 individual visits.
            one = np.ones(ni - 2)
            for si in range(0,ni-2):
                star_name = np.append(star_name, star_name_i)
                star_visit.append(si)
            meanivar = np.append(meanivar, one * mi[i])
            dchi = np.append(dchi, star_i[6].data[2:ni])
            # SNR RA DEC
            SNR = np.append(SNR, (star_i[0].header["SNR"] * one))
            RA = np.append(RA, (star_i[0].header["RA"] * one))
            DEC = np.append(DEC, (star_i[0].header["DEC"] * one))
            velocity = np.append(velocity, star_i[10].data[2:ni, 0])
            velocity_new = np.append(velocity_new, star_i[15].data[2:ni, 0])
            fiber_id = np.append(fiber_id, star_i[7].data)
            mean_ivar.append(np.mean(star_i[1].data[0]))
            parameters = np.vstack((parameters,star_i[4].data[2:ni,0:3]))
            parameters_new = np.vstack((parameters_new, star_i[14].data[2:ni, 0:3]))
            MJD = np.append(MJD, star_i[11].data)
            HJD = np.append(HJD,star_i[16].data)
            airmass = np.append(airmass,star_i[17].data)
            print(star_i[4].data[:, 0].shape, star_i[0].data.shape, star_i[11].data.shape, star_i[12].data.shape)
            print(star_i[11].data)
        self.path_fits = path_fits
        velocity = np.array(velocity)
        self.velocity = velocity
        velocity_new = np.array(velocity_new)
        self.velocity_new = velocity_new
        fiber_id = np.array(fiber_id)
        self.fiber_id = fiber_id
        # Drop the dummy seed row from both parameter stacks.
        na = len(parameters[:,0])
        parameters = parameters[1:na,:]
        self.parameters = parameters
        parameters_new = parameters_new[1:na,:]
        self.parameters_new = parameters_new
        dchi = np.array(dchi)
        self.dchi = dchi
        MJD = np.array(MJD)
        self.MJD = MJD
        HJD = np.array(HJD)
        self.HJD = HJD
        airmass = np.array(airmass)
        self.airmass = airmass
        SNR = np.array(SNR)
        self.SNR = SNR
        RA = np.array(RA)
        self.RA = RA
        DEC = np.array(DEC)
        self.DEC = DEC
        meanivar = np.array(meanivar)
        self.meanivar = meanivar
        star_name =np.array(star_name)
        self.star_name = star_name
        star_visit = np.array(star_visit)
        self.star_visit = star_visit
        print("star name shape")
        print(star_name.shape,star_visit.shape)
        # check shape
        print(MJD.shape, SNR.shape, RA.shape, DEC.shape)
# give values:
    def plot_continuum_pixel_single_star(self,flux, ivar):
        """Continuum-normalise one star's spectra with TheCannon machinery
        and plot all spectra stacked vertically with the continuum pixels
        marked in red.

        flux/ivar: 2-D arrays with one row per spectrum; the first two rows
        are labelled "combined spectrum" and the rest "individual visit".
        """
        # obtain contmask
        # The same data is passed as both training and test set: only the
        # dataset's continuum-fitting utilities are needed here.
        tr_ID = "biggest_c_a"
        test_labels_all_i = ["Teff", "Logg", "Fe/H"]
        ds = dataset.Dataset(wl, tr_ID, flux, ivar,
                             test_labels_all_i, tr_ID, flux, ivar)
        # Pixel ranges of the three APOGEE detector chips (TODO confirm).
        ds.ranges = [[371, 3192], [3697, 5997], [6461, 8255]]
        # set sudo-continuous spectrum
        pseudo_tr_flux, pseudo_tr_ivar = ds.continuum_normalize_training_q \
            (q=0.90, delta_lambda=50)
        # set mask: top 7% most continuum-like pixels
        contmask = ds.make_contmask(pseudo_tr_flux, pseudo_tr_ivar, frac=0.07)
        # get continuous mask
        ds.set_continuum(contmask)
        # fit the normalized-spectrum in the continuous region
        cont = ds.fit_continuum(3, "sinusoid")
        # Obtain the normalized flux
        norm_tr_flux, norm_tr_ivar, norm_test_flux, norm_test_ivar = \
            ds.continuum_normalize(cont)
        ## diagnostic
        # contmask is bool
        contmask = np.array(contmask)
        N = len(flux[:, 0])
        name = ["combined spectrum", "combined spectrum"]
        for j in range(0, N - 2):
            name.append("individual visit")
        plt.figure()
        # Stack spectra with a 0.5 vertical offset each; continuum pixels
        # are overplotted as red dots (non-continuum pixels map to 0).
        for i in range(N):
            plt.step(wl, flux[i] + 0.5 * i, "k",label=name[i], linewidth=0.5)
            plt.plot(wl, (flux[i] + 0.5 * i) * contmask, "ro", label=name[i], markersize=1.5)
            # plt.errorbar(wl,flux[i] + 0.3 * i, ecolor='k', alpha=0.02, capthick=0.2, yerr=ivar[i]**(-0.5))
        axes = plt.gca()
        #axes.set_xlim([15660, 15780])
        #axes.set_xlim([16160,16280])
        axes.set_ylim([0.5, 1 + 0.5 * N])
        # axes.set_yticks(np.arange(0.8,1.21,0.1))
        plt.xlabel("Wave length $\AA$", fontsize=20)
        plt.ylabel("Flux", fontsize=20)
        plt.title("The fluxes of one star", fontsize=20)
        plt.show()
    def histogram_2_2_rv_abc(self,RV,a,b,c):
        """Draw a 2x2 grid of 40-bin histograms for the RV shifts and the
        fit parameters a, b and c, then show the figure (blocks)."""
        font = {'weight': 'bold', 'size': 15}
        matplotlib.rc('font', **font)
        # NOTE(review): this first figure is unused — plt.subplots below
        # creates its own figure.
        fig = plt.figure()
        f, ((ax1, ax2), (ax3, ax4)) = \
            plt.subplots(2, 2)
        colors = ["cyan",'b', 'g', 'r']
        name = ["RV shifts","a", "b", "c"]
        # histogram of rv
        #ax1
        ax1.hist(RV, bins=40, color=colors[0], label=name[0])
        #ax1.set_title('Histogram of Radial velocity shifts', fontsize=30)
        ax1.set_xlabel('values of radial velocity shifts $m/s$', fontsize=15)
        ax1.set_ylabel('Number', fontsize=15)
        ax1.legend(prop={'size': 15})
        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
        # histogram of a
        #ax2
        ax2.hist(a, bins=40, color=colors[1], label=name[1])
        #ax2.set_title('Histogram of parameter a', fontsize=30)
        ax2.set_xlabel('values of parameter a', fontsize=15)
        ax2.set_ylabel('Number', fontsize=15)
        ax2.legend(prop={'size': 15})
        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
        # histogram of b
        #ax3
        ax3.hist(b, bins=40, color=colors[2], label=name[2])
        ax3.legend(prop={'size': 15})
        #ax3.set_title('Histogram of paramete b', fontsize=30)
        ax3.set_xlabel("values of parameter b", fontsize=15)
        ax3.set_ylabel('Number', fontsize=15)
        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
        # histogram of c
        #ax4
        ax4.hist(c, bins=40, color=colors[3], label=name[3])
        ax4.legend(prop={'size': 15})
        #ax4.set_title('Histogram of parameter c', fontsize=30)
        ax4.set_xlabel("values of parameter c", fontsize=15)
        ax4.set_ylabel('Number', fontsize=15)
        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)
        f.suptitle("Histogram of RV shifts, a, b and c by using the absorption line")
        #f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines")
        plt.show()
# RV vs HJD RA DEC Fiber Airmass
def ve_subplot_5(self,velocity,HJD,Fiber,RA,DEC,airmass,mean_ivar,SNR):
font = {'weight': 'bold', 'size': 13}
matplotlib.rc('font', **font)
fig = plt.figure()
f, ((ax1, ax2,ax3), (ax4, ax5,ax6)) = \
plt.subplots(2, 3)
alpha = 0.3
#ax1
ax1.scatter(HJD, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax1.set_title('RV shifts vs HJD', fontsize=24,y=0.85)
ax1.set_xlabel('HJD', fontsize=20)
ax1.set_ylabel('RV shifts $m/s$', fontsize=20)
# add vertical line:
#ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
ax1.set_ylim([-6000,8000])
ax1.set_yticks(np.arange(-6000,8001,3500))
#ax1.set_position([0,0.6,0.4,0.4])
#ax2
ax2.scatter(RA, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax2.set_title('RV shifts vs RA', fontsize=24,y=0.85)
# add vertical line:
#ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
ax2.set_xlabel('RA', fontsize=20)
#ax2.set_ylabel('RV shifts $m/s$', fontsize=20)
#ax2.set_position([0.5, 0.6, 0.4, 0.4])
ax2.set_ylim([-6000, 8000])
ax2.set_yticks(np.arange(-6000, 8001, 3500))
#ax3
ax3.scatter(DEC, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax3.set_title('RV shifts vs DEC', fontsize=24,y=0.85)
# add vertical line:
#ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
ax3.set_xlabel('DEC', fontsize=20)
#ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
ax3.set_ylim([-6000, 8000])
ax3.set_yticks(np.arange(-6000, 8001, 3500))
#ax3.set_position([0, 0, 0.4, 0.4])
#ax4
ax4.scatter(Fiber, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax4.set_title('RV shifts vs FiberID', fontsize=24,y=0.85)
# add vertical line:
ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
#ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
ax4.set_xlabel('FIberID', fontsize=20)
ax4.set_ylabel('RV shifts $m/s$', fontsize=20)
ax4.set_ylim([-6000, 8000])
ax4.set_yticks(np.arange(-6000, 8001, 3500))
#ax4.set_position([0.5,0, 0.4, 0.4])
#ax5
ax5.scatter(airmass, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax5.set_title('RV shifts vs air mass', fontsize=24,y=0.85)
# add vertical line:
ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
#ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
ax5.set_xlabel('FIberID', fontsize=20)
#ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
ax5.set_ylim([-6000, 8000])
ax5.set_yticks(np.arange(-6000, 8001, 3500))
#ax6
ax6.scatter(SNR, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
ax6.set_title('RV shifts vs SNR', fontsize=24,y=0.85)
# add vertical line:
ax6.axhline(y=0, linewidth=1,color="k",alpha=0.5)
#ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
ax6.set_xlabel('SNR', fontsize=20)
#ax6.set_ylabel('RV shifts $m/s$', fontsize=20)
ax6.set_ylim([-6000, 8000])
ax6.set_yticks(np.arange(-6000, 8001, 3500))
f.subplots_adjust(right=0.8)
pl = ax1.scatter(HJD, velocity, marker='x', c=mean_ivar,
vmin=10000, vmax=40000, alpha=alpha)
cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
cb = f.colorbar(pl, cax=cbar_ax)
cb.set_label("Mean inverse variance", fontsize=20)
f.suptitle("RV shifts from the whole spectrum vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
plt.show()
# RV vs HJD RA DEC Fiber Airmass new
    def ve_new_subplot_5(self,velocity,HJD,Fiber,RA,DEC,airmass,mean_ivar,SNR):
        """Same 2x3 diagnostic grid as ve_subplot_5 but for RV shifts derived
        from the absorption line (see suptitle).  Colour-coded by mean
        inverse variance; blocks on plt.show().

        NOTE(review): the ax4 xlabel is misspelled 'FIberID' and the ax5
        panel plots airmass but is also labelled 'FIberID' — looks like a
        copy-paste error to fix.
        """
        font = {'weight': 'bold', 'size': 13}
        matplotlib.rc('font', **font)
        fig = plt.figure()
        f, ((ax1, ax2,ax3), (ax4, ax5,ax6)) = \
            plt.subplots(2, 3)
        alpha = 0.3
        #ax1
        ax1.scatter(HJD, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax1.set_title('RV shifts vs HJD', fontsize=24,y=0.85)
        ax1.set_xlabel('HJD', fontsize=20)
        ax1.set_ylabel('RV shifts $m/s$', fontsize=20)
        # add vertical line:
        #ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
        ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax1.set_ylim([-6000, 8000])
        ax1.set_yticks(np.arange(-6000, 8001, 3500))
        #ax1.set_position([0,0.6,0.4,0.4])
        #ax2
        ax2.scatter(RA, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax2.set_title('RV shifts vs RA', fontsize=24,y=0.85)
        # add vertical line:
        #ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
        ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax2.set_xlabel('RA', fontsize=20)
        #ax2.set_ylabel('RV shifts $m/s$', fontsize=20)
        #ax2.set_position([0.5, 0.6, 0.4, 0.4])
        ax2.set_ylim([-6000, 8000])
        ax2.set_yticks(np.arange(-6000, 8001, 3500))
        #ax3
        ax3.scatter(DEC, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax3.set_title('RV shifts vs DEC', fontsize=24,y=0.85)
        # add vertical line:
        #ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
        ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax3.set_xlabel('DEC', fontsize=20)
        #ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax3.set_ylim([-6000, 8000])
        ax3.set_yticks(np.arange(-6000, 8001, 3500))
        #ax4
        ax4.scatter(Fiber, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax4.set_title('RV shifts vs FiberID', fontsize=24,y=0.85)
        # add vertical line:
        ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
        ax4.set_xlabel('FIberID', fontsize=20)
        ax4.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax4.set_ylim([-6000, 8000])
        ax4.set_yticks(np.arange(-6000, 8001, 3500))
        #ax5
        ax5.scatter(airmass, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax5.set_title('RV shifts vs air mass', fontsize=24,y=0.85)
        # add vertical line:
        ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
        ax5.set_xlabel('FIberID', fontsize=20)
        #ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax5.set_ylim([-6000, 8000])
        ax5.set_yticks(np.arange(-6000, 8001, 3500))
        #ax6
        ax6.scatter(SNR, velocity, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax6.set_title('RV shifts vs SNR', fontsize=24,y=0.85)
        # add vertical line:
        ax6.axhline(y=0, linewidth=1,color="k",alpha=0.5)
        #ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
        ax6.set_xlabel('SNR', fontsize=20)
        ax6.set_ylim([-6000, 8000])
        ax6.set_yticks(np.arange(-6000, 8001, 3500))
        # Shared colorbar: re-scatter on ax1 to obtain a mappable object.
        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(HJD, velocity, marker='x', c=mean_ivar,
                         vmin=10000, vmax=40000, alpha=alpha)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Mean inverse variance", fontsize=20)
        f.suptitle("RV shifts from the absorption line vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
        plt.show()
## a vs them
# RV vs HJD RA DEC Fiber Airmass
    def a_subplot_5(self,a,HJD,Fiber,RA,DEC,airmass,mean_ivar,SNR):
        """Plot fit parameter a (whole spectrum) against HJD, RA, DEC,
        FiberID, airmass and SNR in a 2x3 grid, coloured by mean ivar.

        NOTE(review): a method with the same name is redefined later in this
        class (only formatting differs); the later definition wins at
        class-creation time, making this one dead code — remove one copy.
        """
        font = {'weight': 'bold', 'size': 13}
        matplotlib.rc('font', **font)
        fig = plt.figure()
        f, ((ax1, ax2,ax3), (ax4, ax5,ax6)) = \
            plt.subplots(2, 3)
        alpha = 0.3
        #ax1
        ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax1.set_title('a vs HJD', fontsize=24,y=0.85)
        ax1.set_xlabel('HJD', fontsize=20)
        ax1.set_ylabel('a', fontsize=20)
        # add vertical line:
        #ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
        ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax1.set_ylim([-2,3])
        ax1.set_yticks(np.arange(-2,3.1,1))
        #ax1.set_position([0,0.6,0.4,0.4])
        #ax2
        ax2.scatter(RA, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax2.set_title('a vs RA', fontsize=24,y=0.85)
        # add vertical line:
        #ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
        ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax2.set_xlabel('RA', fontsize=20)
        #ax2.set_ylabel('a', fontsize=20)
        #ax2.set_position([0.5, 0.6, 0.4, 0.4])
        ax2.set_ylim([-2,3])
        ax2.set_yticks(np.arange(-2,3.1,1))
        #ax3
        ax3.scatter(DEC, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax3.set_title('a vs DEC', fontsize=24,y=0.85)
        # add vertical line:
        #ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
        ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax3.set_xlabel('DEC', fontsize=20)
        #ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax3.set_ylim([-2,3])
        ax3.set_yticks(np.arange(-2,3.1,1))
        #ax3.set_position([0, 0, 0.4, 0.4])
        #ax4
        ax4.scatter(Fiber, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax4.set_title('a vs FiberID', fontsize=24,y=0.85)
        # add vertical line:
        ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
        ax4.set_xlabel('FIberID', fontsize=20)
        ax4.set_ylabel('a', fontsize=20)
        ax4.set_ylim([-2,3])
        ax4.set_yticks(np.arange(-2,3.1,1))
        #ax4.set_position([0.5,0, 0.4, 0.4])
        #ax5
        ax5.scatter(airmass, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax5.set_title('a vs air mass', fontsize=24,y=0.85)
        # add vertical line:
        ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
        # NOTE(review): this panel plots airmass but the label reads 'FIberID'.
        ax5.set_xlabel('FIberID', fontsize=20)
        #ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax5.set_ylim([-2,3])
        ax5.set_yticks(np.arange(-2,3.1,1))
        #ax6
        ax6.scatter(SNR, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax6.set_title('a vs SNR', fontsize=24,y=0.85)
        # add vertical line:
        ax6.axhline(y=0, linewidth=1,color="k",alpha=0.5)
        #ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
        ax6.set_xlabel('SNR', fontsize=20)
        #ax6.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax6.set_ylim([-2,3])
        ax6.set_yticks(np.arange(-2,3.1,1))
        # Shared colorbar: re-scatter on ax1 to obtain a mappable object.
        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                         vmin=10000, vmax=40000, alpha=alpha)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Mean inverse variance", fontsize=20)
        f.suptitle("Parameter a from the whole spectrum vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
        plt.show()
# RV vs HJD RA DEC Fiber Airmass new
# RV vs HJD RA DEC Fiber Airmass
    def a_new_subplot_5(self,a,HJD,Fiber,RA,DEC,airmass,mean_ivar,SNR):
        """Plot fit parameter a (absorption line) against HJD, RA, DEC,
        FiberID, airmass and SNR in a 2x3 grid, coloured by mean ivar.

        NOTE(review): a method with the same name is redefined later in this
        class (only formatting differs); the later definition wins at
        class-creation time, making this one dead code — remove one copy.
        """
        font = {'weight': 'bold', 'size': 13}
        matplotlib.rc('font', **font)
        fig = plt.figure()
        f, ((ax1, ax2,ax3), (ax4, ax5,ax6)) = \
            plt.subplots(2, 3)
        alpha = 0.3
        #ax1
        ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax1.set_title('a vs HJD', fontsize=24,y=0.85)
        ax1.set_xlabel('HJD', fontsize=20)
        ax1.set_ylabel('a', fontsize=20)
        # add vertical line:
        #ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
        ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax1.set_ylim([-2,3])
        ax1.set_yticks(np.arange(-2,3.1,1))
        #ax1.set_position([0,0.6,0.4,0.4])
        #ax2
        ax2.scatter(RA, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax2.set_title('a vs RA', fontsize=24,y=0.85)
        # add vertical line:
        #ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
        ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax2.set_xlabel('RA', fontsize=20)
        #ax2.set_ylabel('a', fontsize=20)
        #ax2.set_position([0.5, 0.6, 0.4, 0.4])
        ax2.set_ylim([-2,3])
        ax2.set_yticks(np.arange(-2,3.1,1))
        #ax3
        ax3.scatter(DEC, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax3.set_title('a vs DEC', fontsize=24,y=0.85)
        # add vertical line:
        #ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
        ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax3.set_xlabel('DEC', fontsize=20)
        #ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax3.set_ylim([-2,3])
        ax3.set_yticks(np.arange(-2,3.1,1))
        #ax3.set_position([0, 0, 0.4, 0.4])
        #ax4
        ax4.scatter(Fiber, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax4.set_title('a vs FiberID', fontsize=24,y=0.85)
        # add vertical line:
        ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
        ax4.set_xlabel('FIberID', fontsize=20)
        ax4.set_ylabel('a', fontsize=20)
        ax4.set_ylim([-2,3])
        ax4.set_yticks(np.arange(-2,3.1,1))
        #ax4.set_position([0.5,0, 0.4, 0.4])
        #ax5
        ax5.scatter(airmass, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax5.set_title('a vs air mass', fontsize=24,y=0.85)
        # add vertical line:
        ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        #ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
        # NOTE(review): this panel plots airmass but the label reads 'FIberID'.
        ax5.set_xlabel('FIberID', fontsize=20)
        #ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax5.set_ylim([-2,3])
        ax5.set_yticks(np.arange(-2,3.1,1))
        #ax6
        ax6.scatter(SNR, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax6.set_title('a vs SNR', fontsize=24,y=0.85)
        # add vertical line:
        ax6.axhline(y=0, linewidth=1,color="k",alpha=0.5)
        #ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
        ax6.set_xlabel('SNR', fontsize=20)
        #ax6.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax6.set_ylim([-2,3])
        ax6.set_yticks(np.arange(-2,3.1,1))
        # Shared colorbar: re-scatter on ax1 to obtain a mappable object.
        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                         vmin=10000, vmax=40000, alpha=alpha)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Mean inverse variance", fontsize=20)
        f.suptitle("Parameter a from the absorption line vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
        plt.show()
## a vs them
# RV vs HJD RA DEC Fiber Airmass
    def a_subplot_5(self, a, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
        """Plot fit parameter a (whole spectrum) against HJD, RA, DEC,
        FiberID, airmass and SNR in a 2x3 grid, coloured by mean ivar.

        NOTE(review): this is a reformatted duplicate of the a_subplot_5
        defined earlier in the class; being defined later, this copy is the
        one bound on the class — delete the other (or this) one.
        """
        font = {'weight': 'bold', 'size': 13}
        matplotlib.rc('font', **font)
        fig = plt.figure()
        f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = \
            plt.subplots(2, 3)
        alpha = 0.3
        # ax1
        ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax1.set_title('a vs HJD', fontsize=24, y=0.85)
        ax1.set_xlabel('HJD', fontsize=20)
        ax1.set_ylabel('a', fontsize=20)
        # add vertical line:
        # ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
        ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax1.set_ylim([-2, 3])
        ax1.set_yticks(np.arange(-2, 3.1, 1))
        # ax1.set_position([0,0.6,0.4,0.4])
        # ax2
        ax2.scatter(RA, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax2.set_title('a vs RA', fontsize=24, y=0.85)
        # add vertical line:
        # ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
        ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax2.set_xlabel('RA', fontsize=20)
        # ax2.set_ylabel('a', fontsize=20)
        # ax2.set_position([0.5, 0.6, 0.4, 0.4])
        ax2.set_ylim([-2, 3])
        ax2.set_yticks(np.arange(-2, 3.1, 1))
        # ax3
        ax3.scatter(DEC, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax3.set_title('a vs DEC', fontsize=24, y=0.85)
        # add vertical line:
        # ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
        ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax3.set_xlabel('DEC', fontsize=20)
        # ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax3.set_ylim([-2, 3])
        ax3.set_yticks(np.arange(-2, 3.1, 1))
        # ax3.set_position([0, 0, 0.4, 0.4])
        # ax4
        ax4.scatter(Fiber, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax4.set_title('a vs FiberID', fontsize=24, y=0.85)
        # add vertical line:
        ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
        ax4.set_xlabel('FIberID', fontsize=20)
        ax4.set_ylabel('a', fontsize=20)
        ax4.set_ylim([-2, 3])
        ax4.set_yticks(np.arange(-2, 3.1, 1))
        # ax4.set_position([0.5,0, 0.4, 0.4])
        # ax5
        ax5.scatter(airmass, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax5.set_title('a vs air mass', fontsize=24, y=0.85)
        # add vertical line:
        ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
        # NOTE(review): this panel plots airmass but the label reads 'FIberID'.
        ax5.set_xlabel('FIberID', fontsize=20)
        # ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax5.set_ylim([-2, 3])
        ax5.set_yticks(np.arange(-2, 3.1, 1))
        # ax6
        ax6.scatter(SNR, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax6.set_title('a vs SNR', fontsize=24, y=0.85)
        # add vertical line:
        ax6.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
        ax6.set_xlabel('SNR', fontsize=20)
        # ax6.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax6.set_ylim([-2, 3])
        ax6.set_yticks(np.arange(-2, 3.1, 1))
        # Shared colorbar: re-scatter on ax1 to obtain a mappable object.
        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                         vmin=10000, vmax=40000, alpha=alpha)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Mean inverse variance", fontsize=20)
        f.suptitle("Parameter a from the whole spectrum vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
        plt.show()
# RV vs HJD RA DEC Fiber Airmass new
# RV vs HJD RA DEC Fiber Airmass
    def a_new_subplot_5(self, a, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
        """Plot fit parameter a (absorption line) against HJD, RA, DEC,
        FiberID, airmass and SNR in a 2x3 grid, coloured by mean ivar.

        NOTE(review): this is a reformatted duplicate of the a_new_subplot_5
        defined earlier in the class; being defined later, this copy is the
        one bound on the class — delete the other (or this) one.
        """
        font = {'weight': 'bold', 'size': 13}
        matplotlib.rc('font', **font)
        fig = plt.figure()
        f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = \
            plt.subplots(2, 3)
        alpha = 0.3
        # ax1
        ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax1.set_title('a vs HJD', fontsize=24, y=0.85)
        ax1.set_xlabel('HJD', fontsize=20)
        ax1.set_ylabel('a', fontsize=20)
        # add vertical line:
        # ax1.plot((np.min(HJD),np.max(HJD)), (0,0), 'k-', linewidth=1)
        ax1.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax1.set_ylim([-2, 3])
        ax1.set_yticks(np.arange(-2, 3.1, 1))
        # ax1.set_position([0,0.6,0.4,0.4])
        # ax2
        ax2.scatter(RA, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax2.set_title('a vs RA', fontsize=24, y=0.85)
        # add vertical line:
        # ax2.plot((np.min(RA),np.max(RA)), (0,0), 'k-', linewidth=1)
        ax2.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax2.set_xlabel('RA', fontsize=20)
        # ax2.set_ylabel('a', fontsize=20)
        # ax2.set_position([0.5, 0.6, 0.4, 0.4])
        ax2.set_ylim([-2, 3])
        ax2.set_yticks(np.arange(-2, 3.1, 1))
        # ax3
        ax3.scatter(DEC, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax3.set_title('a vs DEC', fontsize=24, y=0.85)
        # add vertical line:
        # ax3.plot((np.min(DEC),np.max(DEC)), (0,0), 'k-', linewidth=1)
        ax3.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax3.set_xlabel('DEC', fontsize=20)
        # ax3.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax3.set_ylim([-2, 3])
        ax3.set_yticks(np.arange(-2, 3.1, 1))
        # ax3.set_position([0, 0, 0.4, 0.4])
        # ax4
        ax4.scatter(Fiber, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax4.set_title('a vs FiberID', fontsize=24, y=0.85)
        # add vertical line:
        ax4.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax4.plot((np.min(Fiber),np.max(Fiber)), (0,0), 'k-', linewidth=1)
        ax4.set_xlabel('FIberID', fontsize=20)
        ax4.set_ylabel('a', fontsize=20)
        ax4.set_ylim([-2, 3])
        ax4.set_yticks(np.arange(-2, 3.1, 1))
        # ax4.set_position([0.5,0, 0.4, 0.4])
        # ax5
        ax5.scatter(airmass, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax5.set_title('a vs air mass', fontsize=24, y=0.85)
        # add vertical line:
        ax5.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax5.plot((np.min(airmass),np.max(airmass)), (0,0), 'k-', linewidth=1)
        # NOTE(review): this panel plots airmass but the label reads 'FIberID'.
        ax5.set_xlabel('FIberID', fontsize=20)
        # ax5.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax5.set_ylim([-2, 3])
        ax5.set_yticks(np.arange(-2, 3.1, 1))
        # ax6
        ax6.scatter(SNR, a, marker='x', c=mean_ivar,
                    vmin=10000, vmax=40000, alpha=alpha)
        ax6.set_title('a vs SNR', fontsize=24, y=0.85)
        # add vertical line:
        ax6.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        # ax6.plot((np.min(SNR),np.max(SNR)), (0,0), )
        ax6.set_xlabel('SNR', fontsize=20)
        # ax6.set_ylabel('RV shifts $m/s$', fontsize=20)
        ax6.set_ylim([-2, 3])
        ax6.set_yticks(np.arange(-2, 3.1, 1))
        # Shared colorbar: re-scatter on ax1 to obtain a mappable object.
        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(HJD, a, marker='x', c=mean_ivar,
                         vmin=10000, vmax=40000, alpha=alpha)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Mean inverse variance", fontsize=20)
        f.suptitle("Parameter a from the absorption line vs HJD, RA, DEC, FiberID, Airmass and SNR", fontsize=30)
        plt.show()
## b vs them
# RV vs HJD RA DEC Fiber Airmass
def b_subplot_5(self, b, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
    """Scatter parameter b (whole-spectrum fit) against six observables.

    Draws one 2x3 figure: b vs HJD, RA, DEC, FiberID, airmass and SNR,
    with points coloured by the mean inverse variance of each visit and a
    shared colorbar on the right.

    All arguments are parallel 1-D arrays (one entry per visit):
    b -- fitted parameter b; HJD/RA/DEC/Fiber/airmass/SNR -- observables;
    mean_ivar -- mean inverse variance, used as the colour scale.
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 13})
    # NOTE: the stray `fig = plt.figure()` of the old code opened an extra
    # empty window; plt.subplots() already creates the figure we use.
    f, axes = plt.subplots(2, 3)
    alpha = 0.3
    # (x data, title suffix, x-axis label) for the six panels, in reading order.
    panels = [(HJD, 'HJD', 'HJD'),
              (RA, 'RA', 'RA'),
              (DEC, 'DEC', 'DEC'),
              (Fiber, 'FiberID', 'FiberID'),       # was mistyped 'FIberID'
              (airmass, 'air mass', 'Airmass'),    # x label was wrongly 'FIberID'
              (SNR, 'SNR', 'SNR')]
    for ax, (x, title, xlabel) in zip(axes.ravel(), panels):
        pl = ax.scatter(x, b, marker='x', c=mean_ivar,
                        vmin=10000, vmax=40000, alpha=alpha)
        ax.set_title('b vs %s' % title, fontsize=24, y=0.85)
        # horizontal reference line at b = 0
        ax.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax.set_xlabel(xlabel, fontsize=20)
        ax.set_ylim([-3, 4])
        ax.set_yticks(np.arange(-3, 4.1, 1))
    # y labels only on the leftmost column, as in the original layout.
    axes[0, 0].set_ylabel('b', fontsize=20)
    axes[1, 0].set_ylabel('b', fontsize=20)
    f.subplots_adjust(right=0.8)
    # All panels share vmin/vmax, so any one scatter can feed the colorbar;
    # the old code re-plotted ax1 just to obtain a mappable.
    cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
    cb = f.colorbar(pl, cax=cbar_ax)
    cb.set_label("Mean inverse variance", fontsize=20)
    f.suptitle("Parameter b from the whole spectrum vs HJD, RA, DEC, FiberID, Airmass and SNR",
               fontsize=30)
    plt.show()
# RV vs HJD RA DEC Fiber Airmass
def b_new_subplot_5(self, b, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
    """Scatter parameter b (absorption-line fit) against six observables.

    Same layout as b_subplot_5: a 2x3 grid of b vs HJD, RA, DEC, FiberID,
    airmass and SNR, coloured by mean inverse variance, shared colorbar.

    All arguments are parallel 1-D arrays (one entry per visit).
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 13})
    f, axes = plt.subplots(2, 3)  # the old extra plt.figure() opened a stray window
    alpha = 0.3
    # (x data, title suffix, x-axis label) per panel, in reading order.
    panels = [(HJD, 'HJD', 'HJD'),
              (RA, 'RA', 'RA'),
              (DEC, 'DEC', 'DEC'),
              (Fiber, 'FiberID', 'FiberID'),       # was mistyped 'FIberID'
              (airmass, 'air mass', 'Airmass'),    # x label was wrongly 'FIberID'
              (SNR, 'SNR', 'SNR')]
    for ax, (x, title, xlabel) in zip(axes.ravel(), panels):
        pl = ax.scatter(x, b, marker='x', c=mean_ivar,
                        vmin=10000, vmax=40000, alpha=alpha)
        ax.set_title('b vs %s' % title, fontsize=24, y=0.85)
        # horizontal reference line at b = 0
        ax.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax.set_xlabel(xlabel, fontsize=20)
        ax.set_ylim([-3, 4])
        ax.set_yticks(np.arange(-3, 4.1, 1))
    # y labels only on the leftmost column, as before.
    axes[0, 0].set_ylabel('b', fontsize=20)
    axes[1, 0].set_ylabel('b', fontsize=20)
    f.subplots_adjust(right=0.8)
    # shared vmin/vmax means the last scatter serves as the colorbar mappable
    cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
    cb = f.colorbar(pl, cax=cbar_ax)
    cb.set_label("Mean inverse variance", fontsize=20)
    f.suptitle("Parameter b from the absorption line vs HJD, RA, DEC, FiberID, Airmass and SNR",
               fontsize=30)
    plt.show()
## c vs them
# RV vs HJD RA DEC Fiber Airmass
def c_subplot_5(self, c, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
    """Scatter parameter c (whole-spectrum fit) against six observables.

    A 2x3 grid of c vs HJD, RA, DEC, FiberID, airmass and SNR, coloured
    by mean inverse variance, with a shared colorbar on the right.

    All arguments are parallel 1-D arrays (one entry per visit).
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 13})
    f, axes = plt.subplots(2, 3)  # the old extra plt.figure() opened a stray window
    alpha = 0.3
    # (x data, title suffix, x-axis label) per panel, in reading order.
    panels = [(HJD, 'HJD', 'HJD'),
              (RA, 'RA', 'RA'),
              (DEC, 'DEC', 'DEC'),
              (Fiber, 'FiberID', 'FiberID'),       # was mistyped 'FIberID'
              (airmass, 'air mass', 'Airmass'),    # x label was wrongly 'FIberID'
              (SNR, 'SNR', 'SNR')]
    for ax, (x, title, xlabel) in zip(axes.ravel(), panels):
        pl = ax.scatter(x, c, marker='x', c=mean_ivar,
                        vmin=10000, vmax=40000, alpha=alpha)
        ax.set_title('c vs %s' % title, fontsize=24, y=0.85)
        # horizontal reference line at c = 0
        ax.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax.set_xlabel(xlabel, fontsize=20)
        ax.set_ylim([-2, 3])
        ax.set_yticks(np.arange(-2, 3.1, 1))
    # y labels only on the leftmost column, as before.
    axes[0, 0].set_ylabel('c', fontsize=20)
    axes[1, 0].set_ylabel('c', fontsize=20)
    f.subplots_adjust(right=0.8)
    # shared vmin/vmax means the last scatter serves as the colorbar mappable
    cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
    cb = f.colorbar(pl, cax=cbar_ax)
    cb.set_label("Mean inverse variance", fontsize=20)
    f.suptitle("Parameter c from the whole spectrum vs HJD, RA, DEC, FiberID, Airmass and SNR",
               fontsize=30)
    plt.show()
# RV vs HJD RA DEC Fiber Airmass
def c_new_subplot_5(self, c, HJD, Fiber, RA, DEC, airmass, mean_ivar, SNR):
    """Scatter parameter c (absorption-line fit) against six observables.

    A 2x3 grid of c vs HJD, RA, DEC, FiberID, airmass and SNR, coloured
    by mean inverse variance, with a shared colorbar on the right.

    All arguments are parallel 1-D arrays (one entry per visit).
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 13})
    f, axes = plt.subplots(2, 3)  # the old extra plt.figure() opened a stray window
    alpha = 0.3
    # (x data, title suffix, x-axis label) per panel, in reading order.
    panels = [(HJD, 'HJD', 'HJD'),
              (RA, 'RA', 'RA'),
              (DEC, 'DEC', 'DEC'),
              (Fiber, 'FiberID', 'FiberID'),       # was mistyped 'FIberID'
              (airmass, 'air mass', 'Airmass'),    # x label was wrongly 'FIberID'
              (SNR, 'SNR', 'SNR')]
    for ax, (x, title, xlabel) in zip(axes.ravel(), panels):
        pl = ax.scatter(x, c, marker='x', c=mean_ivar,
                        vmin=10000, vmax=40000, alpha=alpha)
        ax.set_title('c vs %s' % title, fontsize=24, y=0.85)
        # horizontal reference line at c = 0
        ax.axhline(y=0, linewidth=1, color="k", alpha=0.5)
        ax.set_xlabel(xlabel, fontsize=20)
        ax.set_ylim([-2, 3])
        ax.set_yticks(np.arange(-2, 3.1, 1))
    # y labels only on the leftmost column, as before.
    axes[0, 0].set_ylabel('c', fontsize=20)
    axes[1, 0].set_ylabel('c', fontsize=20)
    f.subplots_adjust(right=0.8)
    # shared vmin/vmax means the last scatter serves as the colorbar mappable
    cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
    cb = f.colorbar(pl, cax=cbar_ax)
    cb.set_label("Mean inverse variance", fontsize=20)
    f.suptitle("Parameter c from the absorption line vs HJD, RA, DEC, FiberID, Airmass and SNR",
               fontsize=30)
    plt.show()
def old_vs_new(self, RV, RV_new, parameters, parameters_new):
    """Compare RV shifts and parameters a, b, c between the two methods.

    One 2x2 figure; each panel plots new-method values against old-method
    values with a black identity line for reference.
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 20})
    f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    # (axis, old values, new values, legend label, axis-label template)
    panels = [(ax1, RV, RV_new,
               "RV shifts", "RV shifts from the %s $m/s$"),
              (ax2, parameters[:, 0], parameters_new[:, 0],
               "Parameter a", "Parameter a from the %s"),
              (ax3, parameters[:, 1], parameters_new[:, 1],
               "Parameter b", "Parameter b from the %s"),
              (ax4, parameters[:, 2], parameters_new[:, 2],
               "Parameter c", "Parameter c from the %s")]
    for ax, old, new, label, template in panels:
        ax.plot(old, new, "ro", label=label, markersize=3)
        ax.plot(old, old, "k-")  # identity line: perfect agreement
        ax.set_xlabel(template % "whole spectrum", fontsize=12)
        ax.set_ylabel(template % "absorption line", fontsize=12)
    f.suptitle("Comparison of the old and the new method", fontsize=20)
    plt.show()
def plot_single_star_mask_result(self, path):
    """Plot mask and fit results for every individual visit of one star.

    For each visit (rows 2..N-1 of the flux array; rows 0-1 are assumed to
    be combined spectra -- TODO confirm) draw two panels: the data flux
    with the masked absorption-line pixels marked, and the data flux
    overlaid with the inferred fluxes from both fitting methods.

    path -- FITS file holding flux, inferred flux, mask, and parameters.
    """
    star = fits.open(path)
    N = len(star[0].data[:, 0])
    # short star name for the titles, derived from the file path
    name = str(path).replace(".fits", "")
    name = name.replace("/Users/caojunzhi/Desktop/Data/n_3_suspect/", "")
    mask = star[13].data
    parameters = star[4].data
    a = parameters[:, 0]
    b = parameters[:, 1]
    c = parameters[:, 2]
    parameters_new = star[14].data
    # RV from the whole-spectrum parameters; 4144.68 is the conversion factor
    # used throughout this file -- presumably to m/s; confirm against caller.
    velocity = (c - a) / (a + b + c) * 4144.68
    velocity_new = star[15].data[:, 0]
    flux = star[0].data
    inf_flux = star[2].data
    flux_m = flux * mask  # masked pixels only (mask zeroes the rest)
    # individual visits start at row 2
    for i in range(2, N):
        # left panel: data flux with the masked absorption line
        plt.subplot(N - 2, 2, 2 * i - 3)
        plt.step(wl, flux[i], "k", label="Data flux", linewidth=0.7, alpha=1)
        plt.plot(wl, flux_m[i], "ro", label="The absorption line",
                 markersize=1, alpha=0.5)
        plt.ylabel("Flux", fontsize=20)
        axes = plt.gca()
        axes.set_xlim([15660, 15780])
        axes.set_ylim([0.5, 1.5])
        # right panel: data flux vs inferred flux from both methods
        plt.subplot(N - 2, 2, 2 * i - 2)
        plt.step(wl, flux[i], "k",
                 label="From the whole spectrum RV=%.2f $m/s$ a=%.2f b=%.2f c=%.2f"
                       % (velocity[i], parameters[i, 0], parameters[i, 1], parameters[i, 2]),
                 linewidth=0.7, alpha=1)
        plt.plot(wl, inf_flux[i], "b",
                 label="From the absorption line RV=%.2f $m/s$ a=%.2f b=%.2f c=%.2f"
                       % (velocity_new[i], parameters_new[i, 0], parameters_new[i, 1], parameters_new[i, 2]),
                 linewidth=0.7, alpha=0.5)
        axes = plt.gca()
        axes.set_xlim([15660, 15780])
        axes.set_ylim([0.5, 1.5])
        plt.ylabel("Flux", fontsize=20)
        # BUG FIX: the loop starts at i == 2, so the old `if i == 0` test
        # never fired and this title was never drawn; draw it on the first
        # plotted visit instead.
        if i == 2:
            plt.title("The fluxes of individual visits for %s from the APOGEE team" % name,
                      fontsize=20)
        plt.legend()
    plt.suptitle("The fitting result of individual visits for %s" % name, fontsize=20)
    plt.show()
def choose_four_biggest_RV_for_new_method(self):
    """Return indices of four visits with large new-method RV shifts.

    NOTE(review): despite the name, the slice [N-8:N-4] selects the 5th
    through 8th largest shifts, not the top four -- possibly to skip
    outliers; confirm against the caller before changing.
    """
    nVisits = len(self.velocity_new)
    order = self.velocity_new.argsort()  # indices, ascending by shift
    index = order[nVisits - 8:nVisits - 4]
    print(self.velocity_new[index])  # selected shifts, from small to big
    print(index)
    return index
def choose_four_biggest_delta_rv(self):
    """Return indices of the four visits with the largest |old RV - new RV|."""
    delta = abs(self.velocity - self.velocity_new)
    nVisits = len(self.velocity_new)
    index = delta.argsort()[nVisits - 4:nVisits]  # last four = biggest deltas
    print(index)
    return index
def plot_visit_mask_result(self, index):
    """Plot mask and fit comparison for the visits selected by `index`.

    For each selected visit draw two panels: the data flux with the masked
    absorption-line pixels marked, and the data flux overlaid with the
    inferred flux from both fitting methods.

    index -- iterable of positions into the per-visit arrays stored on
             self (star_name, star_visit, velocity, velocity_new,
             parameters, parameters_new) -- assumed parallel; TODO confirm.
    """
    # only choose individual visits:
    N = len(index)
    for i in range(0, N):
        # left panel: data flux with the masked absorption line
        plt.subplot(N, 2, 2 * i + 1)
        star = fits.open(self.star_name[index[i]])
        # short star name for the legend, derived from the file path
        name = str(self.star_name[index[i]]).replace(".fits", "")
        name = name.replace("/Users/caojunzhi/Desktop/Data/n_900/", "")
        ind = self.star_visit[index[i]]
        # rows are offset by 2 -- presumably rows 0-1 hold combined spectra
        flux = star[0].data[ind + 2, :]
        inf = star[2].data[ind + 2, :]
        mask = star[13].data[ind + 2, :]
        plt.step(wl, flux, "k", label="One visit of star %s" % name, linewidth=0.7, alpha=1)
        plt.plot(wl, flux * mask, "ro", label="The absorption line", markersize=1, alpha=0.5)
        plt.ylabel("Flux", fontsize=20)
        axes = plt.gca()
        axes.set_xlim([15660, 15780])
        axes.set_ylim([0.5, 1.5])
        plt.legend()
        # right panel: data flux vs inferred flux from both methods
        plt.subplot(N, 2, 2 * i + 2)
        plt.step(wl, flux, "k", label="From the whole spectrum RV=%.2f $m/s$ a=%.2f b=%.2f c=%.2f" % (
            self.velocity[index[i]], self.parameters[index[i], 0], self.parameters[index[i], 1], self.parameters[index[i], 2]), linewidth=0.7, alpha=1)
        plt.plot(wl, inf, "b", label="From the absorption line RV=%.2f $m/s$ a=%.2f b=%.2f c=%.2f" % (
            self.velocity_new[index[i]], self.parameters_new[index[i], 0], self.parameters_new[index[i], 1], self.parameters_new[index[i], 2]), linewidth=0.7,
            alpha=0.5)
        axes = plt.gca()
        axes.set_xlim([15660, 15780])
        axes.set_ylim([0.5, 1.5])
        plt.legend()
    plt.suptitle("The fitting result of visits with the biggest delta RV shifts", fontsize=20)
    plt.show()
# Pre-processed FITS files for the three suspect stars.
path = np.array(["/Users/caojunzhi/Desktop/Data/n_3_suspect/2M00041859+7104111.fits","/Users/caojunzhi/Desktop/Data/n_3_suspect/2M00080292+7332356.fits","/Users/caojunzhi/Desktop/Data/n_3_suspect/2M00093507+6609268.fits"])
# Original apStar files for the same stars (not used below; kept for reference).
path_origin = np.array(["/Volumes/Data_2TB/Data/n_3_suspect/apStar-r5-2M00041859+7104111.fits","/Volumes/Data_2TB/Data/n_3_suspect/apStar-r5-2M00080292+7332356.fits","/Volumes/Data_2TB/Data/n_3_suspect/apStar-r5-2M00093507+6609268.fits"])
# Build the plotting helper and render the mask/fit result for the third star.
# NOTE: these statements run at import time (no __main__ guard).
model = plot()
model.plot_single_star_mask_result(path[2])
|
peraktong/Cannon-Experiment
|
0218_plot_three_suspect_star.py
|
Python
|
mit
| 56,764
|
[
"VisIt"
] |
3f28bca25a62fac1600bc57d11107b957619f02d5e046a9599e5fcaa10132c06
|
#
# Copyright (C) 2001-2004 greg Landrum and Rational Discovery LLC
# All Rights Reserved
#
""" The "parser" for compound descriptors.
I almost hesitate to document this, because it's not the prettiest
thing the world has ever seen... but it does work (for at least some
definitions of the word).
Rather than getting into the whole mess of writing a parser for the
compound descriptor expressions, I'm just using string substitutions
and python's wonderful ability to *eval* code.
It would probably be a good idea at some point to replace this with a
real parser, if only for the flexibility and intelligent error
messages that would become possible.
The general idea is that we're going to deal with expressions where
atomic descriptors have some kind of method applied to them which
reduces them to a single number for the entire composition. Compound
descriptors (those applicable to the compound as a whole) are not
operated on by anything in particular (except for standard math stuff).
Here's the general flow of things:
1) Composition descriptor references ($a, $b, etc.) are replaced with the
corresponding descriptor names using string substitution.
(*_SubForCompoundDescriptors*)
2) Atomic descriptor references ($1, $2, etc) are replaced with lookups
into the atomic dict with "DEADBEEF" in place of the atom name.
(*_SubForAtomicVars*)
3) Calls to Calculator Functions are augmented with a reference to
the composition and atomic dictionary
(*_SubMethodArgs*)
**NOTE:**
anytime we don't know the answer for a descriptor, rather than
throwing a (completely incomprehensible) exception, we just return
-666. So bad descriptor values should stand out like sore thumbs.
"""
# The wildcard import is required to make functions available for the eval statement
from math import *
from rdkit import RDConfig
# When True, evaluation failures raise RuntimeError instead of silently
# returning the -666 sentinel value.
__DEBUG = False
# we do this to allow the use of stuff in the math module
# ----------------------
# atomic descriptor section
# ----------------------
# these are the methods which can be applied to ATOMIC descriptors.
knownMethods = ['SUM', 'MIN', 'MAX', 'MEAN', 'AVG', 'DEV', 'HAS']
def HAS(strArg, composList, atomDict):
  """ *Calculator Method*

  does a string search

  **Arguments**

    - strArg: the arguments in string form: "<where-expr>,<what-expr>",
      where <where-expr> may contain DEADBEEF as an atom-name placeholder

    - composList: the composition vector, a sequence of (atom, count) pairs

    - atomDict: the atomic dictionary

  **Returns**

    1 if <what> is found in <where> for any atom, 0 otherwise,
    or -666 when strArg does not contain two comma-separated parts

  """
  splitArgs = strArg.split(',')
  if len(splitArgs) <= 1:
    # not enough pieces for a containment test
    return -666
  # the "what" expression never mentions DEADBEEF, so evaluate it once
  # instead of once per atom as the old code did
  what = eval(splitArgs[1])
  for atom, _ in composList:
    # substitute the current atom name into the descriptor lookup
    where = eval(splitArgs[0].replace('DEADBEEF', atom))
    if what in where:
      return 1
  return 0
def SUM(strArg, composList, atomDict):
  """ *Calculator Method*

  calculates the sum of a descriptor across a composition

  **Arguments**

    - strArg: the arguments in string form (DEADBEEF is the atom placeholder)

    - composList: the composition vector, a sequence of (atom, count) pairs

    - atomDict: the atomic dictionary

  **Returns**

    a float

  """
  total = 0.0
  for atom, num in composList:
    # evaluate the descriptor for this atom, weighted by its count
    total += eval(strArg.replace('DEADBEEF', atom)) * num
  return total
def MEAN(strArg, composList, atomDict):
  """ *Calculator Method*

  calculates the weighted average of a descriptor across a composition

  **Arguments**

    - strArg: the arguments in string form (DEADBEEF is the atom placeholder)

    - composList: the composition vector, a sequence of (atom, count) pairs

    - atomDict: the atomic dictionary

  **Returns**

    a float

  """
  total = 0.0
  count = 0
  for atom, num in composList:
    # per-atom descriptor value weighted by the atom count
    total += eval(strArg.replace('DEADBEEF', atom)) * num
    count += num
  return total / count
# AVG is accepted as a synonym for MEAN in expressions (see knownMethods).
AVG = MEAN
def DEV(strArg, composList, atomDict):
  """ *Calculator Method*

  calculates the weighted mean absolute deviation of a descriptor across
  a composition

  **Arguments**

    - strArg: the arguments in string form (DEADBEEF is the atom placeholder)

    - composList: the composition vector, a sequence of (atom, count) pairs

    - atomDict: the atomic dictionary

  **Returns**

    a float

  """
  avg = MEAN(strArg, composList, atomDict)
  total = 0.0
  count = 0.0
  for atom, num in composList:
    # absolute deviation from the mean, weighted by the atom count
    total += abs(eval(strArg.replace('DEADBEEF', atom)) - avg) * num
    count += num
  return total / count
def MIN(strArg, composList, atomDict):
""" *Calculator Method*
calculates the minimum value of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = []
for atom, _ in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum.append(eval(tStr))
return min(accum)
def MAX(strArg, composList, atomDict):
""" *Calculator Method*
calculates the maximum value of a descriptor across a composition
**Arguments**
- strArg: the arguments in string form
- compos: the composition vector
- atomDict: the atomic dictionary
**Returns**
a float
"""
accum = []
for atom, _ in composList:
tStr = strArg.replace('DEADBEEF', atom)
accum.append(eval(tStr))
return max(accum)
# ------------------
# string replacement routines
# these are not intended to be called by clients
# ------------------
def _SubForAtomicVars(cExpr, varList, dictName):
""" replace atomic variables with the appropriate dictionary lookup
*Not intended for client use*
"""
for i in range(len(varList)):
cExpr = cExpr.replace('$%d' % (i + 1), '%s["DEADBEEF"]["%s"]' % (dictName, varList[i]))
return cExpr
def _SubForCompoundDescriptors(cExpr, varList, dictName):
""" replace compound variables with the appropriate list index
*Not intended for client use*
"""
for i in range(len(varList)):
cExpr = cExpr.replace('$%s' % chr(ord('a') + i), '%s["%s"]' % (dictName, varList[i]))
return cExpr
def _SubMethodArgs(cExpr, knownMethods):
  """ alters the arguments of calls to calculator methods

  *Not intended for client use*

  This is kind of putrid (and the code ain't so pretty either)
  The general idea is that the various special methods for atomic
  descriptors need two extra arguments (the composition and the atomic
  dict).  Rather than make the user type those in, we just find
  invocations of these methods and fill out the function calls using
  string replacements, e.g. SUM(x) -> SUM('x',compos,atomDict).

  """
  res = cExpr
  for method in knownMethods:
    p = 0
    # scan for every occurrence of this method name in the expression
    while p != -1 and p < len(res):
      p = res.find(method, p)
      if p != -1:
        # skip past the method name and its opening parenthesis
        p = p + len(method) + 1
        start = p
        parenCount = 1
        # walk forward until the matching closing parenthesis
        while parenCount and p < len(res):
          if res[p] == ')':
            parenCount = parenCount - 1
          elif res[p] == '(':
            parenCount = parenCount + 1
          p = p + 1
        if p <= len(res):
          # quote the original argument and append the two extra args
          res = res[0:start] + "'%s',compos,atomDict" % (res[start:p - 1]) + res[p - 1:]
  return res
def CalcSingleCompoundDescriptor(compos, argVect, atomDict, propDict):
  """ calculates the value of the descriptor for a single compound

    **ARGUMENTS:**

      - compos: a vector/tuple containing the composition
         information, in the form: '[("Fe",1.),("Pt",2.),("Rh",0.02)]'

      - argVect: a vector/tuple with three elements:

         1) AtomicDescriptorNames:  a list/tuple of the names of the
           atomic descriptors being used.  These determine the
           meaning of $1, $2, etc. in the expression

         2) CompoundDescriptorNames:  a list/tuple of the names of the
           compound descriptors being used.  These determine the
           meaning of $a, $b, etc. in the expression

         3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict:
           a dictionary of atomic descriptors.  Each atomic entry is
           another dictionary containing the individual descriptors
           and their values

      - propDict:
           a dictionary of descriptors for the composition itself.

    **RETURNS:**

      the value of the descriptor, -666 if a problem was encountered

    **NOTE:**

      - because it takes rather a lot of work to get everything set
        up to calculate a descriptor, if you are calculating the
        same descriptor for multiple compounds, you probably want to
        be calling _CalcMultipleCompoundsDescriptor()_.

  """
  # Pre-initialize so the debug branches below can never hit an unbound
  # local (the old code printed evalTarget even when a substitution step
  # raised before evalTarget was assigned, masking the real error).
  formula = ''
  evalTarget = ''
  try:
    atomVarNames = argVect[0]
    compositionVarNames = argVect[1]
    formula = argVect[2]
    # turn $a/$b... and $1/$2... references into dictionary lookups
    formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
    formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
    # splice the extra (compos, atomDict) arguments into method calls
    evalTarget = _SubMethodArgs(formula, knownMethods)
  except Exception:
    if __DEBUG:
      import traceback
      print('Sub Failure!')
      traceback.print_exc()
      print(evalTarget)
      print(propDict)
      raise RuntimeError('Failure 1')
    return -666
  try:
    # eval relies on the local names propDict, atomDict and compos, which
    # the substituted expression references
    v = eval(evalTarget)
  except Exception:
    if __DEBUG:
      import traceback
      # use a context manager so the log file is closed even if a write fails
      with open(RDConfig.RDCodeDir + '/ml/descriptors/log.txt', 'a+') as outF:
        outF.write('#------------------------------\n')
        outF.write('formula: %s\n' % repr(formula))
        outF.write('target: %s\n' % repr(evalTarget))
        outF.write('propDict: %s\n' % (repr(propDict)))
        outF.write('keys: %s\n' % (repr(sorted(atomDict))))
      print('ick!')
      print('formula:', formula)
      print('target:', evalTarget)
      print('propDict:', propDict)
      print('keys:', atomDict.keys())
      traceback.print_exc()
      raise RuntimeError('Failure 2')
    v = -666
  return v
def CalcMultipleCompoundsDescriptor(composVect, argVect, atomDict, propDictList):
  """ calculates the value of the descriptor for a list of compounds

    **ARGUMENTS:**

      - composVect: a vector of vector/tuple containing the composition
         information.
         See _CalcSingleCompoundDescriptor()_ for an explanation of the
         elements.

      - argVect: a vector/tuple with three elements:

         1) AtomicDescriptorNames:  a list/tuple of the names of the
           atomic descriptors being used.  These determine the
           meaning of $1, $2, etc. in the expression

         2) CompoundDescriptorNames:  a list/tuple of the names of the
           compound descriptors being used.  These determine the
           meaning of $a, $b, etc. in the expression

         3) Expr: a string containing the expression to be used to
           evaluate the final result.

      - atomDict:
           a dictionary of atomic descriptors.  Each atomic entry is
           another dictionary containing the individual descriptors
           and their values

      - propDictList:
           a vector of descriptor dictionaries, one per composition.

    **RETURNS:**

      a vector containing the values of the descriptor for each
      compound.  Any given entry will be -666 if problems were
      encountered

  """
  nCompounds = len(composVect)
  res = [-666] * nCompounds
  try:
    atomVarNames = argVect[0]
    compositionVarNames = argVect[1]
    formula = argVect[2]
    # the substituted expression is shared by every compound, so build it once
    formula = _SubForCompoundDescriptors(formula, compositionVarNames, 'propDict')
    formula = _SubForAtomicVars(formula, atomVarNames, 'atomDict')
    evalTarget = _SubMethodArgs(formula, knownMethods)
  except Exception:
    # substitution failed: every entry stays at the -666 sentinel
    return res
  for i in range(nCompounds):
    # eval below relies on these exact local names (propDict, compos)
    propDict = propDictList[i]
    compos = composVect[i]
    try:
      res[i] = eval(evalTarget)
    except Exception:
      # leave the -666 sentinel in place for this compound
      pass
  return res
# ------------
# Demo/testing code
# ------------
def _exampleCode():  # pragma: nocover
  """Demo: evaluate a batch of example descriptor expressions."""
  # descriptor-name lists: [atomic names, compound names]
  piece1 = [['d1', 'd2', 's1'], ['d1', 'd2', 's1']]
  # per-atom descriptor values
  aDict = {'Fe': {'d1': 1., 'd2': 2., 's1': 'abc'}, 'Pt': {'d1': 10., 'd2': 20., 's1': 'def'}}
  # whole-composition descriptor values
  pDict = {'d1': 100., 'd2': 200.}
  compos = [('Fe', 1), ('Pt', 1)]
  # a grab bag of expressions, including deliberately bad ones ("foo")
  cExprs = ["SUM($1)", "SUM($1)+SUM($2)", "SUM($1)+SUM($1)", "MEAN($1)", "DEV($2)", "MAX($1)",
            "MIN($1)/MAX($1)", "MIN($2)", "SUM($1)/$a", "sqrt($a+$b)", "SUM((3.*$1)/($2))",
            'HAS($3,"def")', 'HAS($3,"xyz")', "foo"]
  for cExpr in cExprs:
    # argVect = [atomic names, compound names, expression]
    argVect = piece1 + [cExpr]
    print(cExpr)
    print(CalcSingleCompoundDescriptor(compos, argVect, aDict, pDict))
    print(CalcMultipleCompoundsDescriptor([compos, compos], argVect, aDict, [pDict, pDict]))
# run the demo only when executed as a script
if __name__ == '__main__':  # pragma: nocover
  _exampleCode()
|
ptosco/rdkit
|
rdkit/ML/Descriptors/Parser.py
|
Python
|
bsd-3-clause
| 12,437
|
[
"RDKit"
] |
d940433a8b23295bae9ec24ce7b8ace9368cd682ea553ce9e2fca3129441cc52
|
import numpy as _np
from scipy import ndimage as _nd
from . import norm as _norm
from ndarray.openalea import aleanode as _aleanode # decorator to declare openalea nodes
@_aleanode({'name':'kernel'})
def coordinates(shape):
    """
    Compute an array containing for each axis the coordinates arrays of
    given shape:

        coord = coordinates( shape )

    :Input:
        shape: a list/tuple/vector of the kernel sizes of each dimension

    :Output:
        A numpy array of shape (N, [shape]) where N is the length of given
        'shape' and returned coord[i,:] is the centered coordinates over
        the ith dimension

    :Example:
        coordinates((3,4))

        array([[[-1, -1, -1, -1],
                [ 0,  0,  0,  0],
                [ 1,  1,  1,  1]],

               [[-1,  0,  1,  2],
                [-1,  0,  1,  2],
                [-1,  0,  1,  2]]])
    """
    if _np.isscalar(shape):
        shape = [shape]
    else:
        shape = _np.asarray(shape).tolist()
    # Build one centered slice per axis.  Floor division (//) keeps the
    # original python-2 integer-division semantics while also working on
    # python 3, where the old `mgrid[map(slice, ...)]` failed because
    # map() returns an iterator that mgrid cannot index with.
    slices = [slice(-((s - 1) // 2), s // 2 + 1) for s in shape]
    return _np.mgrid[slices]
@_aleanode({'name':'kernel'})
def distance(shape, metric=2):
    """
    return a distance kernel of given shape:

        d = distance(shape, metric='euclidian')

    :Input:
        shape:  a scalar (for 1d) or list/tuple/vector of the kernel shape
        metric: the distance function used. Same as the 'method' argument
                of array.norm()

    :Output:
        an array of given shape, where the center cell is zero, and all
        others hold their distance to this center

    :Example:
        distance((3,4))

        array([[ 1.41,  1.  ,  1.4,  2.23],
               [ 1.  ,  0.  ,  1. ,  2.  ],
               [ 1.41,  1.  ,  1.4,  2.23]])
    """
    # each cell's distance to the center is the norm of its centered
    # coordinate vector (axis 0 stacks the per-dimension coordinates)
    return _norm(coordinates(shape), method=metric, axis=0)
@_aleanode({'name':'kernel'})
def ellipse(radius,shape=None):
"""
return a boolean array an ellipse kernel
circle = ellipse(shape, radius)
:Input:
radius: a tuple the ellipse radius for each dimension.
shape: a scalar (for 1d) or list/tuple/vector of the kernel shape
*** It must have same length as 'shape' ***
By default (if None), the maximum ellipse embedable in 'shape'
:Output:
an array of given shape, where the pixel inside the ellipse have True value
:Example:
ellipse((5,9),(2,3)).astype(int)
array([[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0]])
"""
radius = _np.asarray([radius],dtype='float32').ravel().tolist()
if shape is None: shape = [int(2*r+1) for r in radius]
coord = map(_np.divide,tuple(coordinates(shape=shape)), radius)
return _np.sqrt(reduce(_np.add,map(_np.square,coord)))<=1
@_aleanode({'name':'kernel'})
def gaussian(sigma, shape=()):
    """
    Return a gaussian kernel of given shape:
        d = gaussian(sigma, shape=())

    :Input:
        sigma: a scalar or list/tuple of the sigma parameter for each dimension
        shape: a scalar or list/tuple of the kernel shape
               if shape has fewer entries than sigma, the missing (leading)
               dimensions get an automatic size of 8*sigma+1
    :Output:
        A gaussian kernel of suitable shape.
        The total sum of all kernel values is equal to 1.
    :Example:
        np.round(gaussian((2,3),shape=(4,8)),3)
          array([[ 0.014,  0.032,  0.053,  0.063,  0.053,  0.032,  0.014,  0.004],
                 [ 0.018,  0.041,  0.068,  0.081,  0.068,  0.041,  0.018,  0.006],
                 [ 0.014,  0.032,  0.053,  0.063,  0.053,  0.032,  0.014,  0.004],
                 [ 0.007,  0.015,  0.025,  0.03 ,  0.025,  0.015,  0.007,  0.002]])
    """
    # default changed from the mutable `[]` to `()`; both yield an empty
    # array below, so behavior is unchanged
    sigma = _np.asarray([sigma]).ravel()
    shape = tuple(_np.asarray([shape]).ravel())
    # automatic per-dimension size: wide enough for +/- 4 sigma
    auto = tuple([8*s+1 for s in sigma])
    # left-pad 'shape' with automatic sizes so len(shape) == sigma.size
    # (extra leading entries of an over-long 'shape' are dropped)
    shape = auto[0:(sigma.size-len(shape))] + shape[(len(shape)-sigma.size):]
    coord = coordinates(shape)                      # (ndim, [shape])
    sigma.shape = (sigma.size,) + (1,)*len(shape)   # broadcastable over coord
    # NOTE(review): exponent is coord**2 / sigma, not coord**2 / sigma**2 —
    # consistent with the docstring example, but differs from the textbook
    # gaussian; confirm this is intended.
    kernel = _np.exp(-0.5 * _np.sum(coord**2 * (1./sigma), axis=0))
    # normalize so the kernel sums to 1
    return kernel / kernel.sum()
|
julien-diener/ndarray
|
src/ndarray/kernel.py
|
Python
|
bsd-3-clause
| 4,556
|
[
"Gaussian"
] |
0ee50a46198ecca51b229f4cb75070b805c671865931436a72fc06ba0ad5658f
|
"""Analyze python import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import os
import re
from . import types as t
from .io import (
read_binary_file,
)
from .util import (
display,
ApplicationError,
is_subdir,
)
from .data import (
data_context,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
    """Return a dictionary of module_utils names mapped to sets of python file paths.

    Each module_utils name maps to the set of target paths that import it,
    directly or transitively through other module_utils.

    :type compile_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    module_utils = enumerate_module_utils()

    # members of VIRTUAL_PACKAGES are tracked separately: they depend on the
    # modules they contain instead of the reverse
    virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
    module_utils -= virtual_utils

    imports_by_target_path = {}

    # first pass: direct imports per target file
    for target in compile_targets:
        imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)

    def recurse_import(import_name, depth=0, seen=None):  # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
        """Recursively expand module_utils imports from module_utils files."""
        display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)

        if seen is None:
            seen = set([import_name])

        results = set([import_name])

        # virtual packages depend on the modules they contain instead of the reverse
        if import_name in VIRTUAL_PACKAGES:
            for sub_import in sorted(virtual_utils):
                if sub_import.startswith('%s.' % import_name):
                    if sub_import in seen:
                        continue

                    seen.add(sub_import)

                    matches = sorted(recurse_import(sub_import, depth + 1, seen))

                    for result in matches:
                        results.add(result)

        # resolve the import to a file path: try as a module first, then as a package
        import_path = get_import_path(import_name)

        if import_path not in imports_by_target_path:
            import_path = get_import_path(import_name, package=True)

            if import_path not in imports_by_target_path:
                raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)

        # process imports in reverse so the deepest imports come first
        for name in sorted(imports_by_target_path[import_path], reverse=True):
            if name in virtual_utils:
                continue

            if name in seen:
                continue

            seen.add(name)

            matches = sorted(recurse_import(name, depth + 1, seen))

            for result in matches:
                results.add(result)

        return results

    for module_util in module_utils:
        # recurse over module_utils imports while excluding self
        module_util_imports = recurse_import(module_util)
        module_util_imports.remove(module_util)

        # add recursive imports to all path entries which import this module_util
        for target_path in imports_by_target_path:
            if module_util in imports_by_target_path[target_path]:
                for module_util_import in sorted(module_util_imports):
                    if module_util_import not in imports_by_target_path[target_path]:
                        display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
                        imports_by_target_path[target_path].add(module_util_import)

    # invert the mapping: module_util name -> set of target paths that import it
    imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])

    for target_path in imports_by_target_path:
        for module_util in imports_by_target_path[target_path]:
            imports[module_util].add(target_path)

    # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
    for virtual_util in virtual_utils:
        parent_package = '.'.join(virtual_util.split('.')[:-1])
        imports[virtual_util] = imports[parent_package]
        display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)

    # warn about module_utils nothing imports, except empty __init__.py files
    for module_util in sorted(imports):
        if not imports[module_util]:
            package_path = get_import_path(module_util, package=True)

            if os.path.exists(package_path) and not os.path.getsize(package_path):
                continue  # ignore empty __init__.py files

            display.warning('No imports found which use the "%s" module_util.' % module_util)

    return imports
def get_python_module_utils_name(path):  # type: (str) -> str
    """Return the dotted module_utils name for the given module_utils path."""
    content = data_context().content
    base_path = content.module_utils_path

    # collections and core ansible use different namespace prefixes
    if content.collection:
        prefix = 'ansible_collections.' + content.collection.prefix + 'plugins.module_utils'
    else:
        prefix = 'ansible.module_utils'

    # a package is identified by its directory, not its __init__.py
    if path.endswith('/__init__.py'):
        path = os.path.dirname(path)

    if path == base_path:
        return prefix

    relative_name = os.path.splitext(os.path.relpath(path, base_path))[0]
    return prefix + '.' + relative_name.replace(os.path.sep, '.')
def enumerate_module_utils():
    """Return the set of available module_utils imports.
    :rtype: set[str]
    """
    content = data_context().content

    # every python file below the module_utils tree counts as a module_util
    return set(
        get_python_module_utils_name(path)
        for path in content.walk_files(content.module_utils_path)
        if os.path.splitext(path)[1] == '.py'
    )
def extract_python_module_utils_imports(path, module_utils):
    """Return the set of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :rtype: set[str]
    """
    # Python code must be read as bytes: per PEP 263, passing a unicode string
    # that contains a coding declaration to compile()/ast.parse() raises a
    # SyntaxError. See: https://www.python.org/dev/peps/pep-0263
    source = read_binary_file(path)

    try:
        tree = ast.parse(source)
    except SyntaxError as ex:
        # Warn instead of failing so tests can be executed as best as possible.
        # The compile test will detect and report this syntax error.
        display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
        return set()

    finder = ModuleUtilFinder(path, module_utils)
    finder.visit(tree)

    return finder.imports
def get_import_path(name, package=False):  # type: (str, bool) -> str
    """Return a source file path for the given import name."""
    relative = name.replace('.', '/')

    # packages resolve to their __init__.py, modules to a .py file
    if package:
        filename = os.path.join(relative, '__init__.py')
    else:
        filename = relative + '.py'

    if name == 'ansible.module_utils' or name.startswith('ansible.module_utils.'):
        return os.path.join('lib', filename)

    if data_context().content.collection:
        collection_prefix = 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name

        if name == collection_prefix or name.startswith(collection_prefix + '.'):
            # strip the ansible_collections/{ns}/{col} prefix from the path
            return '/'.join(filename.split('/')[3:])

    raise Exception('Unexpected import name: %s' % name)
def path_to_module(path):  # type: (str) -> str
    """Convert the given path to a dotted module name."""
    module_name = os.path.splitext(path)[0].replace(os.path.sep, '.')

    # a package's __init__ maps to the package itself
    init_suffix = '.__init__'
    if module_name.endswith(init_suffix):
        module_name = module_name[:-len(init_suffix)]

    return module_name
def relative_to_absolute(name, level, module, path, lineno):  # type: (str, int, str, str, int) -> str
    """Convert a relative import to an absolute import."""
    # level 0 means the import is already absolute
    if level <= 0:
        return name

    if not module:
        display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
        return 'relative.nomodule'

    parts = module.split('.')

    # each level strips one trailing component from the containing module
    if level >= len(parts):
        display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
        return 'relative.abovelevel'

    return '.'.join(parts[:-level] + [name])
class ModuleUtilFinder(ast.NodeVisitor):
    """AST visitor to find valid module_utils imports."""

    def __init__(self, path, module_utils):
        """Collect module_utils imports found in the source file at the given path.
        :type path: str
        :type module_utils: set[str]
        """
        self.path = path
        self.module_utils = module_utils
        self.imports = set()  # filled in by the visit_* methods / add_import

        # implicitly import parent package

        if path.endswith('/__init__.py'):
            path = os.path.split(path)[0]

        if path.startswith('lib/ansible/module_utils/'):
            # 'lib/ansible/module_utils/x/y.py' -> 'ansible.module_utils.x'
            package = os.path.split(path)[0].replace('/', '.')[4:]

            if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
                self.add_import(package, 0)

        self.module = None

        if data_context().content.is_ansible:
            # Various parts of the Ansible source tree execute within different modules.
            # To support import analysis, each file which uses relative imports must reside under a path defined here.
            # The mapping is a tuple consisting of a path pattern to match and a replacement path.
            # During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
            path_map = (
                ('^hacking/build_library/build_ansible/', 'build_ansible/'),
                ('^lib/ansible/', 'ansible/'),
                ('^test/lib/ansible_test/_data/sanity/validate-modules/', 'validate_modules/'),
                ('^test/units/', 'test/units/'),
                ('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
                ('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
                ('^test/integration/targets/.*/library/', 'ansible/modules/'),
            )

            for pattern, replacement in path_map:
                if re.search(pattern, self.path):
                    revised_path = re.sub(pattern, replacement, self.path)
                    self.module = path_to_module(revised_path)
                    break
        else:
            # This assumes that all files within the collection are executed by Ansible as part of the collection.
            # While that will usually be true, there are exceptions which will result in this resolution being incorrect.
            self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))

    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_Import(self, node):
        """Record module_utils imports made with plain `import` statements.
        :type node: ast.Import
        """
        self.generic_visit(node)

        # import ansible.module_utils.MODULE[.MODULE]
        # import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
        self.add_imports([alias.name for alias in node.names], node.lineno)

    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_ImportFrom(self, node):
        """Record module_utils imports made with `from ... import` statements.
        :type node: ast.ImportFrom
        """
        self.generic_visit(node)

        if not node.module:
            return

        # relative imports are resolved against the module this file runs as
        module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)

        if not module.startswith('ansible'):
            return

        # from ansible.module_utils import MODULE[, MODULE]
        # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
        # from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
        # from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
        self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)

    def add_import(self, name, line_number):
        """Record the given name as a module_utils import, or warn if it cannot be resolved.
        :type name: str
        :type line_number: int
        """
        import_name = name

        # walk up the dotted name until a known module_utils is found
        # (handles `from module_util import attribute` style names)
        while self.is_module_util_name(name):
            if name in self.module_utils:
                if name not in self.imports:
                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
                    self.imports.add(name)

                return  # duplicate imports are ignored

            name = '.'.join(name.split('.')[:-1])

        if is_subdir(self.path, data_context().content.test_path):
            return  # invalid imports in tests are ignored

        # Treat this error as a warning so tests can be executed as best as possible.
        # This error should be detected by unit or integration tests.
        display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))

    def add_imports(self, names, line_no):  # type: (t.List[str], int) -> None
        """Add the given import names if they are module_utils imports."""
        for name in names:
            if self.is_module_util_name(name):
                self.add_import(name, line_no)

    @staticmethod
    def is_module_util_name(name):  # type: (str) -> bool
        """Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
        if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
            return True

        if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
            return True

        return False
|
j-carl/ansible
|
test/lib/ansible_test/_internal/import_analysis.py
|
Python
|
gpl-3.0
| 14,066
|
[
"VisIt"
] |
c5573d612fe504dd7d6c46b27d6d10c0e8a2b14bb8f5e20c46b2badffb3d33f6
|
def installed():
    """Return True if the FLEUR calculator is available.

    Availability is signalled by the FLEUR environment variable.

    :raises NotAvailable: if the FLEUR environment variable is not set.
    """
    import os
    from ase.test import NotAvailable

    # `is None` instead of `== None`; the original raise-catch-reraise
    # indirection is removed — the escaping exception is unchanged.
    if os.getenv('FLEUR') is None:
        raise NotAvailable('Fleur required')
    return True
|
conwayje/ase-python
|
ase/test/fleur/__init__.py
|
Python
|
gpl-2.0
| 277
|
[
"ASE",
"FLEUR"
] |
dc956ff5fb6ef19a03a8d9e06f9380d13ba84863e6aef0f012c2fa53b892cf45
|
""" A computing element class using singularity containers.
This computing element will start the job in the container set by
the "ContainerRoot" config option.
DIRAC will the re-installed within the container, extra flags can
be given to the dirac-install command with the "ContainerExtraOpts"
option.
See the Configuration/Resources/Computing documention for details on
where to set the option parameters.
"""
import os
import sys
import shutil
import tempfile
import DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.Subprocess import systemCall
from DIRAC.ConfigurationSystem.Client.Helpers import CSGlobals
from DIRAC.ConfigurationSystem.Client.Helpers import Operations
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
from DIRAC.WorkloadManagementSystem.Utilities.Utils import createRelocatedJobWrapper
__RCSID__ = "$Id$"
# Path to the dirac-install script that gets staged into the container's /tmp
DIRAC_INSTALL = os.path.join(DIRAC.rootPath, 'DIRAC', 'Core', 'scripts', 'dirac-install.py')
# Default container to use if it isn't specified in the CE options
CONTAINER_DEFROOT = "/cvmfs/cernvm-prod.cern.ch/cvm3"
# Host-side directory (relative to the CWD) holding per-job container work areas
CONTAINER_WORKDIR = "containers"
# Directory inside the container where the work area is made visible
CONTAINER_INNERDIR = "/tmp"

# Inner wrapper script run inside the container: installs and configures
# DIRAC, runs the job wrapper, and records the payload exit code in ./retcode
# for the host side (read back by __checkResult).
# NOTE(review): the "continer" typo in the final echo is part of the runtime
# string and is deliberately left untouched here.
CONTAINER_WRAPPER = """#!/bin/bash
echo "Starting inner container wrapper scripts at `date`."
set -x
cd /tmp
# Install DIRAC
./dirac-install.py %(install_args)s
source bashrc
dirac-configure -F %(config_args)s -I
# Run next wrapper (to start actual job)
bash %(next_wrapper)s
# Write the payload errorcode to a file for the outer scripts
echo $? > retcode
chmod 644 retcode
echo "Finishing inner continer wrapper scripts at `date`."
"""
class SingularityComputingElement(ComputingElement):
  """ A Computing Element for running a job within a Singularity container.
  """

  def __init__(self, ceUniqueID):
    """ Standard constructor.

    :param str ceUniqueID: unique identifier of this CE
    """
    super(SingularityComputingElement, self).__init__(ceUniqueID)
    # simple job accounting counters, reported by getCEStatus()
    self.__submittedJobs = 0
    self.__runningJobs = 0
    # container root image/directory; overridable via the "ContainerRoot" CE option
    self.__root = CONTAINER_DEFROOT
    if 'ContainerRoot' in self.ceParameters:
      self.__root = self.ceParameters['ContainerRoot']
    self.__workdir = CONTAINER_WORKDIR
    self.__innerdir = CONTAINER_INNERDIR
    # bare command name by default; may be replaced by an absolute path from
    # the "ContainerBin" CE option in __hasSingularity()
    self.__singularityBin = 'singularity'
    self.log = gLogger.getSubLogger('Singularity')

  def __hasSingularity(self):
    """ Search the current PATH for an executable named singularity.
        Returns True if it is found, False otherwise.
    """
    # an explicit "ContainerBin" CE option takes precedence over PATH
    if self.ceParameters.get('ContainerBin'):
      binPath = self.ceParameters['ContainerBin']
      if os.path.isfile(binPath) and os.access(binPath, os.X_OK):
        self.__singularityBin = binPath
        self.log.debug('Use singularity from "%s"' % self.__singularityBin)
        return True
    if "PATH" not in os.environ:
      return False  # Hmm, PATH not set? How unusual...
    for searchPath in os.environ["PATH"].split(os.pathsep):
      binPath = os.path.join(searchPath, 'singularity')
      if os.path.isfile(binPath):
        # File found, check it's executable to be certain:
        if os.access(binPath, os.X_OK):
          self.log.debug('Find singularity from PATH "%s"' % binPath)
          # NOTE(review): __singularityBin is left as the bare name here;
          # the OS resolves it via PATH again at exec time.
          return True
    # No suitable binaries found
    return False

  def __getInstallFlags(self):
    """ Get the flags to pass to dirac-install.py inside the container.
        Returns a string containing the command line flags.
    """
    instOpts = []
    setup = gConfig.getValue("/DIRAC/Setup", "unknown")
    opsHelper = Operations.Operations(setup=setup)
    installationName = opsHelper.getValue("Pilot/Installation", "")
    if installationName:
      instOpts.append('-V %s' % installationName)
    diracVersions = opsHelper.getValue("Pilot/Version", [])
    # NOTE(review): raises IndexError if Pilot/Version is not set in the CS —
    # confirm the option is guaranteed to exist.
    instOpts.append("-r '%s'" % diracVersions[0])
    # install for the python version this process runs under, e.g. "27"
    pyVer = "%u%u" % (sys.version_info.major, sys.version_info.minor)
    instOpts.append("-i %s" % pyVer)
    pilotExtensionsList = opsHelper.getValue("Pilot/Extensions", [])
    extensionsList = []
    if pilotExtensionsList:
      # the literal string 'None' disables extensions explicitly
      if pilotExtensionsList[0] != 'None':
        extensionsList = pilotExtensionsList
    else:
      extensionsList = CSGlobals.getCSExtensions()
    if extensionsList:
      # Web extensions are not needed inside the job container
      instOpts.append("-e '%s'" % ','.join([ext for ext in extensionsList if 'Web' not in ext]))
    if 'ContainerExtraOpts' in self.ceParameters:
      instOpts.append(self.ceParameters['ContainerExtraOpts'])
    return ' '.join(instOpts)

  @staticmethod
  def __getConfigFlags():
    """ Get the flags for dirac-configure inside the container.
        Returns a string containing the command line flags.
    """
    cfgOpts = []
    setup = gConfig.getValue("/DIRAC/Setup", "unknown")
    if setup:
      cfgOpts.append("-S '%s'" % setup)
    csServers = gConfig.getValue("/DIRAC/Configuration/Servers", [])
    cfgOpts.append("-C '%s'" % ','.join(csServers))
    return ' '.join(cfgOpts)

  def __createWorkArea(self, proxy, jobDesc, log, logLevel):
    """ Creates a directory for the container and populates it with the
        template directories, scripts & proxy.

    :param str proxy: proxy contents to stage into the work area
    :param dict jobDesc: job description, must contain "jobID"
    :return: S_OK with 'baseDir', 'tmpDir' and 'proxyLocation' keys, or
             S_ERROR (with 'ReschedulePayload' set) on failure
    """
    # Create the directory for our container area
    try:
      os.mkdir(self.__workdir)
    except OSError:
      if not os.path.isdir(self.__workdir):
        result = S_ERROR("Failed to create container base directory '%s'" % self.__workdir)
        result['ReschedulePayload'] = True
        return result
      # Otherwise, directory probably just already exists...
    baseDir = None
    try:
      baseDir = tempfile.mkdtemp(prefix="job%s_" % jobDesc["jobID"], dir=self.__workdir)
    except OSError:
      result = S_ERROR("Failed to create container work directory in '%s'" % self.__workdir)
      result['ReschedulePayload'] = True
      return result
    self.log.debug('Use singularity workarea: %s' % baseDir)
    # skeleton directories exposed to the container
    for subdir in ["home", "tmp", "var_tmp"]:
      os.mkdir(os.path.join(baseDir, subdir))
    tmpDir = os.path.join(baseDir, "tmp")
    # Now we have a directory, we can stage in the proxy and scripts
    # Proxy (0600: readable by the job owner only)
    proxyLoc = os.path.join(tmpDir, "proxy")
    rawfd = os.open(proxyLoc, os.O_WRONLY | os.O_CREAT, 0o600)
    fd = os.fdopen(rawfd, "w")
    fd.write(proxy)
    fd.close()
    # dirac-install.py
    install_loc = os.path.join(tmpDir, "dirac-install.py")
    shutil.copyfile(DIRAC_INSTALL, install_loc)
    os.chmod(install_loc, 0o755)
    # Job Wrapper (Standard DIRAC wrapper)
    result = createRelocatedJobWrapper(tmpDir, self.__innerdir,
                                       log=log, logLevel=logLevel, **jobDesc)
    if not result['OK']:
      result['ReschedulePayload'] = True
      return result
    wrapperPath = result['Value']
    # Extra Wrapper (Container DIRAC installer)
    wrapSubs = {'next_wrapper': wrapperPath,
                'install_args': self.__getInstallFlags(),
                'config_args': self.__getConfigFlags(),
                }
    wrapLoc = os.path.join(tmpDir, "dirac_container.sh")
    rawfd = os.open(wrapLoc, os.O_WRONLY | os.O_CREAT, 0o700)
    fd = os.fdopen(rawfd, "w")
    fd.write(CONTAINER_WRAPPER % wrapSubs)
    fd.close()
    ret = S_OK()
    ret['baseDir'] = baseDir
    ret['tmpDir'] = tmpDir
    ret['proxyLocation'] = proxyLoc
    return ret

  def __getEnv(self):
    """ Gets the environment for use within the container.
        We blank almost everything to prevent contamination from the host system.
    """
    payloadEnv = {}
    if 'TERM' in os.environ:
      payloadEnv['TERM'] = os.environ['TERM']
    payloadEnv['TMP'] = '/tmp'
    payloadEnv['TMPDIR'] = '/tmp'
    # proxy path as seen from inside the container
    payloadEnv['X509_USER_PROXY'] = os.path.join(self.__innerdir, "proxy")
    return payloadEnv

  @staticmethod
  def __checkResult(tmpDir):
    """ Gets the result of the payload command and returns it. """
    # The wrapper writes the inner job return code to "retcode"
    # in the working directory.
    try:
      fd = open(os.path.join(tmpDir, "retcode"), "r")
      retCode = int(fd.read())
      fd.close()
    except (IOError, ValueError):
      # Something failed while trying to get the return code
      result = S_ERROR("Failed to get return code from inner wrapper")
      result['ReschedulePayload'] = True
      return result
    result = S_OK()
    if retCode:
      # This is the one case where we don't reschedule:
      # An actual failure of the inner payload for some reason
      result = S_ERROR("Command failed with exit code %d" % retCode)
    return result

  # pylint: disable=unused-argument,arguments-differ
  def submitJob(self, executableFile, proxy, jobDesc, log, logLevel, **kwargs):
    """ Start a container for a job.
        executableFile is ignored. A new wrapper suitable for running in a
        container is created from jobDesc.
    """
    rootImage = self.__root
    # Check that singularity is available
    if not self.__hasSingularity():
      self.log.error('Singularity is not installed on PATH.')
      result = S_ERROR("Failed to find singularity ")
      result['ReschedulePayload'] = True
      return result
    self.log.info('Creating singularity container')
    # Start by making the directory for the container
    ret = self.__createWorkArea(proxy, jobDesc, log, logLevel)
    if not ret['OK']:
      return ret
    baseDir = ret['baseDir']
    tmpDir = ret['tmpDir']
    proxyLoc = ret['proxyLocation']
    # Now we have to set-up proxy renewal for the container
    # This is fairly easy as it remains visible on the host filesystem
    ret = getProxyInfo()
    if not ret['OK']:
      pilotProxy = None
    else:
      pilotProxy = ret['Value']['path']
    result = gThreadScheduler.addPeriodicTask(self.proxyCheckPeriod, self._monitorProxy,
                                              taskArgs=(pilotProxy, proxyLoc),
                                              executions=0, elapsedTime=0)
    renewTask = None
    if result['OK']:
      renewTask = result['Value']
    else:
      # renewal failure is non-fatal: the job can still run with the staged proxy
      self.log.warn('Failed to start proxy renewal task')
    # Very simple accounting
    self.__submittedJobs += 1
    self.__runningJobs += 1
    # Now prepare start singularity
    # Mount /cvmfs in if it exists on the host
    withCVMFS = os.path.isdir("/cvmfs")
    innerCmd = os.path.join(self.__innerdir, "dirac_container.sh")
    cmd = [self.__singularityBin, "exec"]
    cmd.extend(["-c", "-i", "-p"])  # isolation flags (see `singularity exec` docs)
    cmd.extend(["-W", baseDir])
    if withCVMFS:
      cmd.extend(["-B", "/cvmfs"])
    if 'ContainerBind' in self.ceParameters:
      bindPaths = self.ceParameters['ContainerBind'].split(',')
      for bindPath in bindPaths:
        cmd.extend(["-B", bindPath.strip()])
    if 'ContainerOptions' in self.ceParameters:
      containerOpts = self.ceParameters['ContainerOptions'].split(',')
      for opt in containerOpts:
        cmd.extend([opt.strip()])
    cmd.extend([rootImage, innerCmd])
    self.log.debug('Execute singularity command: %s' % cmd)
    self.log.debug('Execute singularity env: %s' % self.__getEnv())
    # blocking call: returns when the container (and payload) has finished
    result = systemCall(0, cmd, callbackFunction=self.sendOutput, env=self.__getEnv())
    self.__runningJobs -= 1
    if not result["OK"]:
      if renewTask:
        gThreadScheduler.removeTask(renewTask)
      result = S_ERROR("Error running singularity command")
      result['ReschedulePayload'] = True
      return result
    result = self.__checkResult(tmpDir)
    if not result["OK"]:
      if renewTask:
        gThreadScheduler.removeTask(renewTask)
    # NOTE(review): the renewal task is only removed on failure paths; on a
    # successful payload it appears to remain scheduled — confirm intended.
    return result

  def getCEStatus(self, jobIDList=None):
    """ Method to return information on running and pending jobs.

    :return: S_OK with 'SubmittedJobs', 'RunningJobs' and 'WaitingJobs' keys
    """
    result = S_OK()
    result['SubmittedJobs'] = self.__submittedJobs
    result['RunningJobs'] = self.__runningJobs
    # jobs are run synchronously in submitJob, so nothing ever waits
    result['WaitingJobs'] = 0
    return result
|
arrabito/DIRAC
|
Resources/Computing/SingularityComputingElement.py
|
Python
|
gpl-3.0
| 11,803
|
[
"DIRAC"
] |
6b3e9674a169b8370a12f59ede31eb01b375e795ced741f3673b08f2ad946a84
|
########################################################################
# $HeadURL$
########################################################################
""" This is the StorageElement class.
"""
__RCSID__ = "$Id$"
# # custom duty
import re
from types import ListType, StringType, StringTypes, DictType
# # from DIRAC
from DIRAC import gLogger, S_OK, S_ERROR, gConfig
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.Pfn import pfnparse
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Resources.Utilities import Utils
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.DictCache import DictCache
class StorageElementCache( object ):
  """ Cache of StorageElementItem objects keyed by ( name, protocols, vo ). """

  def __init__( self ):
    # expiring cache of constructed StorageElementItem objects
    self.seCache = DictCache()

  def __call__( self, name, protocols = None, vo = None ):
    """ Return a cached StorageElementItem, building and caching one on a miss. """
    self.seCache.purgeExpired( expiredInSeconds = 60 )
    cacheKey = ( name, protocols, vo )
    storageElement = self.seCache.get( cacheKey )
    if storageElement:
      return storageElement
    storageElement = StorageElementItem( name, protocols, vo )
    # Add the StorageElement to the cache for 1/2 hour
    self.seCache.add( cacheKey, 1800, storageElement )
    return storageElement
class StorageElementItem( object ):
"""
.. class:: StorageElement
common interface to the grid storage element
self.name is the resolved name of the StorageElement i.e CERN-tape
self.options is dictionary containing the general options defined in the CS e.g. self.options['Backend] = 'Castor2'
self.storages is a list of the stub objects created by StorageFactory for the protocols found in the CS.
self.localProtocols is a list of the local protocols that were created by StorageFactory
self.remoteProtocols is a list of the remote protocols that were created by StorageFactory
self.protocolOptions is a list of dictionaries containing the options found in the CS. (should be removed)
dynamic method :
retransferOnlineFile( lfn )
exists( lfn )
isFile( lfn )
getFile( lfn, localPath = False )
putFile( lfnLocal, sourceSize = 0 ) : {lfn:local}
replicateFile( lfn, sourceSize = 0 )
getFileMetadata( lfn )
getFileSize( lfn )
removeFile( lfn )
prestageFile( lfn, lifetime = 86400 )
prestageFileStatus( lfn )
pinFile( lfn, lifetime = 60 * 60 * 24 )
releaseFile( lfn )
isDirectory( lfn )
getDirectoryMetadata( lfn )
getDirectorySize( lfn )
listDirectory( lfn )
removeDirectory( lfn, recursive = False )
createDirectory( lfn )
putDirectory( lfn )
getDirectory( lfn, localPath = False )
"""
__deprecatedArguments = ["singleFile", "singleDirectory"] # Arguments that are now useless
# Some methods have a different name in the StorageElement and the plugins...
# We could avoid this static list in the __getattr__ by checking the storage plugin and so on
# but fine... let's not be too smart, otherwise it becomes unreadable :-)
__equivalentMethodNames = {"exists" : "exists",
"isFile" : "isFile",
"getFile" : "getFile",
"putFile" : "putFile",
"replicateFile" : "putFile",
"getFileMetadata" : "getFileMetadata",
"getFileSize" : "getFileSize",
"removeFile" : "removeFile",
"prestageFile" : "prestageFile",
"prestageFileStatus" : "prestageFileStatus",
"pinFile" : "pinFile",
"releaseFile" : "releaseFile",
"isDirectory" : "isDirectory",
"getDirectoryMetadata" : "getDirectoryMetadata",
"getDirectorySize" : "getDirectorySize",
"listDirectory" : "listDirectory",
"removeDirectory" : "removeDirectory",
"createDirectory" : "createDirectory",
"putDirectory" : "putDirectory",
"getDirectory" : "getDirectory",
}
# We can set default argument in the __executeFunction which impacts all plugins
__defaultsArguments = {"putFile" : {"sourceSize" : 0 },
"getFile": { "localPath": False },
"prestageFile" : { "lifetime" : 86400 },
"pinFile" : { "lifetime" : 60 * 60 * 24 },
"removeDirectory" : { "recursive" : False },
"getDirectory" : { "localPath" : False },
}
  def __init__( self, name, protocols = None, vo = None ):
    """ c'tor

    :param str name: SE name
    :param list protocols: requested protocols (None means all configured protocols)
    :param vo: VO name; when None it is resolved from the proxy group
    """
    self.methodName = None

    if vo:
      self.vo = vo
    else:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # NOTE(review): returning here leaves the object half-constructed
        # (no self.vo, self.valid, self.log, ...) — confirm intended.
        return
      self.vo = result['Value']
    self.opHelper = Operations( vo = self.vo )
    self.resources = Resources( vo = self.vo )

    # decide whether access must go through a proxy storage service
    proxiedProtocols = gConfig.getValue( '/LocalSite/StorageElements/ProxyProtocols', "" ).split( ',' )
    result = self.resources.getAccessProtocols( name )
    if result['OK']:
      ap = result['Value'][0]
      useProxy = ( self.resources.getAccessProtocolValue( ap, "Protocol", "UnknownProtocol" )
                   in proxiedProtocols )
    # NOTE(review): if getAccessProtocols() fails, 'useProxy' is unbound and
    # the next line raises UnboundLocalError — confirm this cannot happen.
    if not useProxy:
      useProxy = gConfig.getValue( '/LocalSite/StorageElements/%s/UseProxy' % name, False )
    if not useProxy:
      useProxy = self.opHelper.getValue( '/Services/StorageElements/%s/UseProxy' % name, False )

    self.valid = True
    if protocols == None:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = [] )
    else:
      res = StorageFactory( useProxy ).getStorages( name, protocolList = protocols )
    if not res['OK']:
      # keep the object around but flag it invalid; errorReason explains why
      self.valid = False
      self.name = name
      self.errorReason = res['Message']
    else:
      factoryDict = res['Value']
      self.name = factoryDict['StorageName']
      self.options = factoryDict['StorageOptions']
      self.localProtocols = factoryDict['LocalProtocols']
      self.remoteProtocols = factoryDict['RemoteProtocols']
      self.storages = factoryDict['StorageObjects']
      self.protocolOptions = factoryDict['ProtocolOptions']
      self.turlProtocols = factoryDict['TurlProtocols']

    self.log = gLogger.getSubLogger( "SE[%s]" % self.name )

    # method classification used for access-control checks (read/write/...)
    self.readMethods = [ 'getFile',
                         'getAccessUrl',
                         'getTransportURL',
                         'prestageFile',
                         'prestageFileStatus',
                         'getDirectory']
    self.writeMethods = [ 'retransferOnlineFile',
                          'putFile',
                          'replicateFile',
                          'pinFile',
                          'releaseFile',
                          'createDirectory',
                          'putDirectory' ]
    self.removeMethods = [ 'removeFile', 'removeDirectory' ]
    self.checkMethods = [ 'exists',
                          'getDirectoryMetadata',
                          'getDirectorySize',
                          'getFileSize',
                          'getFileMetadata',
                          'listDirectory',
                          'isDirectory',
                          'isFile',
                          ]
    # methods always allowed regardless of the SE status
    self.okMethods = [ 'getLocalProtocols',
                       'getPfnForProtocol',
                       'getPfnForLfn',
                       'getPfnPath',
                       'getProtocols',
                       'getRemoteProtocols',
                       'getStorageElementName',
                       'getStorageElementOption',
                       'getStorageParameters',
                       'isLocalSE' ]

    self.__resourceStatus = ResourceStatus()
def dump( self ):
""" Dump to the logger a summary of the StorageElement items. """
self.log.verbose( "dump: Preparing dump for StorageElement %s." % self.name )
if not self.valid:
self.log.debug( "dump: Failed to create StorageElement plugins.", self.errorReason )
return
i = 1
outStr = "\n\n============ Options ============\n"
for key in sorted( self.options ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), self.options[key] )
for storage in self.storages:
outStr = "%s============Protocol %s ============\n" % ( outStr, i )
res = storage.getParameters()
storageParameters = res['Value']
for key in sorted( storageParameters ):
outStr = "%s%s: %s\n" % ( outStr, key.ljust( 15 ), storageParameters[key] )
i = i + 1
self.log.verbose( outStr )
#################################################################################################
#
# These are the basic get functions for storage configuration
#
  def getStorageElementName( self ):
    """ Return the resolved StorageElement name.

    :return: S_OK( str ) with the SE name
    """
    self.log.verbose( "StorageElement.getStorageElementName: The Storage Element name is %s." % self.name )
    return S_OK( self.name )
def getChecksumType( self ):
""" get local /Resources/StorageElements/SEName/ChecksumType option if defined, otherwise
global /Resources/StorageElements/ChecksumType
"""
self.log.verbose( "StorageElement.getChecksumType : get checksum type for %s." % self.name )
return S_OK( str( gConfig.getValue( "/Resources/StorageElements/ChecksumType", "ADLER32" ) ).upper()
if "ChecksumType" not in self.options else str( self.options["ChecksumType"] ).upper() )
def getStatus( self ):
"""
Return Status of the SE, a dictionary with:
- Read: True (is allowed), False (it is not allowed)
- Write: True (is allowed), False (it is not allowed)
- Remove: True (is allowed), False (it is not allowed)
- Check: True (is allowed), False (it is not allowed).
NB: Check always allowed IF Read is allowed (regardless of what set in the Check option of the configuration)
- DiskSE: True if TXDY with Y > 0 (defaults to True)
- TapeSE: True if TXDY with X > 0 (defaults to False)
- TotalCapacityTB: float (-1 if not defined)
- DiskCacheTB: float (-1 if not defined)
"""
self.log.verbose( "StorageElement.getStatus : determining status of %s." % self.name )
retDict = {}
if not self.valid:
retDict['Read'] = False
retDict['Write'] = False
retDict['Remove'] = False
retDict['Check'] = False
retDict['DiskSE'] = False
retDict['TapeSE'] = False
retDict['TotalCapacityTB'] = -1
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
# If nothing is defined in the CS Access is allowed
# If something is defined, then it must be set to Active
retDict['Read'] = self.__resourceStatus.isUsableStorage( self.name, 'ReadAccess' )
retDict['Write'] = self.__resourceStatus.isUsableStorage( self.name, 'WriteAccess' )
retDict['Remove'] = self.__resourceStatus.isUsableStorage( self.name, 'RemoveAccess' )
if retDict['Read']:
retDict['Check'] = True
else:
retDict['Check'] = self.__resourceStatus.isUsableStorage( self.name, 'CheckAccess' )
diskSE = True
tapeSE = False
if 'SEType' in self.options:
# Type should follow the convention TXDY
seType = self.options['SEType']
diskSE = re.search( 'D[1-9]', seType ) != None
tapeSE = re.search( 'T[1-9]', seType ) != None
retDict['DiskSE'] = diskSE
retDict['TapeSE'] = tapeSE
try:
retDict['TotalCapacityTB'] = float( self.options['TotalCapacityTB'] )
except Exception:
retDict['TotalCapacityTB'] = -1
try:
retDict['DiskCacheTB'] = float( self.options['DiskCacheTB'] )
except Exception:
retDict['DiskCacheTB'] = -1
return S_OK( retDict )
def isValid( self, operation = '' ):
""" check CS/RSS statuses for :operation:
:param str operation: operation name
"""
self.log.verbose( "StorageElement.isValid: Determining whether the StorageElement %s is valid for %s" % ( self.name,
operation ) )
if ( not operation ) or ( operation in self.okMethods ):
return S_OK()
if not self.valid:
self.log.debug( "StorageElement.isValid: Failed to create StorageElement plugins.", self.errorReason )
return S_ERROR( self.errorReason )
# Determine whether the StorageElement is valid for checking, reading, writing
res = self.getStatus()
if not res[ 'OK' ]:
self.log.debug( "Could not call getStatus" )
return S_ERROR( "StorageElement.isValid could not call the getStatus method" )
checking = res[ 'Value' ][ 'Check' ]
reading = res[ 'Value' ][ 'Read' ]
writing = res[ 'Value' ][ 'Write' ]
removing = res[ 'Value' ][ 'Remove' ]
# Determine whether the requested operation can be fulfilled
if ( not operation ) and ( not reading ) and ( not writing ) and ( not checking ):
self.log.debug( "StorageElement.isValid: Read, write and check access not permitted." )
return S_ERROR( "StorageElement.isValid: Read, write and check access not permitted." )
# The supplied operation can be 'Read','Write' or any of the possible StorageElement methods.
if ( operation in self.readMethods ) or ( operation.lower() in ( 'read', 'readaccess' ) ):
operation = 'ReadAccess'
elif operation in self.writeMethods or ( operation.lower() in ( 'write', 'writeaccess' ) ):
operation = 'WriteAccess'
elif operation in self.removeMethods or ( operation.lower() in ( 'remove', 'removeaccess' ) ):
operation = 'RemoveAccess'
elif operation in self.checkMethods or ( operation.lower() in ( 'check', 'checkaccess' ) ):
operation = 'CheckAccess'
else:
self.log.debug( "StorageElement.isValid: The supplied operation is not known.", operation )
return S_ERROR( "StorageElement.isValid: The supplied operation is not known." )
self.log.debug( "in isValid check the operation: %s " % operation )
# Check if the operation is valid
if operation == 'CheckAccess':
if not reading:
if not checking:
self.log.debug( "StorageElement.isValid: Check access not currently permitted." )
return S_ERROR( "StorageElement.isValid: Check access not currently permitted." )
if operation == 'ReadAccess':
if not reading:
self.log.debug( "StorageElement.isValid: Read access not currently permitted." )
return S_ERROR( "StorageElement.isValid: Read access not currently permitted." )
if operation == 'WriteAccess':
if not writing:
self.log.debug( "StorageElementisValid: Write access not currently permitted." )
return S_ERROR( "StorageElement.isValid: Write access not currently permitted." )
if operation == 'RemoveAccess':
if not removing:
self.log.debug( "StorageElement.isValid: Remove access not currently permitted." )
return S_ERROR( "StorageElement.isValid: Remove access not currently permitted." )
return S_OK()
def getProtocols( self ):
""" Get the list of all the protocols defined for this Storage Element
"""
self.log.verbose( "StorageElement.getProtocols : Obtaining all protocols of %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
allProtocols = self.localProtocols + self.remoteProtocols
return S_OK( allProtocols )
def getRemoteProtocols( self ):
""" Get the list of all the remote access protocols defined for this Storage Element
"""
self.log.verbose( "StorageElement.getRemoteProtocols: Obtaining remote protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.remoteProtocols )
def getLocalProtocols( self ):
""" Get the list of all the local access protocols defined for this Storage Element
"""
self.log.verbose( "StorageElement.getLocalProtocols: Obtaining local protocols for %s." % self.name )
if not self.valid:
return S_ERROR( self.errorReason )
return S_OK( self.localProtocols )
def getStorageElementOption( self, option ):
""" Get the value for the option supplied from self.options
:param option : option we are interested in
"""
self.log.verbose( "StorageElement.getStorageElementOption: Obtaining %s option for Storage Element %s." % ( option,
self.name ) )
if not self.valid:
return S_ERROR( self.errorReason )
if option in self.options:
optionValue = self.options[option]
return S_OK( optionValue )
else:
errStr = "StorageElement.getStorageElementOption: Option not defined for SE."
self.log.debug( errStr, "%s for %s" % ( option, self.name ) )
return S_ERROR( errStr )
def getStorageParameters( self, protocol ):
""" Get protocol specific options
:param protocol : protocol we are interested in
"""
self.log.verbose( "StorageElement.getStorageParameters: Obtaining storage parameters for %s protocol %s." % ( self.name,
protocol ) )
res = self.getProtocols()
if not res['OK']:
return res
availableProtocols = res['Value']
if not protocol in availableProtocols:
errStr = "StorageElement.getStorageParameters: Requested protocol not available for SE."
self.log.debug( errStr, '%s for %s' % ( protocol, self.name ) )
return S_ERROR( errStr )
for storage in self.storages:
res = storage.getParameters()
storageParameters = res['Value']
if storageParameters['ProtocolName'] == protocol:
return S_OK( storageParameters )
errStr = "StorageElement.getStorageParameters: Requested protocol supported but no object found."
self.log.debug( errStr, "%s for %s" % ( protocol, self.name ) )
return S_ERROR( errStr )
def isLocalSE( self ):
""" Test if the Storage Element is local in the current context
"""
self.log.verbose( "StorageElement.isLocalSE: Determining whether %s is a local SE." % self.name )
import DIRAC
localSEs = getSEsForSite( DIRAC.siteName() )['Value']
if self.name in localSEs:
return S_OK( True )
else:
return S_OK( False )
#################################################################################################
#
# These are the basic get functions for lfn manipulation
#
  def __getSinglePfnForProtocol( self, pfn, protocol, withPort = True ):
    """ Transform the input pfn into a pfn with the given protocol for the Storage Element.

    :param pfn : input PFN
    :param protocol : string or list of strings of the protocols we want
    :param withPort : includes the port in the returned pfn
    :returns S_OK(pfn) from the first plugin able to build it, S_ERROR otherwise
    """
    self.log.verbose( "StorageElement.getSinglePfnForProtocol: Getting pfn for given protocols in %s." % self.name )
    # This test of the available protocols could actually be done in getPfnForProtocol once for all
    # but it is safer to put it here in case we decide to call this method internally (which I doubt!)
    res = self.getProtocols()
    if not res['OK']:
      return res
    if type( protocol ) == StringType:
      protocols = [protocol]
    elif type( protocol ) == ListType:
      protocols = protocol
    else:
      errStr = "StorageElement.getSinglePfnForProtocol: Supplied protocol must be string or list of strings."
      self.log.debug( errStr, "%s %s" % ( protocol, self.name ) )
      return S_ERROR( errStr )
    availableProtocols = res['Value']
    protocolsToTry = []
    # NOTE(review): the loop variable shadows the 'protocol' parameter; the log
    # message after the loop therefore reports the last protocol iterated, not
    # the caller's original argument.
    for protocol in protocols:
      if protocol in availableProtocols:
        protocolsToTry.append( protocol )
      else:
        errStr = "StorageElement.getSinglePfnForProtocol: Requested protocol not available for SE."
        self.log.debug( errStr, '%s for %s' % ( protocol, self.name ) )
    if not protocolsToTry:
      errStr = "StorageElement.getSinglePfnForProtocol: None of the requested protocols were available for SE."
      self.log.debug( errStr, '%s for %s' % ( protocol, self.name ) )
      return S_ERROR( errStr )
    # Check all available storages for required protocol then construct the PFN
    for storage in self.storages:
      res = storage.getParameters()
      if res['Value']['ProtocolName'] in protocolsToTry:
        res = pfnparse( pfn )
        if res['OK']:
          res = storage.getProtocolPfn( res['Value'], withPort )
          if res['OK']:
            return res
    errStr = "StorageElement.getSinglePfnForProtocol: Failed to get PFN for requested protocols."
    self.log.debug( errStr, "%s for %s" % ( protocols, self.name ) )
    return S_ERROR( errStr )
def getPfnForProtocol( self, pfns, protocol = "SRM2", withPort = True ):
""" create PFNs strings using protocol :protocol:
:param self: self reference
:param list pfns: list of PFNs
:param str protocol: protocol name (default: 'SRM2')
:param bool withPort: flag to include port in PFN (default: True)
"""
if type( pfns ) in StringTypes:
pfnDict = {pfns:False}
elif type( pfns ) == ListType:
pfnDict = {}
for pfn in pfns:
pfnDict[pfn] = False
elif type( pfns ) == DictType:
pfnDict = pfns
else:
errStr = "StorageElement.getLfnForPfn: Supplied pfns must be string, list of strings or a dictionary."
self.log.debug( errStr )
return S_ERROR( errStr )
res = self.isValid( "getPfnForProtocol" )
if not res["OK"]:
return res
retDict = { "Successful" : {}, "Failed" : {}}
for pfn in pfnDict:
res = self.__getSinglePfnForProtocol( pfn, protocol, withPort = withPort )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
  def getPfnPath( self, pfn ):
    """ Get the part of the PFN path below the basic storage path.

        This path must coincide with the LFN of the file in order to be
        compliant with the LHCb conventions.

    :param pfn : input PFN
    :returns S_OK(path below the storage base) or S_ERROR
    """
    self.log.verbose( "StorageElement.getPfnPath: Getting path from pfn in %s." % self.name )
    if not self.valid:
      return S_ERROR( self.errorReason )
    res = pfnparse( pfn )
    if not res['OK']:
      return res
    fullPfnPath = '%s/%s' % ( res['Value']['Path'], res['Value']['FileName'] )
    # Check all available storages and check whether the pfn is for that protocol
    pfnPath = ''
    for storage in self.storages:
      res = storage.isPfnForProtocol( pfn )
      if res['OK']:
        if res['Value']:
          res = storage.getParameters()
          saPath = res['Value']['Path']
          if not saPath:
            # If the sa path doesn't exist then the pfn path is the entire string
            pfnPath = fullPfnPath
          else:
            # NOTE(review): saPath is interpreted as a regular expression by
            # re.search; regex metacharacters in a storage path would change
            # matching -- confirm a plain substring test was not intended.
            if re.search( saPath, fullPfnPath ):
              # Remove the sa path from the fullPfnPath
              pfnPath = fullPfnPath.replace( saPath, '' )
          if pfnPath:
            return S_OK( pfnPath )
    # This should never happen. DANGER!!
    errStr = "StorageElement.getPfnPath: Failed to get the pfn path for any of the protocols!!"
    self.log.debug( errStr )
    return S_ERROR( errStr )
def getLfnForPfn( self, pfns ):
""" Get the LFN from the PFNS .
:param lfn : input lfn or lfns (list/dict)
"""
if type( pfns ) in StringTypes:
pfnDict = {pfns:False}
elif type( pfns ) == ListType:
pfnDict = {}
for pfn in pfns:
pfnDict[pfn] = False
elif type( pfns ) == DictType:
pfnDict = pfns.copy()
else:
errStr = "StorageElement.getLfnForPfn: Supplied pfns must be string, list of strings or a dictionary."
self.log.debug( errStr )
return S_ERROR( errStr )
res = self.isValid( "getPfnPath" )
if not res['OK']:
self.log.error( "StorageElement.getLfnForPfn: Failed to instantiate StorageElement at %s" % self.name )
return res
retDict = { "Successful" : {}, "Failed" : {} }
for pfn in pfnDict:
res = self.getPfnPath( pfn )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
def __getSinglePfnForLfn( self, lfn ):
""" Get the full PFN constructed from the LFN.
:param lfn : input lfn or lfns (list/dict)
"""
self.log.debug( "StorageElement.__getSinglePfnForLfn: Getting pfn from lfn in %s." % self.name )
for storage in self.storages:
res = storage.getPFNBase()
if res['OK']:
fullPath = "%s%s" % ( res['Value'], lfn )
return S_OK( fullPath )
# This should never happen. DANGER!!
errStr = "StorageElement.__getSinglePfnForLfn: Failed to get the full pfn for any of the protocols (%s)!!" % ( self.name )
self.log.debug( errStr )
return S_ERROR( errStr )
def getPfnForLfn( self, lfns ):
""" get PFNs for supplied LFNs at :storageElementName: SE
:param self: self reference
:param list lfns: list of LFNs
:param str stotrageElementName: DIRAC SE name
"""
if type( lfns ) in StringTypes:
lfnDict = {lfns:False}
elif type( lfns ) == ListType:
lfnDict = {}
for lfn in lfns:
lfnDict[lfn] = False
elif type( lfns ) == DictType:
lfnDict = lfns.copy()
else:
errStr = "StorageElement.getPfnForLfn: Supplied lfns must be string, list of strings or a dictionary."
self.log.debug( errStr )
return S_ERROR( errStr )
if not self.valid:
return S_ERROR( self.errorReason )
retDict = { "Successful" : {}, "Failed" : {} }
for lfn in lfnDict:
res = self.__getSinglePfnForLfn( lfn )
if res["OK"]:
retDict["Successful"][lfn] = res["Value"]
else:
retDict["Failed"][lfn] = res["Message"]
return S_OK( retDict )
def getPFNBase( self ):
""" Get the base to construct a PFN
"""
self.log.verbose( "StorageElement.getPFNBase: Getting pfn base for %s." % self.name )
if not self.storages:
return S_ERROR( 'No storages defined' )
for storage in self.storages:
result = storage.getPFNBase()
if result['OK']:
return result
return result
###########################################################################################
#
# This is the generic wrapper for file operations
#
def getAccessUrl( self, lfn, protocol = False, singleFile = None ):
""" execute 'getTransportURL' operation.
:param str lfn: string, list or dictionnary of lfns
:param protocol: if no protocol is specified, we will request self.turlProtocols
"""
self.log.verbose( "StorageElement.getAccessUrl: Getting accessUrl for lfn in %s." % self.name )
if not protocol:
protocols = self.turlProtocols
else:
protocols = [protocol]
argDict = {"protocols" : protocols}
if singleFile is not None:
argDict["singleFile"] = singleFile
self.methodName = "getTransportURL"
return self.__executeMethod( lfn, **argDict )
  def __generatePfnDict( self, lfns, storage ):
    """ Generate a dictionary ( pfn : lfn ), where the pfns are constructed
        from the lfns using the getProtocolPfn method of the storage plugin.

    :param lfns : dictionary {lfn:whatever}
    :param storage : storage plugin used to build the pfns
    :returns S_OK({constructed pfn : lfn}); the returned structure also carries
             a non-standard 'Failed' key holding {lfn : accumulated error string}
    """
    self.log.verbose( "StorageElement.__generatePfnDict: generating pfn dict for %s lfn in %s." % ( len( lfns ), self.name ) )
    pfnDict = {}  # pfn : lfn
    failed = {}  # lfn : string with errors
    for lfn in lfns:
      if ":" in lfn:
        # A scheme separator means the caller passed a PFN, not an LFN -- warn but continue
        errStr = "StorageElement.__generatePfnDict: received a pfn as input. It should not happen anymore, please check your code"
        self.log.verbose( errStr, lfn )
      res = pfnparse( lfn )  # pfnparse can take an lfn as input, it will just fill the path and filename
      if not res['OK']:
        errStr = "StorageElement.__generatePfnDict: Failed to parse supplied LFN."
        self.log.debug( errStr, "%s: %s" % ( lfn, res['Message'] ) )
        if lfn not in failed:
          failed[lfn] = ''
        failed[lfn] = "%s %s" % ( failed[lfn], errStr )
      else:
        res = storage.getProtocolPfn( res['Value'], True )
        if not res['OK']:
          errStr = "StorageElement.__generatePfnDict %s." % res['Message']
          self.log.debug( errStr, 'for %s' % ( lfn ) )
          if lfn not in failed:
            failed[lfn] = ''
          failed[lfn] = "%s %s" % ( failed[lfn], errStr )
        else:
          pfnDict[res['Value']] = lfn
    res = S_OK( pfnDict )
    # Attach the per-lfn failures directly on the return structure (non-standard)
    res['Failed'] = failed
    return res
def __executeMethod( self, lfn, *args, **kwargs ):
""" Forward the call to each storage in turn until one works.
The method to be executed is stored in self.methodName
:param lfn : string, list or dictionnary
:param *args : variable amount of non-keyword arguments. SHOULD BE EMPTY
:param **kwargs : keyword arguments
:returns S_OK( { 'Failed': {lfn : reason} , 'Successful': {lfn : value} } )
The Failed dict contains the lfn only if the operation failed on all the storages
The Successful dict contains the value returned by the successful storages.
"""
removedArgs = {}
self.log.verbose( "StorageElement.__executeMethod : preparing the execution of %s" % ( self.methodName ) )
# args should normaly be empty to avoid problem...
if len( args ):
self.log.verbose( "StorageElement.__executeMethod: args should be empty!%s" % args )
# because there is normaly normaly only one kw argument, I can move it from args to kwargs
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} ).keys()
if len( methDefaultArgs ):
kwargs[methDefaultArgs[0] ] = args[0]
args = args[1:]
self.log.verbose( "StorageElement.__executeMethod: put it in kwargs, but dirty and might be dangerous!args %s kwargs %s" % ( args, kwargs ) )
# We check the deprecated arguments
for depArg in StorageElementItem.__deprecatedArguments:
if depArg in kwargs:
self.log.verbose( "StorageElement.__executeMethod: %s is not an allowed argument anymore. Please change your code!" % depArg )
removedArgs[depArg] = kwargs[depArg]
del kwargs[depArg]
# Set default argument if any
methDefaultArgs = StorageElementItem.__defaultsArguments.get( self.methodName, {} )
for argName in methDefaultArgs:
if argName not in kwargs:
self.log.debug( "StorageElement.__executeMethod : default argument %s for %s not present.\
Setting value %s." % ( argName, self.methodName, methDefaultArgs[argName] ) )
kwargs[argName] = methDefaultArgs[argName]
if type( lfn ) in StringTypes:
lfnDict = {lfn:False}
elif type( lfn ) == ListType:
lfnDict = {}
for url in lfn:
lfnDict[url] = False
elif type( lfn ) == DictType:
lfnDict = lfn.copy()
else:
errStr = "StorageElement.__executeMethod: Supplied lfns must be string, list of strings or a dictionary."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.verbose( "StorageElement.__executeMethod: Attempting to perform '%s' operation with %s lfns." % ( self.methodName,
len( lfnDict ) ) )
res = self.isValid( operation = self.methodName )
if not res['OK']:
return res
else:
if not self.valid:
return S_ERROR( self.errorReason )
successful = {}
failed = {}
localSE = self.isLocalSE()['Value']
# Try all of the storages one by one
for storage in self.storages:
# Determine whether to use this storage object
res = storage.getParameters()
useProtocol = True
if not res['OK']:
self.log.debug( "StorageElement.__executeMethod: Failed to get storage parameters.", "%s %s" % ( self.name,
res['Message'] ) )
useProtocol = False
else:
protocolName = res['Value']['ProtocolName']
if not lfnDict:
useProtocol = False
self.log.debug( "StorageElement.__executeMethod: No lfns to be attempted for %s protocol." % protocolName )
elif not ( protocolName in self.remoteProtocols ) and not localSE:
# If the SE is not local then we can't use local protocols
useProtocol = False
self.log.debug( "StorageElement.__executeMethod: Local protocol not appropriate for remote use: %s." % protocolName )
if useProtocol:
self.log.verbose( "StorageElement.__executeMethod: Generating %s protocol PFNs for %s." % ( len( lfnDict ),
protocolName ) )
res = self.__generatePfnDict( lfnDict, storage )
pfnDict = res['Value'] # pfn : lfn
failed.update( res['Failed'] )
if not len( pfnDict ):
self.log.verbose( "StorageElement.__executeMethod No pfns generated for protocol %s." % protocolName )
else:
self.log.verbose( "StorageElement.__executeMethod: Attempting to perform '%s' for %s physical files" % ( self.methodName,
len( pfnDict ) ) )
fcn = None
if hasattr( storage, self.methodName ) and callable( getattr( storage, self.methodName ) ):
fcn = getattr( storage, self.methodName )
if not fcn:
return S_ERROR( "StorageElement.__executeMethod: unable to invoke %s, it isn't a member function of storage" )
pfnsToUse = {} # pfn : the value of the lfn dictionary for the lfn of this pfn
for pfn in pfnDict:
pfnsToUse[pfn] = lfnDict[pfnDict[pfn]]
res = fcn( pfnsToUse, *args, **kwargs )
if not res['OK']:
errStr = "StorageElement.__executeMethod: Completely failed to perform %s." % self.methodName
self.log.debug( errStr, '%s for protocol %s: %s' % ( self.name, protocolName, res['Message'] ) )
for lfn in pfnDict.values():
if lfn not in failed:
failed[lfn] = ''
failed[lfn] += " %s" % ( res['Message'] ) # Concatenate! Not '=' :-)
else:
for pfn, lfn in pfnDict.items():
if pfn not in res['Value']['Successful']:
if lfn not in failed:
failed[lfn] = ''
if pfn in res['Value']['Failed']:
failed[lfn] = "%s %s" % ( failed[lfn], res['Value']['Failed'][pfn] )
else:
failed[lfn] = "%s %s" % ( failed[lfn], 'No error returned from plug-in' )
else:
successful[lfn] = res['Value']['Successful'][pfn]
if lfn in failed:
failed.pop( lfn )
lfnDict.pop( lfn )
# Ensure backward compatibility for singleFile and singleDirectory for the time of a version
singleFileOrDir = removedArgs.get( "singleFile", False ) or removedArgs.get( "singleDirectory", False )
retValue = S_OK( { 'Failed': failed, 'Successful': successful } )
if singleFileOrDir:
self.log.verbose( "StorageElement.__executeMethod : use returnSingleResult for backward compatibility. You should fix your code " )
retValue = returnSingleResult( retValue )
return retValue
  def __getattr__( self, name ):
    """ Forwards the equivalent Storage calls to StorageElement.__executeMethod.

        Invoked only when normal attribute lookup fails: if *name* is a known
        Storage-method alias, it is remembered in self.methodName and the
        bound __executeMethod is returned; otherwise AttributeError propagates
        as usual.
    """
    # We take either the equivalent name, or the name itself
    # (relies on class-private name mangling of __equivalentMethodNames)
    self.methodName = StorageElementItem.__equivalentMethodNames.get( name, None )
    if self.methodName:
      return self.__executeMethod
    raise AttributeError
StorageElement = StorageElementCache()
|
sposs/DIRAC
|
Resources/Storage/StorageElement.py
|
Python
|
gpl-3.0
| 36,591
|
[
"DIRAC"
] |
dd4762d05db804cc9e0e7efb682e514ddd5c30edd04620e7464ca003944d98ce
|
# -*- coding: utf-8 -*-
__author__ = 'Vojtech Vozab'
import bigaussian
import argparse
import glob
import numpy as np
import os
from skimage import io
def process_16bit_folder(path, kernel_function, vesselness_function, sigma_foreground, sigma_background, step_size, number_steps, zratio):
    """Run the filter over every .tif image found directly in *path*.

    Each image is processed six times: both smoothing kernels (bigaussian and
    gaussian) are combined with all three vesselness functions (bg, frangi,
    sato), each writing into its own output subdirectory.

    NOTE(review): the kernel_function and vesselness_function parameters are
    accepted but never used -- every kernel/vesselness combination below is
    hard-coded. Confirm whether that is intended.

    Returns -1 when no loadable files are found, otherwise None.
    """
    suffix = 'tif'
    image_list = glob.glob(path + "*" + suffix)
    if not image_list:
        print "no loadable files in folder"
        return -1
    for image_file in image_list:
        # output_file=None lets process_16bit_file derive a per-variant output path
        process_16bit_file(image_file, None, bigaussian.bigaussian_kernel_3d_alt, bigaussian.lineness_bg_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
        process_16bit_file(image_file, None, bigaussian.bigaussian_kernel_3d_alt, bigaussian.lineness_frangi_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
        process_16bit_file(image_file, None, bigaussian.bigaussian_kernel_3d_alt, bigaussian.lineness_sato_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
        process_16bit_file(image_file, None, bigaussian.gaussian_kernel_3d_alt, bigaussian.lineness_bg_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
        process_16bit_file(image_file, None, bigaussian.gaussian_kernel_3d_alt, bigaussian.lineness_frangi_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
        process_16bit_file(image_file, None, bigaussian.gaussian_kernel_3d_alt, bigaussian.lineness_sato_3d, sigma_foreground, sigma_background,
                           step_size, number_steps, zratio)
def process_16bit_file(input_file, output_file, kernel_function, vesselness_function, sigma_foreground, sigma_background, step_size, number_steps, zratio):
    """Filter one 16-bit TIFF volume and save the result as a 16-bit TIFF.

    :param input_file: path to the input 16-bit .tif stack
    :param output_file: explicit output path, or None to derive one inside a
        per-variant subdirectory named out_{bg|gauss}_{bg|frangi|sato}
    :param kernel_function: smoothing kernel (bigaussian or gaussian variant)
    :param vesselness_function: lineness measure (bg, frangi or sato)
    :param sigma_foreground: foreground sigma of the (bi)gaussian kernel
    :param sigma_background: background sigma of the bigaussian kernel
    :param step_size: amount sigma grows per multiscale step
    :param number_steps: number of multiscale steps
    :param zratio: z-axis scale for anisotropic volumes (typically < 1)
    """
    # Normalize the 16-bit input to floats in [0, 1]
    img_3d_float = io.imread(input_file).astype(np.float64) / 65535
    # Encode the chosen kernel in the output directory name ...
    if kernel_function is bigaussian.bigaussian_kernel_3d_alt:
        dirname = "out_bg"
    else:
        dirname = "out_gauss"
    # ... and append the chosen vesselness function
    if vesselness_function is bigaussian.lineness_bg_3d:
        dirname = dirname+"_bg"
    elif vesselness_function is bigaussian.lineness_frangi_3d:
        dirname = dirname+"_frangi"
    else:
        dirname = dirname+"_sato"
    if output_file is None:
        # Derive <input dir>/<variant dir>/<basename>_out.<suffix>
        directory, filename = os.path.split(input_file)
        filename_nosuf, suffix = os.path.splitext(filename)
        if not os.path.exists(os.path.join(directory, dirname)):
            os.makedirs(os.path.join(directory, dirname))
        print "processing", filename, "for", dirname
        output_file = os.path.join(directory, dirname, filename_nosuf)+"_out"+suffix
    print "filter params", sigma_foreground, sigma_background, step_size, number_steps
    output_3d_float = bigaussian.general_filter_3d(img_3d_float, kernel_function, vesselness_function, sigma_foreground, sigma_background, step_size, number_steps, zratio)
    # Scale back to the 16-bit range before saving
    io.imsave(output_file, (output_3d_float * 65535).astype(np.uint16))
if __name__ == '__main__':
    # Command-line front end: parse arguments, resolve the kernel/vesselness
    # callables and dispatch to single-file or whole-folder processing.
    parser = argparse.ArgumentParser(description='A lineness filter for 3D images.')
    parser.add_argument('input', help='input filename')
    parser.add_argument('--output', '-o', help='output filename')
    parser.add_argument('--params', '-p', metavar='X', type=float, nargs=4, help='Filter parameters - foreground sigma and '
                                                                                 'background sigma for bigaussian, number of '
                                                                                 'multiscale steps and the value by which'
                                                                                 'sigma gets enlarged each step, '
                                                                                 'in this order. If omitted, the default '
                                                                                 'parameters are 3 1.5 1 0.5')
    parser.add_argument('--kernel', '-k', choices=['bigaussian', 'gaussian'], help='Choose between smoothing kernels, valid '
                                                                                   'options are bigaussian (default) or gaussian.')
    parser.add_argument('--vesselness', '-v', choices=['bigaussian', 'frangi', 'sato'], help='Choose between vesselness functions, valid'
                                                                                             'options are bigaussian (default), frangi or sato.')
    parser.add_argument('--directory', '-d', choices=['y', 'n'], help='If set to \'y\', filter will process every .tif image in the directory')
    parser.add_argument('--zratio', '-z', type=float, help='For anisotropic images, set the scale of the z-axis, typically <1.')
    args = parser.parse_args()
    if args.params is None:
        args.params = [3, 1.5, 1, 0.5]
    # Map the kernel name to the implementation (default: bigaussian)
    if args.kernel == 'gaussian':
        kernel_param = bigaussian.gaussian_kernel_3d
    else:
        kernel_param = bigaussian.bigaussian_kernel_3d_alt
    # Map the vesselness name to the implementation (default: bg)
    if args.vesselness == 'sato':
        vesselness_param = bigaussian.lineness_sato_3d
    elif args.vesselness == 'frangi':
        vesselness_param = bigaussian.lineness_frangi_3d
    else:
        vesselness_param = bigaussian.lineness_bg_3d
    if args.zratio is None:
        args.zratio = 1
    # params layout: [fg sigma, bg sigma, number of steps, step size];
    # note the call passes params[3] as step_size and int(params[2]) as number_steps
    if args.directory == 'y':
        process_16bit_folder(args.input, kernel_param, vesselness_param, args.params[0], args.params[1], args.params[3], int(args.params[2]), args.zratio)
    else:
        process_16bit_file(args.input, args.output, kernel_param, vesselness_param, args.params[0], args.params[1], args.params[3], int(args.params[2]), args.zratio)
|
V17/bigaussian
|
main.py
|
Python
|
gpl-3.0
| 5,708
|
[
"Gaussian"
] |
e23d3e9b51de10f510e75e3db3fec486725c40a022577b75d726867a426ee7d1
|
'''
PathwayGenie (c) University of Manchester 2017
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
|
synbiochem/PathwayGenie
|
plasmid_genie/__init__.py
|
Python
|
mit
| 207
|
[
"VisIt"
] |
541a7945b7483a40a830dca97094a29f756cc11ccf22ecdaa6b19d52843640e5
|
from __future__ import division
import numpy as np
import pyhsmm
from internals.states import FactorialStates,\
FactorialComponentHSMMStates,\
FactorialComponentHSMMStatesPossibleChangepoints
###################################
# overall problem wrapper class #
###################################
class Factorial(pyhsmm.basic.abstractions.ModelGibbsSampling):
    """Wrapper tying several component models into one factorial model.

    Observations are modeled as sums of the component models' emissions;
    inference is Gibbs sampling with an annealed extra-noise (temperature)
    schedule for numerical robustness.
    """

    def __init__(self,component_models):
        self.component_models = component_models # should be a list of factorial_component models
        self.states_list = [] # a list of factorial_allstates

    def add_data(self,data,**kwargs):
        """Attach an observation sequence; kwargs may carry changepoints."""
        # pass in state dimensions so that museqs and varseqs can be maintained
        # kwargs is for changepoints
        self.states_list.append(
                FactorialStates(
                    data=data,
                    component_models=self.component_models,
                    **kwargs))

    def resample_model(self,max_extra_noise,min_extra_noise,niter=25):
        """Run one annealed resampling pass over states, emissions and models.

        :param max_extra_noise: peak of the extra-noise schedule
        :param min_extra_noise: floor of the schedule, useful for numerical stability
        :param niter: number of annealing iterations
        """
        # min_extra_noise useful for numerical stability
        # set up a temperature schedule: half-cosine decay from max_extra_noise
        # towards zero over the first 3/4 of the iterations, clipped below at
        # min_extra_noise
        temps = np.zeros(niter)
        cutofftime = int(3./4 * len(temps))
        temps[:cutofftime] = max_extra_noise/2 * (1+np.cos(np.linspace(0,np.pi,cutofftime)))
        temps = np.where(temps < min_extra_noise, min_extra_noise, temps)
        for itr, temp in enumerate(temps):
            # tell each states object to resample each of its component state chains
            # (marginalizing out the component emissions)
            # this call will also delete any instantiated component emissions (in
            # principle)
            for s in self.states_list:
                s.resample(temp_noise=temp)
            # then resample component emissions so that the other models can be
            # resampled
            for s in self.states_list:
                s.instantiate_component_emissions(temp)
            # resample component models (this call will not cause any states objects
            # referenced by self.states_list to resample, but the parameter
            # resampling involved in resampling these models will need the component
            # emissions)
            for c in self.component_models:
                c.resample_model()

    def generate(self,T,keep=True):
        """Sample T observations; returns (summed obs, per-component obs, states)."""
        tempstates = \
                FactorialStates(
                        data=None,
                        T=T,
                        keep=keep,
                        component_models=self.component_models,
                        )
        sumobs, allobs, allstates = tempstates.sumobs, tempstates.allobs, tempstates.allstates
        if keep:
            # mark so downstream code can tell generated data from real data
            tempstates.added_with_generate = True
            tempstates.data = sumobs
            self.states_list.append(tempstates)
        return sumobs, allobs, allstates

    def plot(self,color=None): # TODO
        # this is ALWAYS useful
        raise NotImplementedError
######################################
# classes for the component models #
######################################
# NOTE: component_models must have scalar gaussian observation
# distributions! this code, which references the same cached means and vars as
# the states, requires it!
class FactorialComponentHSMM(pyhsmm.models.HSMM):
    """HSMM component of a factorial model.

    Every observation distribution must be a scalar Gaussian; their means and
    variances are mirrored in the shared self.means / self.vars arrays that
    the states code also references.
    """

    def __init__(self,**kwargs): # no explicit parameter naming because DRY
        assert 'obs_distns' in kwargs
        obs_distns = kwargs['obs_distns']
        self.means, self.vars = np.zeros(len(obs_distns)), np.zeros(len(obs_distns))
        for idx, distn in enumerate(obs_distns):
            assert isinstance(distn,pyhsmm.basic.distributions.ScalarGaussian),\
                    'Factorial model components must have scalar Gaussian observation distributions!'
            # indexing with (idx, ...) yields a 0-d numpy view into the shared
            # array, so updates through distn.mubin / distn.sigmasqbin and
            # through self.means / self.vars stay in sync -- confirm the
            # distribution classes actually write through these views
            distn.mubin = self.means[idx,...]
            distn.sigmasqbin = self.vars[idx,...]
            self.means[idx] = distn.mu
            self.vars[idx] = distn.sigmasq
        super(FactorialComponentHSMM,self).__init__(**kwargs)

    def generate(self,T,keep=True):
        """Sample T steps using the factorial-aware states class."""
        # just like parent method, except uses our own states class
        tempstates = \
                FactorialComponentHSMMStates(
                        means=self.means,
                        vars=self.vars,
                        model=self,
                        T=T,
                        trunc=self.trunc
                        )
        return self._generate(tempstates,keep)

    def add_factorial_sumdata(self,data):
        """Attach summed observations as a column vector of shape (T, 1)."""
        assert data.ndim == 1 or data.ndim == 2
        data = np.reshape(data,(-1,1))
        self.states_list.append(
                FactorialComponentHSMMStates(
                    model=self,
                    data=data,
                    means=self.means,
                    vars=self.vars,
                    trunc=self.trunc,
                    ))
        # the added states object will get its resample() method called, but
        # since that object doesn't do anything at the moment,
        # resample_factorial needs to be called higher up
class FactorialComponentHSMMPossibleChangepoints(FactorialComponentHSMM):
    """FactorialComponentHSMM variant restricted to known possible changepoints."""

    def add_factorial_sumdata(self,data,changepoints):
        """Attach summed observations together with their candidate changepoints.

        :param data: 1-D or 2-D array of summed observations (reshaped to (T, 1)),
                     or None
        :param changepoints: candidate changepoints forwarded to the states class
        """
        if data is not None:
            assert data.ndim == 1 or data.ndim == 2
            data = np.reshape(data,(-1,1))
        self.states_list.append(
                FactorialComponentHSMMStatesPossibleChangepoints(
                    data=data,
                    changepoints=changepoints,
                    means=self.means,
                    vars=self.vars,
                    model=self,
                    trunc=self.trunc,
                    ))

    def generate(self,T,keep=True):
        """Sample T steps using the changepoint-aware states class."""
        # just like parent method, except uses our own states class
        tempstates = \
                FactorialComponentHSMMStatesPossibleChangepoints(
                        means=self.means,
                        vars=self.vars,
                        T=T,
                        model=self,
                        trunc=self.trunc
                        )
        return self._generate(tempstates,keep)
# TODO hmm versions below here
# class factorial_component_hmm(pyhsmm.models.hmm):
# means = None
# vars = None
# def add_factorial_sumdata(self,data,**kwargs):
# self.states_list.append(pyhsmm.plugins.factorial.states.factorial_component_hmm_states(data,**kwargs))
# class factorial_component_hmm_possiblechangepoints(pyhsmm.models.hmm):
# means = None
# vars = None
# def add_factorial_sumdata(self,data,changepoints,**kwargs):
# self.states_list.append(pyhsmm.plugins.factorial.states.factorial_component_hmm_states_possiblechangepoints(data,changepoints,**kwargs))
|
fivejjs/pyhsmm-factorial
|
models.py
|
Python
|
mit
| 6,665
|
[
"Gaussian"
] |
a8a8210c8fff7ae6fab04657bbaf484058700e9b4ce0450e76b2aa3ec1b94150
|
""" Base class for all services
"""
import os
import types
import time
import DIRAC
from DIRAC.Core.DISET.private.FileHelper import FileHelper
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR, isReturnStructure
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.ConfigurationSystem.Client.Config import gConfig
from DIRAC.Core.Utilities import Time
__RCSID__ = "$Id$"
def getServiceOption(serviceInfo, optionName, defaultValue):
  """ Get service option resolving default values from the master service.

      :param serviceInfo: dictionary holding the service's 'csPaths'
      :param optionName: option name, or an absolute CS path if it starts with '/'
      :param defaultValue: value returned when no CS entry is found
  """
  # An absolute CS path bypasses the per-service search entirely.
  if optionName[0] == "/":
    return gConfig.getValue(optionName, defaultValue)
  # Walk the service's CS paths in order; first defined value wins.
  for searchPath in serviceInfo['csPaths']:
    lookup = gConfig.getOption("%s/%s" % (searchPath, optionName), defaultValue)
    if lookup['OK']:
      return lookup['Value']
  return defaultValue
class RequestHandler(object):
  """
  Base class for DISET service request handlers.

  One handler instance is created per incoming action; shared class-level
  state (service info, lock manager, message broker, transport pool,
  monitor) is injected once via _rh__initializeClass() before any instance
  is built.  Concrete services implement export_* (RPC), transfer_* (file
  transfers), conn_* (connection callbacks) and msg_* (message) methods.
  """
  class ConnectionError(Exception):
    # Internal error type raised when receiving data from the client
    # transport fails while serving an action.
    def __init__(self, msg):
      self.__msg = msg
    def __str__(self):
      return "ConnectionError: %s" % self.__msg
  def __init__(self, handlerInitDict, trid):
    """
    Constructor

    :type handlerInitDict: dictionary
    :param handlerInitDict: Information vars for the service
    :type trid: object
    :param trid: Transport to use
    """
    # Initially serviceInfoDict is the one base to the RequestHandler
    # the one created in _rh_initializeClass
    # FSM help me for I have made a complex stuff that I will forget in 5 mins :P
    handlerInitDict.update(self.__srvInfoDict)
    self.serviceInfoDict = handlerInitDict
    self.__trid = trid
  def initialize(self):
    """Initialize this instance of the handler (to be overwritten)
    """
    pass
  @classmethod
  def _rh__initializeClass(cls, serviceInfoDict, lockManager, msgBroker, monitor):
    """
    Class initialization (not to be called by hand or overwritten!!)

    :type serviceInfoDict: dictionary
    :param serviceInfoDict: Information vars for the service
    :type msgBroker: object
    :param msgBroker: Message delivery
    :type lockManager: object
    :param lockManager: Lock manager to use
    """
    cls.__srvInfoDict = serviceInfoDict
    cls.__svcName = cls.__srvInfoDict['serviceName']
    cls.__lockManager = lockManager
    cls.__msgBroker = msgBroker
    cls.__trPool = msgBroker.getTransportPool()
    cls.__monitor = monitor
    cls.log = gLogger
  def getRemoteAddress(self):
    """
    Get the address of the remote peer.

    :return: Address of remote peer.
    """
    return self.__trPool.get(self.__trid).getRemoteAddress()
  def getRemoteCredentials(self):
    """
    Get the credentials of the remote peer.

    :return: Credentials dictionary of remote peer.
    """
    return self.__trPool.get(self.__trid).getConnectingCredentials()
  @classmethod
  def getCSOption(cls, optionName, defaultValue=False):
    """
    Get an option from the CS section of the services

    :return: Value for serviceSection/optionName in the CS being defaultValue the default
    """
    return cls.srv_getCSOption(optionName, defaultValue)
  def _rh_executeAction(self, proposalTuple):
    """
    Execute an action.

    :type proposalTuple: tuple
    :param proposalTuple: Type of action to execute. First position of the tuple must be the type
                          of action to execute. The second position is the action itself.
    """
    # proposalTuple[1] is an (actionType, action) pair, e.g. ("RPC", "ping")
    actionTuple = proposalTuple[1]
    gLogger.debug("Executing %s:%s action" % actionTuple)
    startTime = time.time()
    actionType = actionTuple[0]
    # Remember the action for srv_getActionTuple()
    self.serviceInfoDict['actionTuple'] = actionTuple
    try:
      if actionType == "RPC":
        retVal = self.__doRPC(actionTuple[1])
      elif actionType == "FileTransfer":
        retVal = self.__doFileTransfer(actionTuple[1])
      elif actionType == "Connection":
        retVal = self.__doConnection(actionTuple[1])
      else:
        return S_ERROR("Unknown action %s" % actionType)
    except RequestHandler.ConnectionError as excp:
      gLogger.error("ConnectionError", str(excp))
      return S_ERROR(excp)
    # Enforce the S_OK/S_ERROR return convention on handler methods
    if not isReturnStructure(retVal):
      message = "Method %s for action %s does not return a S_OK/S_ERROR!" % (actionTuple[1], actionTuple[0])
      gLogger.error(message)
      retVal = S_ERROR(message)
    self.__logRemoteQueryResponse(retVal, time.time() - startTime)
    result = self.__trPool.send(self.__trid, retVal) # this will delete the value from the S_OK(value)
    del retVal
    retVal = None
    return result
  #####
  #
  # File to/from Server Methods
  #
  #####
  def __doFileTransfer(self, sDirection):
    """
    Execute a file transfer action

    :type sDirection: string
    :param sDirection: Direction of the transfer
    :return: S_OK/S_ERROR
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal['OK']:
      raise RequestHandler.ConnectionError("Error while receiving file description %s %s" %
                                           (self.srv_getFormattedRemoteCredentials(), retVal['Message']))
    # fileInfo is the (id, token[, size]) description sent by the client
    fileInfo = retVal['Value']
    # Normalize direction to lowerCamelCase ("FromClient" -> "fromClient")
    sDirection = "%s%s" % (sDirection[0].lower(), sDirection[1:])
    if "transfer_%s" % sDirection not in dir(self):
      self.__trPool.send(self.__trid, S_ERROR("Service can't transfer files %s" % sDirection))
      return
    retVal = self.__trPool.send(self.__trid, S_OK("Accepted"))
    if not retVal['OK']:
      return retVal
    self.__logRemoteQuery("FileTransfer/%s" % sDirection, fileInfo)
    self.__lockManager.lock("FileTransfer/%s" % sDirection)
    try:
      try:
        fileHelper = FileHelper(self.__trPool.get(self.__trid))
        if sDirection == "fromClient":
          fileHelper.setDirection("fromClient")
          uRetVal = self.transfer_fromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
        elif sDirection == "toClient":
          fileHelper.setDirection("toClient")
          uRetVal = self.transfer_toClient(fileInfo[0], fileInfo[1], fileHelper)
        elif sDirection == "bulkFromClient":
          fileHelper.setDirection("fromClient")
          uRetVal = self.transfer_bulkFromClient(fileInfo[0], fileInfo[1], fileInfo[2], fileHelper)
        elif sDirection == "bulkToClient":
          fileHelper.setDirection("toClient")
          uRetVal = self.transfer_bulkToClient(fileInfo[0], fileInfo[1], fileHelper)
        elif sDirection == "listBulk":
          fileHelper.setDirection("toClient")
          uRetVal = self.transfer_listBulk(fileInfo[0], fileInfo[1], fileHelper)
        else:
          return S_ERROR("Direction %s does not exist!!!" % sDirection)
        if uRetVal['OK'] and not fileHelper.finishedTransmission():
          gLogger.error("You haven't finished receiving/sending the file", str(fileInfo))
          return S_ERROR("Incomplete transfer")
        del fileHelper
        fileHelper = None
        return uRetVal
      finally:
        self.__lockManager.unlock("FileTransfer/%s" % sDirection)
    except Exception as e:  # pylint: disable=broad-except
      gLogger.exception("Uncaught exception when serving Transfer", "%s" % sDirection, lException=e)
      return S_ERROR("Server error while serving %s: %s" % (sDirection, repr(e)))
  # Default transfer_* implementations: services override only the
  # directions they actually support.
  def transfer_fromClient(self, fileId, token, fileSize, fileHelper):  # pylint: disable=unused-argument
    return S_ERROR("This server does no allow receiving files")
  def transfer_toClient(self, fileId, token, fileHelper):  # pylint: disable=unused-argument
    return S_ERROR("This server does no allow sending files")
  def transfer_bulkFromClient(self, bulkId, token, bulkSize, fileHelper):  # pylint: disable=unused-argument
    return S_ERROR("This server does no allow bulk receiving")
  def transfer_bulkToClient(self, bulkId, token, fileHelper):  # pylint: disable=unused-argument
    return S_ERROR("This server does no allow bulk sending")
  def transfer_listBulk(self, bulkId, token, fileHelper):  # pylint: disable=unused-argument
    return S_ERROR("This server does no allow bulk listing")
  #####
  #
  # RPC Methods
  #
  #####
  def __doRPC(self, method):
    """
    Execute an RPC action

    :type method: string
    :param method: Method to execute
    :return: S_OK/S_ERROR
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal['OK']:
      raise RequestHandler.ConnectionError("Error while receiving arguments %s %s" %
                                           (self.srv_getFormattedRemoteCredentials(), retVal['Message']))
    args = retVal['Value']
    self.__logRemoteQuery("RPC/%s" % method, args)
    return self.__RPCCallFunction(method, args)
  def __RPCCallFunction(self, method, args):
    """
    Check the arguments then call the RPC function

    :type method: string
    :param method: Name of the method to call (dispatched as export_<method>)
    :type args: tuple
    :param args: Arguments sent by the remote client
    :return: S_OK/S_ERROR
    """
    realMethod = "export_%s" % method
    gLogger.debug("RPC to %s" % realMethod)
    try:
      # Get the method we are trying to call
      oMethod = getattr(self, realMethod)
    except:
      return S_ERROR("Unknown method %s" % method)
    # Check if the client sends correct arguments
    dRetVal = self.__checkExpectedArgumentTypes(method, args)
    if not dRetVal['OK']:
      return dRetVal
    # Lock the method with Semaphore to avoid too many calls at the same time
    self.__lockManager.lock("RPC/%s" % method)
    # Register the transport with the message broker for the duration of the
    # call so idle reads on the connection are serviced
    self.__msgBroker.addTransportId(self.__trid,
                                    self.serviceInfoDict['serviceName'],
                                    idleRead=True)
    try:
      try:
        # Trying to execute the method
        uReturnValue = oMethod(*args)
        return uReturnValue
      finally:
        # Unlock method
        self.__lockManager.unlock("RPC/%s" % method)
        self.__msgBroker.removeTransport(self.__trid, closeTransport=False)
    except Exception as e:
      gLogger.exception("Uncaught exception when serving RPC", "Function %s" % method, lException=e)
      return S_ERROR("Server error while serving %s: %s" % (method, str(e)))
  def __checkExpectedArgumentTypes(self, method, args):
    """
    Check that the arguments received match the ones expected

    :type method: string
    :param method: Method to check against
    :type args: tuple
    :param args: Arguments to check
    :return: S_OK/S_ERROR
    """
    # The expected signature is declared by the handler as a class attribute
    # named types_<method>
    sListName = "types_%s" % method
    try:
      oTypesList = getattr(self, sListName)
    except:
      gLogger.error("There's no types info for method", "export_%s" % method)
      return S_ERROR("Handler error for server %s while processing method %s" % (self.serviceInfoDict['serviceName'],
                                                                                 method))
    try:
      mismatch = False
      for iIndex in range(min(len(oTypesList), len(args))):
        # If None skip the parameter
        if oTypesList[iIndex] is None:
          continue
        # If parameter is a list or a tuple check types inside
        elif isinstance(oTypesList[iIndex], (tuple, list)):
          if not isinstance(args[iIndex], tuple(oTypesList[iIndex])):
            mismatch = True
        # else check the parameter
        elif not isinstance(args[iIndex], oTypesList[iIndex]):
          mismatch = True
        # Has there been a mismatch?
        if mismatch:
          sError = "Type mismatch in parameter %d (starting with param 0) Received %s, expected %s" % (
              iIndex, type(args[iIndex]), str(oTypesList[iIndex]))
          return S_ERROR(sError)
      if len(args) < len(oTypesList):
        return S_ERROR("Function %s expects at least %s arguments" % (method, len(oTypesList)))
    except Exception as v:
      sError = "Error in parameter check: %s" % str(v)
      gLogger.exception(sError)
      return S_ERROR(sError)
    return S_OK()
  ####
  #
  # Connection methods
  #
  ####
  # Expected (positional) argument types for each connection callback.
  # NOTE: uses Python 2 'types' module names (StringTypes/DictType).
  __connectionCallbackTypes = {'new': [types.StringTypes, types.DictType],
                               'connected': [],
                               'drop': []}
  def __doConnection(self, methodName):
    """
    Connection callbacks
    """
    retVal = self.__trPool.receive(self.__trid)
    if not retVal['OK']:
      raise RequestHandler.ConnectionError(
          "Error while receiving arguments %s %s" % (self.srv_getFormattedRemoteCredentials(), retVal['Message']))
    args = retVal['Value']
    return self._rh_executeConnectionCallback(methodName, args)
  def _rh_executeConnectionCallback(self, methodName, args=False):
    """
    Validate and dispatch a connection event to the handler's
    conn_<methodName> callback (if it defines one).

    :type methodName: string
    :param methodName: one of 'new', 'connected' or 'drop'
    :type args: tuple/list or False
    :param args: callback arguments; if False, the arguments stored at
                 connection time are reused
    :return: S_OK/S_ERROR
    """
    self.__logRemoteQuery("Connection/%s" % methodName, args)
    if methodName not in RequestHandler.__connectionCallbackTypes:
      return S_ERROR("Invalid connection method %s" % methodName)
    cbTypes = RequestHandler.__connectionCallbackTypes[methodName]
    if args:
      if len(args) != len(cbTypes):
        return S_ERROR("Expected %s arguments" % len(cbTypes))
      for i in range(len(cbTypes)):
        if not isinstance(args[i], cbTypes[i]):
          return S_ERROR("Invalid type for argument %s" % i)
      # Remember the connect arguments so later callbacks can reuse them
      self.__trPool.associateData(self.__trid, "connectData", args)
    if not args:
      args = self.__trPool.getAssociatedData(self.__trid, "connectData")
    realMethod = "conn_%s" % methodName
    gLogger.debug("Callback to %s" % realMethod)
    try:
      oMethod = getattr(self, realMethod)
    except:
      # No callback defined by handler
      return S_OK()
    try:
      if args:
        uReturnValue = oMethod(self.__trid, *args)
      else:
        uReturnValue = oMethod(self.__trid)
      return uReturnValue
    except Exception as e:
      gLogger.exception("Uncaught exception when serving Connect", "Function %s" % realMethod, lException=e)
      return S_ERROR("Server error while serving %s: %s" % (methodName, str(e)))
  def _rh_executeMessageCallback(self, msgObj):
    """
    Dispatch a received message to the handler's msg_<name> callback,
    serializing calls with the lock manager and enforcing the S_OK/S_ERROR
    return convention.

    :return: S_OK/S_ERROR
    """
    msgName = msgObj.getName()
    if not self.__msgBroker.getMsgFactory().messageExists(self.__svcName, msgName):
      return S_ERROR("Unknown message %s" % msgName)
    methodName = "msg_%s" % msgName
    self.__logRemoteQuery("Message/%s" % methodName, msgObj.dumpAttrs())
    startTime = time.time()
    try:
      oMethod = getattr(self, methodName)
    except:
      return S_ERROR("Handler function for message %s does not exist!" % msgName)
    self.__lockManager.lock(methodName)
    try:
      try:
        uReturnValue = oMethod(msgObj)
      except Exception as e:
        gLogger.exception("Uncaught exception when serving message", methodName, lException=e)
        return S_ERROR("Server error while serving %s: %s" % (msgName, str(e)))
    finally:
      self.__lockManager.unlock(methodName)
    if not isReturnStructure(uReturnValue):
      gLogger.error("Message does not return a S_OK/S_ERROR", msgName)
      uReturnValue = S_ERROR("Message %s does not return a S_OK/S_ERROR" % msgName)
    self.__logRemoteQueryResponse(uReturnValue, time.time() - startTime)
    return uReturnValue
  ####
  #
  # Auth methods
  #
  ####
  # @classmethod
  # def __authQuery( cls, method ):
  #  """
  #  Check if connecting user is allowed to perform an action
  #
  #  :type method: string
  #  :param method: Method to check
  #  :return: S_OK/S_ERROR
  #  """
  #  return cls.__srvInfoDict[ 'authManager' ].authQuery( method, cls.getRemoteCredentials() )
  def __logRemoteQuery(self, method, args):
    """
    Log the contents of a remote query

    :type method: string
    :param method: Method to log
    :type args: tuple
    :param args: Arguments of the method called
    """
    # Parameters may carry sensitive data: mask them unless the service
    # explicitly opts out via the MaskRequestParams CS option
    if self.srv_getCSOption("MaskRequestParams", True):
      argsString = "<masked>"
    else:
      argsString = "\n\t%s\n" % ",\n\t".join([str(arg)[:50] for arg in args])
    gLogger.notice("Executing action", "%s %s(%s)" % (self.srv_getFormattedRemoteCredentials(),
                                                      method,
                                                      argsString))
  def __logRemoteQueryResponse(self, retVal, elapsedTime):
    """
    Log the result of a query

    :type retVal: dictionary
    :param retVal: Return value of the query
    :type elapsedTime: float
    :param elapsedTime: Time taken to serve the query, in seconds
    """
    if retVal['OK']:
      argsString = "OK"
    else:
      argsString = "ERROR: %s" % retVal['Message']
    gLogger.notice("Returning response", "%s (%.2f secs) %s" % (self.srv_getFormattedRemoteCredentials(),
                                                                elapsedTime, argsString))
  ####
  #
  # Default ping method
  #
  ####
  types_ping = []
  auth_ping = ['all']
  def export_ping(self):
    """
    Default RPC available on every service: return basic health information
    (version, uptime, load average, cpu times).
    """
    dInfo = {}
    dInfo['version'] = DIRAC.version
    dInfo['time'] = Time.dateTime()
    # Uptime
    try:
      with open("/proc/uptime") as oFD:
        iUptime = long(float(oFD.readline().split()[0].strip()))
      dInfo['host uptime'] = iUptime
    except:
      pass
    startTime = self.serviceInfoDict['serviceStartTime']
    dInfo['service start time'] = self.serviceInfoDict['serviceStartTime']
    serviceUptime = Time.dateTime() - startTime
    dInfo['service uptime'] = serviceUptime.days * 3600 + serviceUptime.seconds
    # Load average
    try:
      with open("/proc/loadavg") as oFD:
        sLine = oFD.readline()
      dInfo['load'] = " ".join(sLine.split()[:3])
    except:
      pass
    dInfo['name'] = self.serviceInfoDict['serviceName']
    stTimes = os.times()
    dInfo['cpu times'] = {'user time': stTimes[0],
                          'system time': stTimes[1],
                          'children user time': stTimes[2],
                          'children system time': stTimes[3],
                          'elapsed real time': stTimes[4]
                          }
    return S_OK(dInfo)
  types_echo = [basestring]
  @staticmethod
  def export_echo(data):
    """
    This method used for testing the performance of a service
    """
    return S_OK(data)
  ####
  #
  # Utilities methods
  #
  ####
  def srv_getRemoteAddress(self):
    """
    Get the address of the remote peer.

    :return: Address of remote peer.
    """
    return self.__trPool.get(self.__trid).getRemoteAddress()
  def srv_getRemoteCredentials(self):
    """
    Get the credentials of the remote peer.

    :return: Credentials dictionary of remote peer.
    """
    return self.__trPool.get(self.__trid).getConnectingCredentials()
  def srv_getFormattedRemoteCredentials(self):
    """Return the remote peer credentials formatted for log messages."""
    tr = self.__trPool.get(self.__trid)
    if tr:
      return tr.getFormattedCredentials()
    return "unknown"
  @classmethod
  def srv_getCSOption(cls, optionName, defaultValue=False):
    """
    Get an option from the CS section of the services

    :return: Value for serviceSection/optionName in the CS being defaultValue the default
    """
    if optionName[0] == "/":
      return gConfig.getValue(optionName, defaultValue)
    for csPath in cls.__srvInfoDict['csPaths']:
      result = gConfig.getOption("%s/%s" % (csPath, optionName, ), defaultValue)
      if result['OK']:
        return result['Value']
    return defaultValue
  def srv_getTransportID(self):
    """Return the transport id of the current connection."""
    return self.__trid
  def srv_getClientSetup(self):
    """Return the DIRAC setup the client is running in."""
    return self.serviceInfoDict['clientSetup']
  def srv_getClientVO(self):
    """Return the VO of the connecting client."""
    return self.serviceInfoDict['clientVO']
  def srv_getActionTuple(self):
    """Return the (actionType, action) tuple being served, if known."""
    if 'actionTuple' not in self.serviceInfoDict:
      return ('Unknown yet', )
    return self.serviceInfoDict['actionTuple']
  @classmethod
  def srv_getURL(cls):
    """Return the URL of this service."""
    return cls.__srvInfoDict['URL']
  @classmethod
  def srv_getServiceName(cls):
    """Return the name of this service."""
    return cls.__srvInfoDict['serviceName']
  @classmethod
  def srv_getMonitor(cls):
    """Return the monitor object injected at class initialization."""
    return cls.__monitor
  def srv_msgReply(self, msgObj):
    """Send a message back over the current transport."""
    return self.__msgBroker.sendMessage(self.__trid, msgObj)
  @classmethod
  def srv_msgSend(cls, trid, msgObj):
    """Send a message over an arbitrary transport id."""
    return cls.__msgBroker.sendMessage(trid, msgObj)
  @classmethod
  def srv_msgCreate(cls, msgName):
    """Create a new message object for this service."""
    return cls.__msgBroker.getMsgFactory().createMessage(cls.__svcName, msgName)
  @classmethod
  def srv_disconnectClient(cls, trid):
    """Remove the given client transport from the message broker."""
    return cls.__msgBroker.removeTransport(trid)
  def srv_disconnect(self, trid=None):
    """Remove a transport from the broker (defaults to the current one)."""
    if not trid:
      trid = self.srv_getTransportID()
    return self.__msgBroker.removeTransport(trid)
|
arrabito/DIRAC
|
Core/DISET/RequestHandler.py
|
Python
|
gpl-3.0
| 19,987
|
[
"DIRAC"
] |
86c1a87e4d200ec88cf94088daba957bec801d7f994c584b292b975955c5a408
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements commands for running and interacting with Fuchsia on devices."""
import boot_data
import logging
import os
import pkg_repo
import re
import subprocess
import target
import time
import ffx_session
from common import ATTACH_RETRY_SECONDS, EnsurePathExists, \
GetHostToolPathFromPlatform, RunGnSdkFunction, \
SubprocessCallWithTimeout
# The maximum times to attempt mDNS resolution when connecting to a freshly
# booted Fuchsia instance before aborting.
BOOT_DISCOVERY_ATTEMPTS = 30
# Number of failed connection attempts before redirecting system logs to stdout.
CONNECT_RETRY_COUNT_BEFORE_LOGGING = 10
# Number of seconds between each device discovery.
BOOT_DISCOVERY_DELAY_SECS = 4
# Time between a reboot command is issued and when connection attempts from the
# host begin.
_REBOOT_SLEEP_PERIOD = 20
# File indicating version of an image downloaded to the host
_BUILD_ARGS = "buildargs.gn"
# File on device that indicates Fuchsia version.
_ON_DEVICE_VERSION_FILE = '/config/build-info/version'
# File on device that indicates Fuchsia product.
_ON_DEVICE_PRODUCT_FILE = '/config/build-info/product'
def GetTargetType():
  """Entry point used by the runner scripts to select this target class."""
  return DeviceTarget
class DeviceTarget(target.Target):
  """Prepares a device to be used as a deployment target. Depending on the
  command line parameters, it automatically handles a number of preparatory
  steps relating to address resolution.

  If |_node_name| is unset:
    If there is one running device, use it for deployment and execution.
    If there are more than one running devices, then abort and instruct the
    user to re-run the command with |_node_name|
  If |_node_name| is set:
    If there is a running device with a matching nodename, then it is used
    for deployment and execution.
  If |_host| is set:
    Deploy to a device at the host IP address as-is."""
  def __init__(self, out_dir, target_cpu, host, node_name, port, ssh_config,
               fuchsia_out_dir, os_check, logs_dir, system_image_dir):
    """out_dir: The directory which will contain the files that are
                generated to support the deployment.
    target_cpu: The CPU architecture of the deployment target. Can be
                "x64" or "arm64".
    host: The address of the deployment target device.
    node_name: The node name of the deployment target device.
    port: The port of the SSH service on the deployment target device.
    ssh_config: The path to SSH configuration data.
    fuchsia_out_dir: The path to a Fuchsia build output directory, for
                     deployments to devices paved with local Fuchsia builds.
    os_check: If 'check', the target's SDK version must match.
              If 'update', the target will be repaved if the SDK versions
              mismatch.
              If 'ignore', the target's SDK version is ignored.
    system_image_dir: The directory which contains the files used to pave the
                      device."""
    super(DeviceTarget, self).__init__(out_dir, target_cpu, logs_dir)
    self._host = host
    self._port = port
    self._fuchsia_out_dir = None
    # Fall back to the FUCHSIA_NODENAME environment variable when no node
    # name is given on the command line.
    self._node_name = node_name or os.environ.get('FUCHSIA_NODENAME')
    self._system_image_dir = system_image_dir
    self._os_check = os_check
    self._pkg_repo = None
    self._ffx_target = None
    # A repave needs an image to pave with; fail fast at construction time.
    if not self._system_image_dir and self._os_check != 'ignore':
      raise Exception("Image directory must be provided if a repave is needed.")
    if self._host and self._node_name:
      raise Exception('Only one of "--host" or "--name" can be specified.')
    if fuchsia_out_dir:
      if ssh_config:
        raise Exception('Only one of "--fuchsia-out-dir" or "--ssh_config" can '
                        'be specified.')
      self._fuchsia_out_dir = os.path.expanduser(fuchsia_out_dir)
      # Use SSH keys from the Fuchsia output directory.
      self._ssh_config_path = os.path.join(self._fuchsia_out_dir, 'ssh-keys',
                                           'ssh_config')
      self._os_check = 'ignore'
    elif ssh_config:
      # Use the SSH config provided via the commandline.
      self._ssh_config_path = os.path.expanduser(ssh_config)
    else:
      # Otherwise fall back to the SSH config shipped with the Fuchsia GN SDK.
      return_code, ssh_config_raw, _ = RunGnSdkFunction(
          'fuchsia-common.sh', 'get-fuchsia-sshconfig-file')
      if return_code != 0:
        raise Exception('Could not get Fuchsia ssh config file.')
      self._ssh_config_path = os.path.expanduser(ssh_config_raw.strip())
  @staticmethod
  def CreateFromArgs(args):
    """Build a DeviceTarget from parsed command-line arguments."""
    return DeviceTarget(args.out_dir, args.target_cpu, args.host,
                        args.node_name, args.port, args.ssh_config,
                        args.fuchsia_out_dir, args.os_check, args.logs_dir,
                        args.system_image_dir)
  @staticmethod
  def RegisterArgs(arg_parser):
    """Register the device-specific command-line arguments."""
    device_args = arg_parser.add_argument_group(
        'device', 'External device deployment arguments')
    device_args.add_argument('--host',
                             help='The IP of the target device. Optional.')
    device_args.add_argument('--node-name',
                             help='The node-name of the device to boot or '
                             'deploy to. Optional, will use the first '
                             'discovered device if omitted.')
    device_args.add_argument('--port',
                             '-p',
                             type=int,
                             default=None,
                             help='The port of the SSH service running on the '
                             'device. Optional.')
    device_args.add_argument('--ssh-config',
                             '-F',
                             help='The path to the SSH configuration used for '
                             'connecting to the target device.')
    device_args.add_argument(
        '--os-check',
        choices=['check', 'update', 'ignore'],
        default='ignore',
        help="Sets the OS version enforcement policy. If 'check', then the "
        "deployment process will halt if the target\'s version doesn\'t "
        "match. If 'update', then the target device will automatically "
        "be repaved. If 'ignore', then the OS version won\'t be checked.")
    device_args.add_argument('--system-image-dir',
                             help="Specify the directory that contains the "
                             "Fuchsia image used to pave the device. Only "
                             "needs to be specified if 'os_check' is not "
                             "'ignore'.")
  def _Discover(self):
    """Queries mDNS for the IP address of a booted Fuchsia instance whose name
    matches |_node_name| on the local area network. If |_node_name| isn't
    specified, and there is only one device on the network, then returns the
    IP address of that device.

    Sets |_host| (and |_node_name| when it was unset) and returns True if the
    device was found; returns False otherwise."""
    dev_finder_path = GetHostToolPathFromPlatform('device-finder')
    with open(os.devnull, 'w') as devnull:
      if self._node_name:
        command = [
            dev_finder_path,
            'resolve',
            '-device-limit',
            '1',  # Exit early as soon as a host is found.
            self._node_name
        ]
        proc = subprocess.Popen(command,
                                stdout=subprocess.PIPE,
                                stderr=devnull,
                                text=True)
      else:
        proc = self.RunFFXCommand(['target', 'list', '-f', 'simple'],
                                  stdout=subprocess.PIPE,
                                  stderr=devnull,
                                  text=True)
      output = set(proc.communicate()[0].strip().split('\n'))
      if proc.returncode != 0:
        return False
    if self._node_name:
      # Handle the result of "device-finder resolve".
      self._host = output.pop().strip()
    else:
      name_host_pairs = [x.strip().split(' ') for x in output]
      if len(name_host_pairs) > 1:
        raise Exception('More than one device was discovered on the network. '
                        'Use --node-name <name> to specify the device to use.'
                        'List of devices: {}'.format(output))
      assert len(name_host_pairs) == 1
      # Check if device has both address and name.
      if len(name_host_pairs[0]) < 2:
        return False
      self._host, self._node_name = name_host_pairs[0]
    logging.info('Found device "%s" at address %s.' % (self._node_name,
                                                       self._host))
    return True
  def Start(self):
    """Discovers/connects to the device and, depending on |_os_check|,
    verifies the installed OS version and repaves when needed."""
    if self._host:
      self._ConnectToTarget()
    else:
      device_found = self._Discover()
      if device_found:
        self._ConnectToTarget()
        if self._os_check == 'ignore':
          return
        # If accessible, check version.
        new_version = self._GetSdkHash()
        installed_version = self._GetInstalledSdkVersion()
        if new_version == installed_version:
          logging.info('Fuchsia version installed on device matches Chromium '
                       'SDK version. Skipping pave.')
        else:
          if self._os_check == 'check':
            raise Exception('Image and Fuchsia version installed on device '
                            'does not match. Abort.')
          logging.info('Putting device in recovery mode')
          self.RunCommandPiped(['dm', 'reboot-recovery'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
          self._ProvisionDevice()
      else:
        if self._node_name:
          logging.info('Could not detect device %s.' % self._node_name)
          if self._os_check == 'update':
            logging.info('Assuming it is in zedboot. Continuing with paving...')
            self._ProvisionDevice()
            return
        raise Exception('Could not find device. If the device is connected '
                        'to the host remotely, make sure that --host flag '
                        'is set and that remote serving is set up.')
  def GetFfxTarget(self):
    """Return the ffx target handle (valid after _ConnectToTarget())."""
    assert self._ffx_target
    return self._ffx_target
  def _GetInstalledSdkVersion(self):
    """Retrieves installed OS version from device.

    Returns:
      Tuple of strings, containing (product, version number)
    """
    return (self.GetFileAsString(_ON_DEVICE_PRODUCT_FILE).strip(),
            self.GetFileAsString(_ON_DEVICE_VERSION_FILE).strip())
  def _GetSdkHash(self):
    """Read version of hash in pre-installed package directory.
    Returns:
      Tuple of (product, version) of image to be installed.
    Raises:
      VersionNotFoundError: if contents of buildargs.gn cannot be found or the
      version number cannot be extracted.
    """
    # NOTE(review): VersionNotFoundError is not defined in this file; it is
    # presumably defined/imported elsewhere in these build scripts — verify.
    # TODO(crbug.com/1261961): Stop processing buildargs.gn directly.
    with open(os.path.join(self._system_image_dir, _BUILD_ARGS)) as f:
      contents = f.readlines()
    if not contents:
      raise VersionNotFoundError('Could not retrieve %s' % _BUILD_ARGS)
    version_key = 'build_info_version'
    product_key = 'build_info_product'
    info_keys = [product_key, version_key]
    version_info = {}
    for line in contents:
      for k in info_keys:
        match = re.match(r'%s = "(.*)"' % k, line)
        if match:
          version_info[k] = match.group(1)
    if not (version_key in version_info and product_key in version_info):
      raise VersionNotFoundError(
          'Could not extract version info from %s. Contents: %s' %
          (_BUILD_ARGS, contents))
    return (version_info[product_key], version_info[version_key])
  def GetPkgRepo(self):
    """Lazily create and return the package repository used for deploys."""
    if not self._pkg_repo:
      if self._fuchsia_out_dir:
        # Deploy to an already-booted device running a local Fuchsia build.
        self._pkg_repo = pkg_repo.ExternalPkgRepo(
            os.path.join(self._fuchsia_out_dir, 'amber-files'),
            os.path.join(self._fuchsia_out_dir, '.build-id'))
      else:
        # Create an ephemeral package repository, then start both "pm serve" as
        # well as the bootserver.
        self._pkg_repo = pkg_repo.ManagedPkgRepo(self)
    return self._pkg_repo
  def _ParseNodename(self, output):
    """Extract the nodename from bootserver output and wait for the device
    to come back on the network."""
    # Parse the nodename from bootserver stdout.
    m = re.search(r'.*Proceeding with nodename (?P<nodename>.*)$', output,
                  re.MULTILINE)
    if not m:
      raise Exception('Couldn\'t parse nodename from bootserver output.')
    self._node_name = m.groupdict()['nodename']
    logging.info('Booted device "%s".' % self._node_name)
    # Repeatedly search for a device for |BOOT_DISCOVERY_ATTEMPT|
    # number of attempts. If a device isn't found, wait
    # |BOOT_DISCOVERY_DELAY_SECS| before searching again.
    logging.info('Waiting for device to join network.')
    for _ in range(BOOT_DISCOVERY_ATTEMPTS):
      if self._Discover():
        break
      time.sleep(BOOT_DISCOVERY_DELAY_SECS)
    if not self._host:
      raise Exception('Device %s couldn\'t be discovered via mDNS.' %
                      self._node_name)
    self._ConnectToTarget()
  def _GetEndpoint(self):
    """Return the (host, port) pair used for SSH connections."""
    return (self._host, self._port)
  def _ConnectToTarget(self):
    logging.info('Connecting to Fuchsia using ffx.')
    # Prefer connecting via node name over address:port. Assume that ffx already
    # knows about the target, so there's no need to add/remove it.
    # NOTE(review): self._ffx_runner is not assigned in this class; it is
    # presumably provided by the target.Target base class — verify.
    self._ffx_target = ffx_session.FfxTarget(
        self._ffx_runner, self._node_name) if self._node_name else \
        ffx_session.FfxTarget(self._ffx_runner,
                              '%s:%s' % (self._host, self._port))
    self._ffx_target.wait(ATTACH_RETRY_SECONDS)
    return super(DeviceTarget, self)._ConnectToTarget()
  def _DisconnectFromTarget(self):
    """Drop the ffx target handle and disconnect the base transport."""
    super(DeviceTarget, self)._DisconnectFromTarget()
    self._ffx_target = None
  def _GetSshConfigPath(self):
    """Return the path of the SSH config chosen in __init__."""
    return self._ssh_config_path
  def _ProvisionDevice(self):
    """Pave the device with the image in |_system_image_dir|, then wait for
    it to rejoin the network."""
    _, auth_keys, _ = RunGnSdkFunction('fuchsia-common.sh',
                                       'get-fuchsia-auth-keys-file')
    pave_command = [
        os.path.join(self._system_image_dir, 'pave.sh'), '--authorized-keys',
        auth_keys.strip()
    ]
    if self._node_name:
      # Pave only the named device, once.
      pave_command.extend(['-n', self._node_name, '-1'])
    logging.info(' '.join(pave_command))
    return_code, stdout, stderr = SubprocessCallWithTimeout(pave_command,
                                                            timeout_secs=300)
    if return_code != 0:
      raise Exception('Could not pave device.')
    # The nodename is reported on stderr by the pave script.
    self._ParseNodename(stderr)
  def Restart(self):
    """Restart the device."""
    self.RunCommandPiped('dm reboot')
    # Give the device time to go down before reconnecting.
    time.sleep(_REBOOT_SLEEP_PERIOD)
    self.Start()
  def Stop(self):
    """Tear down the SSH connection and disconnect from the target."""
    try:
      # End multiplexed ssh connection, ensure that ssh logging stops before
      # tests/scripts return.
      if self.IsStarted():
        self.RunCommand(['-O', 'exit'])
    finally:
      self._DisconnectFromTarget()
      super(DeviceTarget, self).Stop()
|
chromium/chromium
|
build/fuchsia/device_target.py
|
Python
|
bsd-3-clause
| 15,282
|
[
"Amber"
] |
626165b2f2a0a2548f7b32f0a83c6d449fb08e5c085ffbfbe9318ae542c07484
|
# -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Vlada Perić <vlada.peric@gmail.com>
# Copyright (C) 2011 Matt Keenan <matt.keenan@gmail.com>
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Narrator class for use by plugins.
"""
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.lib.date import Date
from gramps.gen.lib.person import Person
from gramps.gen.lib.eventroletype import EventRoleType
from gramps.gen.lib.eventtype import EventType
from gramps.gen.lib.familyreltype import FamilyRelType
from gramps.gen.display.name import displayer as _nd
from gramps.gen.utils.alive import probably_alive
from gramps.gen.plug.report import utils as ReportUtils
from gramps.plugins.lib.libtranslate import Translator
#-------------------------------------------------------------------------
#
# Private constants
#
#-------------------------------------------------------------------------
# In the two-dict string arrays below, index 0 holds sentence templates
# that include the person's name and index 1 holds templates that do not.
_NAME_INDEX_INCLUDE_NAME = 0
_NAME_INDEX_EXCLUDE_NAME = 1
# In per-gender string lists, index 0 holds templates without an age and
# index 1 holds templates with an age, expressed in year/month/day units
# with optional precision formatting (see gen/lib/date.py).
_AGE_INDEX_NO_AGE = 0
_AGE_INDEX = 1
#-------------------------------------------------------------------------
#
# Private functions
#
#-------------------------------------------------------------------------
def _get_empty_endnote_numbers(obj):
"""
Empty stab function for when endnotes are not needed
"""
return ""
# avoid normal translation!
##from gramps.gen.const import GRAMPS_LOCALE as glocale
##_ = glocale.get_translation().gettext
def _(message): return message
#------------------------------------------------------------------------
#
# Born strings
#
#------------------------------------------------------------------------
# Birth sentence templates for a full date plus a place.  Layout (shared by
# all born_* tables): element 0 is a gender-keyed dict of templates that
# include the person's name, element 1 is a gender-keyed dict of templates
# that do not, and element 2 is the succinct (name-free) form.
born_full_date_with_place = [
    {
        Person.UNKNOWN : _("%(unknown_gender_name)s was born on %(birth_date)s in %(birth_place)s."),
        Person.MALE : _("%(male_name)s was born on %(birth_date)s in %(birth_place)s."),
        Person.FEMALE : _("%(female_name)s was born on %(birth_date)s in %(birth_place)s."),
    },
    {
        Person.UNKNOWN : _("This person was born on %(birth_date)s in %(birth_place)s."),
        Person.MALE : _("He was born on %(birth_date)s in %(birth_place)s."),
        Person.FEMALE : _("She was born on %(birth_date)s in %(birth_place)s."),
    },
    _("Born %(birth_date)s in %(birth_place)s."),
]
born_modified_date_with_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born %(modified_date)s in %(birth_place)s."),
Person.MALE : _("%(male_name)s was born %(modified_date)s in %(birth_place)s."),
Person.FEMALE : _("%(female_name)s was born %(modified_date)s in %(birth_place)s."),
},
{
Person.UNKNOWN : _("This person was born %(modified_date)s in %(birth_place)s."),
Person.MALE : _("He was born %(modified_date)s in %(birth_place)s."),
Person.FEMALE : _("She was born %(modified_date)s in %(birth_place)s."),
},
_("Born %(modified_date)s in %(birth_place)s."),
]
born_full_date_no_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born on %(birth_date)s."),
Person.MALE : _("%(male_name)s was born on %(birth_date)s."),
Person.FEMALE : _("%(female_name)s was born on %(birth_date)s."),
},
{
Person.UNKNOWN : _("This person was born on %(birth_date)s."),
Person.MALE : _("He was born on %(birth_date)s."),
Person.FEMALE : _("She was born on %(birth_date)s."),
},
_("Born %(birth_date)s."),
]
born_modified_date_no_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born %(modified_date)s."),
Person.MALE : _("%(male_name)s was born %(modified_date)s."),
Person.FEMALE : _("%(female_name)s was born %(modified_date)s."),
},
{
Person.UNKNOWN : _("This person was born %(modified_date)s."),
Person.MALE : _("He was born %(modified_date)s."),
Person.FEMALE : _("She was born %(modified_date)s."),
},
_("Born %(modified_date)s."),
]
born_partial_date_with_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born in %(month_year)s in %(birth_place)s."),
Person.MALE : _("%(male_name)s was born in %(month_year)s in %(birth_place)s."),
Person.FEMALE : _("%(female_name)s was born in %(month_year)s in %(birth_place)s."),
},
{
Person.UNKNOWN : _("This person was born in %(month_year)s in %(birth_place)s."),
Person.MALE : _("He was born in %(month_year)s in %(birth_place)s."),
Person.FEMALE : _("She was born in %(month_year)s in %(birth_place)s."),
},
_("Born %(month_year)s in %(birth_place)s."),
]
born_partial_date_no_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born in %(month_year)s."),
Person.MALE : _("%(male_name)s was born in %(month_year)s."),
Person.FEMALE : _("%(female_name)s was born in %(month_year)s."),
},
{
Person.UNKNOWN : _("This person was born in %(month_year)s."),
Person.MALE : _("He was born in %(month_year)s."),
Person.FEMALE : _("She was born in %(month_year)s."),
},
_("Born %(month_year)s."),
]
born_no_date_with_place = [
{
Person.UNKNOWN : _("%(unknown_gender_name)s was born in %(birth_place)s."),
Person.MALE : _("%(male_name)s was born in %(birth_place)s."),
Person.FEMALE : _("%(female_name)s was born in %(birth_place)s."),
},
{
Person.UNKNOWN : _("This person was born in %(birth_place)s."),
Person.MALE : _("He was born in %(birth_place)s."),
Person.FEMALE : _("She was born in %(birth_place)s."),
},
_("Born in %(birth_place)s."),
]
#------------------------------------------------------------------------
#
# Died strings
#
#------------------------------------------------------------------------
died_full_date_with_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died on %(death_date)s in %(death_place)s."),
_("%(unknown_gender_name)s died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died on %(death_date)s in %(death_place)s."),
_("%(male_name)s died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died on %(death_date)s in %(death_place)s."),
_("%(female_name)s died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died on %(death_date)s in %(death_place)s."),
_("This person died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("He died on %(death_date)s in %(death_place)s."),
_("He died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died on %(death_date)s in %(death_place)s."),
_("She died on %(death_date)s in %(death_place)s at the age of %(age)s."),
],
},
[
_("Died %(death_date)s in %(death_place)s."),
_("Died %(death_date)s in %(death_place)s (%(age)s)."),
],
]
died_modified_date_with_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died %(death_date)s in %(death_place)s."),
_("%(unknown_gender_name)s died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died %(death_date)s in %(death_place)s."),
_("%(male_name)s died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died %(death_date)s in %(death_place)s."),
_("%(female_name)s died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died %(death_date)s in %(death_place)s."),
_("This person died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("He died %(death_date)s in %(death_place)s."),
_("He died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died %(death_date)s in %(death_place)s."),
_("She died %(death_date)s in %(death_place)s at the age of %(age)s."),
],
},
[
_("Died %(death_date)s in %(death_place)s."),
_("Died %(death_date)s in %(death_place)s (%(age)s)."),
],
]
died_full_date_no_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died on %(death_date)s."),
_("%(unknown_gender_name)s died on %(death_date)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died on %(death_date)s."),
_("%(male_name)s died on %(death_date)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died on %(death_date)s."),
_("%(female_name)s died on %(death_date)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died on %(death_date)s."),
_("This person died on %(death_date)s at the age of %(age)s."),
],
Person.MALE : [
_("He died on %(death_date)s."),
_("He died on %(death_date)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died on %(death_date)s."),
_("She died on %(death_date)s at the age of %(age)s."),
],
},
[
_("Died %(death_date)s."),
_("Died %(death_date)s (%(age)s)."),
],
]
died_modified_date_no_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died %(death_date)s."),
_("%(unknown_gender_name)s died %(death_date)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died %(death_date)s."),
_("%(male_name)s died %(death_date)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died %(death_date)s."),
_("%(female_name)s died %(death_date)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died %(death_date)s."),
_("This person died %(death_date)s at the age of %(age)s."),
],
Person.MALE : [
_("He died %(death_date)s."),
_("He died %(death_date)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died %(death_date)s."),
_("She died %(death_date)s at the age of %(age)s."),
],
},
[
_("Died %(death_date)s."),
_("Died %(death_date)s (%(age)s)."),
],
]
died_partial_date_with_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died in %(month_year)s in %(death_place)s."),
_("%(unknown_gender_name)s died in %(month_year)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died in %(month_year)s in %(death_place)s."),
_("%(male_name)s died in %(month_year)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died in %(month_year)s in %(death_place)s."),
_("%(female_name)s died in %(month_year)s in %(death_place)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died in %(month_year)s in %(death_place)s."),
_("This person died in %(month_year)s in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("He died in %(month_year)s in %(death_place)s."),
_("He died in %(month_year)s in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died in %(month_year)s in %(death_place)s."),
_("She died in %(month_year)s in %(death_place)s at the age of %(age)s."),
]
},
[
_("Died %(month_year)s in %(death_place)s."),
_("Died %(month_year)s in %(death_place)s (%(age)s)."),
],
]
died_partial_date_no_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died in %(month_year)s."),
_("%(unknown_gender_name)s died in %(month_year)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died in %(month_year)s."),
_("%(male_name)s died in %(month_year)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died in %(month_year)s."),
_("%(female_name)s died in %(month_year)s at the age of %(age)s."),
],
},
{ Person.UNKNOWN : [
_("This person died in %(month_year)s."),
_("This person died in %(month_year)s at the age of %(age)s."),
],
Person.MALE : [
_("He died in %(month_year)s."),
_("He died in %(month_year)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died in %(month_year)s."),
_("She died in %(month_year)s at the age of %(age)s."),
],
},
[
_("Died %(month_year)s."),
_("Died %(month_year)s (%(age)s)."),
],
]
died_no_date_with_place = [
{ Person.UNKNOWN : [
_("%(unknown_gender_name)s died in %(death_place)s."),
_("%(unknown_gender_name)s died in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("%(male_name)s died in %(death_place)s."),
_("%(male_name)s died in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("%(female_name)s died in %(death_place)s."),
_("%(female_name)s died in %(death_place)s at the age of %(age)s."),
],
},
{
Person.UNKNOWN : [
_("This person died in %(death_place)s."),
_("This person died in %(death_place)s at the age of %(age)s."),
],
Person.MALE : [
_("He died in %(death_place)s."),
_("He died in %(death_place)s at the age of %(age)s."),
],
Person.FEMALE : [
_("She died in %(death_place)s."),
_("She died in %(death_place)s at the age of %(age)s."),
],
},
[
_("Died in %(death_place)s."),
_("Died in %(death_place)s (%(age)s)."),
],
]
# Death sentence templates when neither a date nor a place is known.  Each
# gender maps to [no-age template, with-age template] (see _AGE_INDEX_*);
# the empty strings mean "say nothing" when no age is available either.
died_no_date_no_place = [
    { Person.UNKNOWN : [
        "",
        _("%(unknown_gender_name)s died at the age of %(age)s."),
    ],
    Person.MALE : [
        "",
        _("%(male_name)s died at the age of %(age)s."),
    ],
    Person.FEMALE : [
        "",
        _("%(female_name)s died at the age of %(age)s."),
    ],
    },
    { Person.UNKNOWN : [
        "",
        _("This person died at the age of %(age)s."),
    ],
    Person.MALE : [
        "",
        _("He died at the age of %(age)s."),
    ],
    Person.FEMALE : [
        "",
        _("She died at the age of %(age)s."),
    ],
    },
    [
        "",
        _("Died (%(age)s)."),
    ],
]
#------------------------------------------------------------------------
#
# Buried strings
#
#------------------------------------------------------------------------
# Burial sentence templates for a full date plus a place.  Schema (shared
# by all buried_*, baptised_* and christened_* tables): each gender maps to
# [with-name template, without-name template] (see _NAME_INDEX_*), and the
# 'succinct' key holds the short name-free form.
buried_full_date_place = {
    Person.MALE: [
        _("%(male_name)s was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
        _("He was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
    ],
    Person.FEMALE: [
        _("%(female_name)s was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
        _("She was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
    ],
    Person.UNKNOWN: [
        _("%(unknown_gender_name)s was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
        _("This person was buried on %(burial_date)s in %(burial_place)s%(endnotes)s."),
    ],
    'succinct' : _("Buried %(burial_date)s in %(burial_place)s%(endnotes)s."),
}
buried_full_date_no_place = {
Person.MALE: [
_("%(male_name)s was buried on %(burial_date)s%(endnotes)s."),
_("He was buried on %(burial_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was buried on %(burial_date)s%(endnotes)s."),
_("She was buried on %(burial_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was buried on %(burial_date)s%(endnotes)s."),
_("This person was buried on %(burial_date)s%(endnotes)s."),
],
'succinct' : _("Buried %(burial_date)s%(endnotes)s."),
}
buried_partial_date_place = {
Person.MALE: [
_("%(male_name)s was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
_("He was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
_("She was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
_("This person was buried in %(month_year)s in %(burial_place)s%(endnotes)s."),
],
'succinct' : _("Buried %(month_year)s in %(burial_place)s%(endnotes)s."),
}
buried_partial_date_no_place = {
Person.MALE: [
_("%(male_name)s was buried in %(month_year)s%(endnotes)s."),
_("He was buried in %(month_year)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was buried in %(month_year)s%(endnotes)s."),
_("She was buried in %(month_year)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was buried in %(month_year)s%(endnotes)s."),
_("This person was buried in %(month_year)s%(endnotes)s."),
],
'succinct' : _("Buried %(month_year)s%(endnotes)s."),
}
buried_modified_date_place = {
Person.MALE: [
_("%(male_name)s was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
_("He was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
_("She was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
_("This person was buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
],
'succinct' : _("Buried %(modified_date)s in %(burial_place)s%(endnotes)s."),
}
buried_modified_date_no_place = {
Person.MALE: [
_("%(male_name)s was buried %(modified_date)s%(endnotes)s."),
_("He was buried %(modified_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was buried %(modified_date)s%(endnotes)s."),
_("She was buried %(modified_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was buried %(modified_date)s%(endnotes)s."),
_("This person was buried %(modified_date)s%(endnotes)s."),
],
'succinct' : _("Buried %(modified_date)s%(endnotes)s."),
}
buried_no_date_place = {
Person.MALE : [
_("%(male_name)s was buried in %(burial_place)s%(endnotes)s."),
_("He was buried in %(burial_place)s%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was buried in %(burial_place)s%(endnotes)s."),
_("She was buried in %(burial_place)s%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was buried in %(burial_place)s%(endnotes)s."),
_("This person was buried in %(burial_place)s%(endnotes)s."),
],
'succinct' : _("Buried in %(burial_place)s%(endnotes)s."),
}
buried_no_date_no_place = {
Person.MALE : [
_("%(male_name)s was buried%(endnotes)s."),
_("He was buried%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was buried%(endnotes)s."),
_("She was buried%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was buried%(endnotes)s."),
_("This person was buried%(endnotes)s."),
],
'succinct' : _("Buried%(endnotes)s."),
}
#------------------------------------------------------------------------
#
# Baptized strings
#
#------------------------------------------------------------------------
baptised_full_date_place = {
Person.MALE: [
_("%(male_name)s was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
_("He was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
_("She was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
_("This person was baptized on %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
],
'succinct' : _("Baptized %(baptism_date)s in %(baptism_place)s%(endnotes)s."),
}
baptised_full_date_no_place = {
Person.MALE: [
_("%(male_name)s was baptized on %(baptism_date)s%(endnotes)s."),
_("He was baptized on %(baptism_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized on %(baptism_date)s%(endnotes)s."),
_("She was baptized on %(baptism_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized on %(baptism_date)s%(endnotes)s."),
_("This person was baptized on %(baptism_date)s%(endnotes)s."),
],
'succinct' : _("Baptized %(baptism_date)s%(endnotes)s.")
}
baptised_partial_date_place = {
Person.MALE: [
_("%(male_name)s was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
_("He was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
_("She was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
_("This person was baptized in %(month_year)s in %(baptism_place)s%(endnotes)s."),
],
'succinct' : _("Baptized %(month_year)s in %(baptism_place)s%(endnotes)s."),
}
baptised_partial_date_no_place = {
Person.MALE: [
_("%(male_name)s was baptized in %(month_year)s%(endnotes)s."),
_("He was baptized in %(month_year)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized in %(month_year)s%(endnotes)s."),
_("She was baptized in %(month_year)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized in %(month_year)s%(endnotes)s."),
_("This person was baptized in %(month_year)s%(endnotes)s."),
],
'succinct' : _("Baptized %(month_year)s%(endnotes)s."),
}
baptised_modified_date_place = {
Person.MALE: [
_("%(male_name)s was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
_("He was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
_("She was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
_("This person was baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
],
'succinct' : _("Baptized %(modified_date)s in %(baptism_place)s%(endnotes)s."),
}
baptised_modified_date_no_place = {
Person.MALE: [
_("%(male_name)s was baptized %(modified_date)s%(endnotes)s."),
_("He was baptized %(modified_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was baptized %(modified_date)s%(endnotes)s."),
_("She was baptized %(modified_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was baptized %(modified_date)s%(endnotes)s."),
_("This person was baptized %(modified_date)s%(endnotes)s."),
],
'succinct' : _("Baptized %(modified_date)s%(endnotes)s."),
}
baptised_no_date_place = {
Person.MALE : [
_("%(male_name)s was baptized in %(baptism_place)s%(endnotes)s."),
_("He was baptized in %(baptism_place)s%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was baptized in %(baptism_place)s%(endnotes)s."),
_("She was baptized in %(baptism_place)s%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was baptized in %(baptism_place)s%(endnotes)s."),
_("This person was baptized in %(baptism_place)s%(endnotes)s."),
],
'succinct' : _("Baptized in %(baptism_place)s%(endnotes)s."),
}
baptised_no_date_no_place = {
Person.MALE : [
_("%(male_name)s was baptized%(endnotes)s."),
_("He was baptized%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was baptized%(endnotes)s."),
_("She was baptized%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was baptized%(endnotes)s."),
_("This person was baptized%(endnotes)s."),
],
'succinct' : _("Baptized%(endnotes)s."),
}
#------------------------------------------------------------------------
#
# Christened strings
#
#------------------------------------------------------------------------
christened_full_date_place = {
Person.MALE: [
_("%(male_name)s was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
_("He was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
_("She was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
_("This person was christened on %(christening_date)s in %(christening_place)s%(endnotes)s."),
],
'succinct' : _("Christened %(christening_date)s in %(christening_place)s%(endnotes)s."),
}
christened_full_date_no_place = {
Person.MALE: [
_("%(male_name)s was christened on %(christening_date)s%(endnotes)s."),
_("He was christened on %(christening_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened on %(christening_date)s%(endnotes)s."),
_("She was christened on %(christening_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened on %(christening_date)s%(endnotes)s."),
_("This person was christened on %(christening_date)s%(endnotes)s."),
],
'succinct' : _("Christened %(christening_date)s%(endnotes)s.")
}
christened_partial_date_place = {
Person.MALE: [
_("%(male_name)s was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
_("He was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
_("She was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
_("This person was christened in %(month_year)s in %(christening_place)s%(endnotes)s."),
],
'succinct' : _("Christened %(month_year)s in %(christening_place)s%(endnotes)s."),
}
christened_partial_date_no_place = {
Person.MALE: [
_("%(male_name)s was christened in %(month_year)s%(endnotes)s."),
_("He was christened in %(month_year)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened in %(month_year)s%(endnotes)s."),
_("She was christened in %(month_year)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened in %(month_year)s%(endnotes)s."),
_("This person was christened in %(month_year)s%(endnotes)s."),
],
'succinct' : _("Christened %(month_year)s%(endnotes)s."),
}
christened_modified_date_place = {
Person.MALE: [
_("%(male_name)s was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
_("He was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
_("She was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
_("This person was christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
],
'succinct' : _("Christened %(modified_date)s in %(christening_place)s%(endnotes)s."),
}
christened_modified_date_no_place = {
Person.MALE: [
_("%(male_name)s was christened %(modified_date)s%(endnotes)s."),
_("He was christened %(modified_date)s%(endnotes)s."),
],
Person.FEMALE: [
_("%(female_name)s was christened %(modified_date)s%(endnotes)s."),
_("She was christened %(modified_date)s%(endnotes)s."),
],
Person.UNKNOWN: [
_("%(unknown_gender_name)s was christened %(modified_date)s%(endnotes)s."),
_("This person was christened %(modified_date)s%(endnotes)s."),
],
'succinct' : _("Christened %(modified_date)s%(endnotes)s."),
}
christened_no_date_place = {
Person.MALE : [
_("%(male_name)s was christened in %(christening_place)s%(endnotes)s."),
_("He was christened in %(christening_place)s%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was christened in %(christening_place)s%(endnotes)s."),
_("She was christened in %(christening_place)s%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was christened in %(christening_place)s%(endnotes)s."),
_("This person was christened in %(christening_place)s%(endnotes)s."),
],
'succinct' : _("Christened in %(christening_place)s%(endnotes)s."),
}
christened_no_date_no_place = {
Person.MALE : [
_("%(male_name)s was christened%(endnotes)s."),
_("He was christened%(endnotes)s."),
],
Person.FEMALE : [
_("%(female_name)s was christened%(endnotes)s."),
_("She was christened%(endnotes)s."),
],
Person.UNKNOWN : [
_("%(unknown_gender_name)s was christened%(endnotes)s."),
_("This person was christened%(endnotes)s."),
],
'succinct' : _("Christened%(endnotes)s."),
}
#-------------------------------------------------------------------------
#
# child to parent relationships
#
#-------------------------------------------------------------------------
child_father_mother = {
Person.UNKNOWN: [
[
_("%(male_name)s is the child of %(father)s and %(mother)s."),
_("%(male_name)s was the child of %(father)s and %(mother)s."),
],
[
_("This person is the child of %(father)s and %(mother)s."),
_("This person was the child of %(father)s and %(mother)s."),
],
_("Child of %(father)s and %(mother)s."),
],
Person.MALE : [
[
_("%(male_name)s is the son of %(father)s and %(mother)s."),
_("%(male_name)s was the son of %(father)s and %(mother)s."),
],
[
_("He is the son of %(father)s and %(mother)s."),
_("He was the son of %(father)s and %(mother)s."),
],
_("Son of %(father)s and %(mother)s."),
],
Person.FEMALE : [
[
_("%(female_name)s is the daughter of %(father)s and %(mother)s."),
_("%(female_name)s was the daughter of %(father)s and %(mother)s."),
],
[
_("She is the daughter of %(father)s and %(mother)s."),
_("She was the daughter of %(father)s and %(mother)s."),
],
_("Daughter of %(father)s and %(mother)s."),
]
}
child_father = {
Person.UNKNOWN : [
[
_("%(male_name)s is the child of %(father)s."),
_("%(male_name)s was the child of %(father)s."),
],
[
_("This person is the child of %(father)s."),
_("This person was the child of %(father)s."),
],
_("Child of %(father)s."),
],
Person.MALE : [
[
_("%(male_name)s is the son of %(father)s."),
_("%(male_name)s was the son of %(father)s."),
],
[
_("He is the son of %(father)s."),
_("He was the son of %(father)s."),
],
_("Son of %(father)s."),
],
Person.FEMALE : [
[
_("%(female_name)s is the daughter of %(father)s."),
_("%(female_name)s was the daughter of %(father)s."),
],
[
_("She is the daughter of %(father)s."),
_("She was the daughter of %(father)s."),
],
_("Daughter of %(father)s."),
],
}
child_mother = {
Person.UNKNOWN : [
[
_("%(male_name)s is the child of %(mother)s."),
_("%(male_name)s was the child of %(mother)s."),
],
[
_("This person is the child of %(mother)s."),
_("This person was the child of %(mother)s."),
],
_("Child of %(mother)s."),
],
Person.MALE : [
[
_("%(male_name)s is the son of %(mother)s."),
_("%(male_name)s was the son of %(mother)s."),
],
[
_("He is the son of %(mother)s."),
_("He was the son of %(mother)s."),
],
_("Son of %(mother)s."),
],
Person.FEMALE : [
[
_("%(female_name)s is the daughter of %(mother)s."),
_("%(female_name)s was the daughter of %(mother)s."),
],
[
_("She is the daughter of %(mother)s."),
_("She was the daughter of %(mother)s."),
],
_("Daughter of %(mother)s."),
],
}
#------------------------------------------------------------------------
#
# Marriage strings - Relationship type MARRIED
#
#------------------------------------------------------------------------
marriage_first_date_place = {
Person.UNKNOWN : [
_('This person married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
_('This person married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
_('This person married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
],
Person.MALE : [
_('He married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
_('He married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
_('He married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
],
Person.FEMALE : [
_('She married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
_('She married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
_('She married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
],
'succinct' : [
_('Married %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
_('Married %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
_('Married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
],
}
# Marriage sentence templates.  The *_date_place and *_date tables map a
# gender (Person.UNKNOWN/MALE/FEMALE) or the key 'succinct' to a list of
# three templates ordered [partial_date, full_date, modified_date]; the
# *_place and *_only tables map straight to a single template string.
# "first" tables narrate the first marriage, "also" tables later ones.
marriage_also_date_place = {
    Person.UNKNOWN : [
        _('This person also married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('This person also married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('This person also married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('He also married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('He also married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also married %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('She also married %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('She also married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also married %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
        _('Also married %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
        _('Also married %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    }
marriage_first_date = {
    Person.UNKNOWN : [
        _('This person married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Married %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Married %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
marriage_also_date = {
    Person.UNKNOWN : [
        _('This person also married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person also married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person also married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He also married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He also married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also married %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She also married %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She also married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also married %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Also married %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Also married %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
# Place-only and spouse-only variants: one template per gender/'succinct'.
marriage_first_place = {
    Person.UNKNOWN : _('This person married %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He married %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She married %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Married %(spouse)s in %(place)s%(endnotes)s.'),
    }
marriage_also_place = {
    Person.UNKNOWN : _('This person also married %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He also married %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She also married %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Also married %(spouse)s in %(place)s%(endnotes)s.'),
    }
marriage_first_only = {
    Person.UNKNOWN : _('This person married %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He married %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She married %(spouse)s%(endnotes)s.'),
    'succinct' : _('Married %(spouse)s%(endnotes)s.'),
    }
marriage_also_only = {
    Person.UNKNOWN : _('This person also married %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He also married %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She also married %(spouse)s%(endnotes)s.'),
    'succinct' : _('Also married %(spouse)s%(endnotes)s.'),
    }
#------------------------------------------------------------------------
#
# Marriage strings - Relationship type UNMARRIED
#
#------------------------------------------------------------------------
# Unmarried-relationship sentence templates; same layout as the marriage
# tables above (gender or 'succinct' -> [partial, full, modified] or a
# single string for the *_place/*_only tables).
# Consistency fix: the 'succinct' entries of the *_also_* tables below now
# carry the "Also " prefix, matching marriage_also_* / relationship_also_*
# (previously the terse output could not distinguish a repeat relationship).
# NOTE: this changes gettext msgids, so translation catalogs must be
# regenerated for the affected strings.
unmarried_first_date_place = {
    Person.UNKNOWN : [
        _('This person had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('This person had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('This person had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('He had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('He had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('She had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('She had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Unmarried relationship with %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
        _('Unmarried relationship with %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
        _('Unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    }
unmarried_also_date_place = {
    Person.UNKNOWN : [
        _('This person also had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('This person also had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('This person also had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('He also had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('He also had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also had an unmarried relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('She also had an unmarried relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('She also had an unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also unmarried relationship with %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
        _('Also unmarried relationship with %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
        _('Also unmarried relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    }
unmarried_first_date = {
    Person.UNKNOWN : [
        _('This person had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Unmarried relationship with %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Unmarried relationship with %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
unmarried_also_date = {
    Person.UNKNOWN : [
        _('This person also had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person also had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person also had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He also had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He also had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also had an unmarried relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She also had an unmarried relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She also had an unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also unmarried relationship with %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Also unmarried relationship with %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Also unmarried relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
unmarried_first_place = {
    Person.UNKNOWN : _('This person had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    }
unmarried_also_place = {
    Person.UNKNOWN : _('This person also had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He also had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She also had an unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Also unmarried relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    }
unmarried_first_only = {
    Person.UNKNOWN : _('This person had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    'succinct' : _('Unmarried relationship with %(spouse)s%(endnotes)s.'),
    }
unmarried_also_only = {
    Person.UNKNOWN : _('This person also had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He also had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She also had an unmarried relationship with %(spouse)s%(endnotes)s.'),
    'succinct' : _('Also unmarried relationship with %(spouse)s%(endnotes)s.'),
    }
#------------------------------------------------------------------------
#
# Marriage strings - Relationship type other than MARRIED or UNMARRIED
# i.e. CIVIL UNION or CUSTOM
#
#------------------------------------------------------------------------
# Generic-relationship sentence templates (CIVIL UNION / CUSTOM); same
# layout as the marriage tables: gender (or 'succinct') -> three templates
# ordered [partial_date, full_date, modified_date], or a single template
# for the *_place / *_only tables.
relationship_first_date_place = {
    Person.UNKNOWN : [
        _('This person had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('This person had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('This person had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('He had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('He had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('She had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('She had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Relationship with %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
        _('Relationship with %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
        _('Relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    }
relationship_also_date_place = {
    Person.UNKNOWN : [
        _('This person also had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('This person also had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('This person also had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('He also had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('He also had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also had a relationship with %(spouse)s in %(partial_date)s in %(place)s%(endnotes)s.'),
        _('She also had a relationship with %(spouse)s on %(full_date)s in %(place)s%(endnotes)s.'),
        _('She also had a relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also relationship with %(spouse)s %(partial_date)s in %(place)s%(endnotes)s.'),
        _('Also relationship with %(spouse)s %(full_date)s in %(place)s%(endnotes)s.'),
        _('Also relationship with %(spouse)s %(modified_date)s in %(place)s%(endnotes)s.'),
        ],
    }
relationship_first_date = {
    Person.UNKNOWN : [
        _('This person had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Relationship with %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Relationship with %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
relationship_also_date = {
    Person.UNKNOWN : [
        _('This person also had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('This person also had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('This person also had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.MALE : [
        _('He also had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('He also had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('He also had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    Person.FEMALE : [
        _('She also had a relationship with %(spouse)s in %(partial_date)s%(endnotes)s.'),
        _('She also had a relationship with %(spouse)s on %(full_date)s%(endnotes)s.'),
        _('She also had a relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    'succinct' : [
        _('Also relationship with %(spouse)s %(partial_date)s%(endnotes)s.'),
        _('Also relationship with %(spouse)s %(full_date)s%(endnotes)s.'),
        _('Also relationship with %(spouse)s %(modified_date)s%(endnotes)s.'),
        ],
    }
relationship_first_place = {
    Person.UNKNOWN : _('This person had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    }
relationship_also_place = {
    Person.UNKNOWN : _('This person also had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.MALE : _('He also had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    Person.FEMALE : _('She also had a relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    'succinct' : _('Also relationship with %(spouse)s in %(place)s%(endnotes)s.'),
    }
relationship_first_only = {
    Person.UNKNOWN : _('This person had a relationship with %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He had a relationship with %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She had a relationship with %(spouse)s%(endnotes)s.'),
    'succinct' : _('Relationship with %(spouse)s%(endnotes)s.'),
    }
relationship_also_only = {
    Person.UNKNOWN : _('This person also had a relationship with %(spouse)s%(endnotes)s.'),
    Person.MALE : _('He also had a relationship with %(spouse)s%(endnotes)s.'),
    Person.FEMALE : _('She also had a relationship with %(spouse)s%(endnotes)s.'),
    'succinct' : _('Also relationship with %(spouse)s%(endnotes)s.'),
    }
#------------------------------------------------------------------------
#
# Narrator
#
#------------------------------------------------------------------------
class Narrator(object):
"""
Narrator is a class which provides narration text.
"""
    def __init__(self, dbase, verbose=True, use_call_name=False,use_fulldate=False,
                 empty_date="", empty_place="",
                 translator=None,
                 get_endnote_numbers=_get_empty_endnote_numbers):
        """
        Initialize the narrator class.

        :param dbase: The database that contains the data to be narrated.
        :type dbase: :class:`~gen.db.base,DbBase`
        :param verbose: Specifies whether complete sentences should be used.
        :type verbose: bool
        :param use_call_name: Specifies whether a person's call name should be
            used for the first name.
        :type use_call_name: bool
        :param use_fulldate: Specifies whether the full date of an event
            should be used rather than just the year.
        :type use_fulldate: bool
        :param empty_date: String to use when a date is not known.
        :type empty_date: str
        :param empty_place: String to use when a place is not known.
        :type empty_place: str
        :param translator: An object providing ``gettext`` (message
            translation) and ``get_date`` (date formatting).  When None, a
            default Translator is created.
        :type translator: :class:`Translator`
        :param get_endnote_numbers: A callable to use for getting a string
            representing endnote numbers.
            The function takes a :class:`~gen.lib.CitationBase` instance.
            A typical return value from get_endnote_numbers() would be "2a" and
            would represent a reference to an endnote in a document.
        :type get_endnote_numbers:
            callable( :class:`~gen.lib.CitationBase` )
        """
        self.__db = dbase
        self.__verbose = verbose
        self.__use_call = use_call_name
        self.__use_fulldate = use_fulldate
        self.__empty_date = empty_date
        self.__empty_place = empty_place
        self.__get_endnote_numbers = get_endnote_numbers
        # Story state: the current subject and whether the first sentence
        # (which spells out the name) has already been produced.
        self.__person = None
        self.__first_name = ""
        self.__first_name_used = False
        if translator is None:
            translator = Translator(Translator.DEFAULT_TRANSLATION_STR)
        self.__translate_text = translator.gettext
        self.__get_date = translator.get_date
def set_subject(self, person):
"""
Start a new story about this person. The person's first name will be
used in the first sentence. A pronoun will be used as the subject for
each subsequent sentence.
:param person: The person to be the subject of the story.
:type dbase: :class:`~gen.lib.person,Person`
"""
self.__person = person
if self.__use_call and person.get_primary_name().get_call_name():
self.__first_name = person.get_primary_name().get_call_name()
else:
self.__first_name = person.get_primary_name().get_first_name()
self.__first_name_used = False
def get_born_string(self):
"""
Get a string narrating the birth of the subject.
Example sentences:
Person was born on Date.
Person was born on Date in Place.
Person was born in Place.
''
:returns: A sentence about the subject's birth.
:rtype: unicode
"""
if not self.__first_name_used:
name_index = _NAME_INDEX_INCLUDE_NAME
self.__first_name_used = True
else:
name_index = _NAME_INDEX_EXCLUDE_NAME
text = ""
bplace = self.__empty_place
bdate = self.__empty_date
birth_event = None
bdate_full = False
bdate_mod = False
birth_ref = self.__person.get_birth_ref()
if birth_ref and birth_ref.ref:
birth_event = self.__db.get_event_from_handle(birth_ref.ref)
if birth_event:
if self.__use_fulldate :
bdate = self.__get_date(birth_event.get_date_object())
else:
bdate = birth_event.get_date_object().get_year()
bplace_handle = birth_event.get_place_handle()
if bplace_handle:
place = self.__db.get_place_from_handle(bplace_handle)
bplace = place.get_title()
bdate_obj = birth_event.get_date_object()
bdate_full = bdate_obj and bdate_obj.get_day_valid()
bdate_mod = bdate_obj and \
bdate_obj.get_modifier() != Date.MOD_NONE
value_map = {
'name' : self.__first_name,
'male_name' : self.__first_name,
'unknown_gender_name' : self.__first_name,
'female_name' : self.__first_name,
'birth_date' : bdate,
'birth_place' : bplace,
'month_year' : bdate,
'modified_date' : bdate,
}
gender = self.__person.get_gender()
if bdate:
if bdate_mod:
if bplace and self.__verbose:
text = born_modified_date_with_place[name_index][gender]
elif bplace:
text = born_modified_date_with_place[2]
elif self.__verbose:
text = born_modified_date_no_place[name_index][gender]
else:
text = born_modified_date_no_place[2]
elif bdate_full:
if bplace and self.__verbose:
text = born_full_date_with_place[name_index][gender]
elif bplace:
text = born_full_date_with_place[2]
elif self.__verbose:
text = born_full_date_no_place[name_index][gender]
else:
text = born_full_date_no_place[2]
else:
if bplace and self.__verbose:
text = born_partial_date_with_place[name_index][gender]
elif bplace:
text = born_partial_date_with_place[2]
elif self.__verbose:
text = born_partial_date_no_place[name_index][gender]
else:
text = born_partial_date_no_place[2]
else:
if bplace and self.__verbose:
text = born_no_date_with_place[name_index][gender]
elif bplace:
text = born_no_date_with_place[2]
else:
text = ""
if text:
text = self.__translate_text(text) % value_map
if birth_event:
text = text.rstrip(". ")
text = text + self.__get_endnote_numbers(birth_event) + ". "
text = text + " "
return text
def get_died_string(self, include_age=False):
"""
Get a string narrating the death of the subject.
Example sentences:
Person died on Date
Person died on Date at the age of 'age'
Person died on Date in Place
Person died on Date in Place at the age of 'age'
Person died in Place
Person died in Place at the age of 'age'
Person died
''
where 'age' string is an advanced age calculation.
:returns: A sentence about the subject's death.
:rtype: unicode
"""
if not self.__first_name_used:
name_index = _NAME_INDEX_INCLUDE_NAME
self.__first_name_used = True
else:
name_index = _NAME_INDEX_EXCLUDE_NAME
text = ""
dplace = self.__empty_place
ddate = self.__empty_date
death_event = None
ddate_full = False
ddate_mod = False
death_ref = self.__person.get_death_ref()
if death_ref and death_ref.ref:
death_event = self.__db.get_event_from_handle(death_ref.ref)
if death_event:
if self.__use_fulldate :
ddate = self.__get_date(death_event.get_date_object())
else:
ddate = death_event.get_date_object().get_year()
dplace_handle = death_event.get_place_handle()
if dplace_handle:
place = self.__db.get_place_from_handle(dplace_handle)
dplace = place.get_title()
ddate_obj = death_event.get_date_object()
ddate_full = ddate_obj and ddate_obj.get_day_valid()
ddate_mod = ddate_obj and \
ddate_obj.get_modifier() != Date.MOD_NONE
if include_age:
age, age_index = self.__get_age_at_death()
else:
age = 0
age_index = _AGE_INDEX_NO_AGE
value_map = {
'name' : self.__first_name,
'unknown_gender_name' : self.__first_name,
'male_name' : self.__first_name,
'female_name' : self.__first_name,
'death_date' : ddate,
'modified_date' : ddate,
'death_place' : dplace,
'age' : age,
'month_year' : ddate,
}
gender = self.__person.get_gender()
if ddate and ddate_mod:
if dplace and self.__verbose:
text = died_modified_date_with_place[name_index][gender][age_index]
elif dplace:
text = died_modified_date_with_place[2][age_index]
elif self.__verbose:
text = died_modified_date_no_place[name_index][gender][age_index]
else:
text = died_modified_date_no_place[2][age_index]
elif ddate and ddate_full:
if dplace and self.__verbose:
text = died_full_date_with_place[name_index][gender][age_index]
elif dplace:
text = died_full_date_with_place[2][age_index]
elif self.__verbose:
text = died_full_date_no_place[name_index][gender][age_index]
else:
text = died_full_date_no_place[2][age_index]
elif ddate:
if dplace and self.__verbose:
text = died_partial_date_with_place[name_index][gender][age_index]
elif dplace:
text = died_partial_date_with_place[2][age_index]
elif self.__verbose:
text = died_partial_date_no_place[name_index][gender][age_index]
else:
text = died_partial_date_no_place[2][age_index]
elif dplace and self.__verbose:
text = died_no_date_with_place[name_index][gender][age_index]
elif dplace:
text = died_no_date_with_place[2][age_index]
elif self.__verbose:
text = died_no_date_no_place[name_index][gender][age_index]
else:
text = died_no_date_no_place[2][age_index]
if text:
text = self.__translate_text(text) % value_map
if death_event:
text = text.rstrip(". ")
text = text + self.__get_endnote_numbers(death_event) + ". "
text = text + " "
return text
def get_buried_string(self):
"""
Get a string narrating the burial of the subject.
Example sentences:
Person was buried on Date.
Person was buried on Date in Place.
Person was buried in Month_Year.
Person was buried in Month_Year in Place.
Person was buried in Place.
''
:returns: A sentence about the subject's burial.
:rtype: unicode
"""
if not self.__first_name_used:
name_index = _NAME_INDEX_INCLUDE_NAME
self.__first_name_used = True
else:
name_index = _NAME_INDEX_EXCLUDE_NAME
gender = self.__person.get_gender()
text = ""
bplace = self.__empty_place
bdate = self.__empty_date
bdate_full = False
bdate_mod = False
burial = None
for event_ref in self.__person.get_event_ref_list():
event = self.__db.get_event_from_handle(event_ref.ref)
if event and event.type.value == EventType.BURIAL \
and event_ref.role.value == EventRoleType.PRIMARY:
burial = event
break
if burial:
if self.__use_fulldate :
bdate = self.__get_date(burial.get_date_object())
else:
bdate = burial.get_date_object().get_year()
bplace_handle = burial.get_place_handle()
if bplace_handle:
place = self.__db.get_place_from_handle(bplace_handle)
bplace = place.get_title()
bdate_obj = burial.get_date_object()
bdate_full = bdate_obj and bdate_obj.get_day_valid()
bdate_mod = bdate_obj and bdate_obj.get_modifier() != Date.MOD_NONE
else:
return text
value_map = {
'unknown_gender_name' : self.__first_name,
'male_name' : self.__first_name,
'name' : self.__first_name,
'female_name' : self.__first_name,
'burial_date' : bdate,
'burial_place' : bplace,
'month_year' : bdate,
'modified_date' : bdate,
'endnotes' : self.__get_endnote_numbers(event),
}
if bdate and bdate_mod and self.__verbose:
if bplace: #male, date, place
text = buried_modified_date_place[gender][name_index]
else: #male, date, no place
text = buried_modified_date_no_place[gender][name_index]
elif bdate and bdate_mod:
if bplace: #male, date, place
text = buried_modified_date_place['succinct']
else: #male, date, no place
text = buried_modified_date_no_place['succinct']
elif bdate and bdate_full and self.__verbose:
if bplace: #male, date, place
text = buried_full_date_place[gender][name_index]
else: #male, date, no place
text = buried_full_date_no_place[gender][name_index]
elif bdate and bdate_full:
if bplace: #male, date, place
text = buried_full_date_place['succinct']
else: #male, date, no place
text = buried_full_date_no_place['succinct']
elif bdate and self.__verbose:
if bplace: #male, month_year, place
text = buried_partial_date_place[gender][name_index]
else: #male, month_year, no place
text = buried_partial_date_no_place[gender][name_index]
elif bdate:
if bplace: #male, month_year, place
text = buried_partial_date_place['succinct']
else: #male, month_year, no place
text = buried_partial_date_no_place['succinct']
elif bplace and self.__verbose: #male, no date, place
text = buried_no_date_place[gender][name_index]
elif bplace: #male, no date, place
text = buried_no_date_place['succinct']
elif self.__verbose:
text = buried_no_date_no_place[gender][name_index]
else: #male, no date, no place
text = buried_no_date_no_place['succinct']
if text:
text = self.__translate_text(text) % value_map
text = text + " "
return text
def get_baptised_string(self):
"""
Get a string narrating the baptism of the subject.
Example sentences:
Person was baptized on Date.
Person was baptized on Date in Place.
Person was baptized in Month_Year.
Person was baptized in Month_Year in Place.
Person was baptized in Place.
''
:returns: A sentence about the subject's baptism.
:rtype: unicode
"""
if not self.__first_name_used:
name_index = _NAME_INDEX_INCLUDE_NAME
self.__first_name_used = True
else:
name_index = _NAME_INDEX_EXCLUDE_NAME
gender = self.__person.get_gender()
text = ""
bplace = self.__empty_place
bdate = self.__empty_date
bdate_full = False
bdate_mod = False
baptism = None
for event_ref in self.__person.get_event_ref_list():
event = self.__db.get_event_from_handle(event_ref.ref)
if event and event.type.value == EventType.BAPTISM \
and event_ref.role.value == EventRoleType.PRIMARY:
baptism = event
break
if baptism:
if self.__use_fulldate :
bdate = self.__get_date(baptism.get_date_object())
else:
bdate = baptism.get_date_object().get_year()
bplace_handle = baptism.get_place_handle()
if bplace_handle:
place = self.__db.get_place_from_handle(bplace_handle)
bplace = place.get_title()
bdate_obj = baptism.get_date_object()
bdate_full = bdate_obj and bdate_obj.get_day_valid()
bdate_mod = bdate_obj and bdate_obj.get_modifier() != Date.MOD_NONE
else:
return text
value_map = {
'unknown_gender_name' : self.__first_name,
'male_name' : self.__first_name,
'name' : self.__first_name,
'female_name' : self.__first_name,
'baptism_date' : bdate,
'baptism_place' : bplace,
'month_year' : bdate,
'modified_date' : bdate,
'endnotes' : self.__get_endnote_numbers(event),
}
if bdate and bdate_mod and self.__verbose:
if bplace: #male, date, place
text = baptised_modified_date_place[gender][name_index]
else: #male, date, no place
text = baptised_modified_date_no_place[gender][name_index]
elif bdate and bdate_mod:
if bplace: #male, date, place
text = baptised_modified_date_place['succinct']
else: #male, date, no place
text = baptised_modified_date_no_place['succinct']
elif bdate and bdate_full and self.__verbose:
if bplace: #male, date, place
text = baptised_full_date_place[gender][name_index]
else: #male, date, no place
text = baptised_full_date_no_place[gender][name_index]
elif bdate and bdate_full:
if bplace: #male, date, place
text = baptised_full_date_place['succinct']
else: #male, date, no place
text = baptised_full_date_no_place['succinct']
elif bdate and self.__verbose:
if bplace: #male, month_year, place
text = baptised_partial_date_place[gender][name_index]
else: #male, month_year, no place
text = baptised_partial_date_no_place[gender][name_index]
elif bdate:
if bplace: #male, month_year, place
text = baptised_partial_date_place['succinct']
else: #male, month_year, no place
text = baptised_partial_date_no_place['succinct']
elif bplace and self.__verbose: #male, no date, place
text = baptised_no_date_place[gender][name_index]
elif bplace: #male, no date, place
text = baptised_no_date_place['succinct']
elif self.__verbose:
text = baptised_no_date_no_place[gender][name_index]
else: #male, no date, no place
text = baptised_no_date_no_place['succinct']
if text:
text = self.__translate_text(text) % value_map
text = text + " "
return text
def get_christened_string(self):
"""
Get a string narrating the christening of the subject.
Example sentences:
Person was christened on Date.
Person was christened on Date in Place.
Person was christened in Month_Year.
Person was christened in Month_Year in Place.
Person was christened in Place.
''
:returns: A sentence about the subject's christening.
:rtype: unicode
"""
if not self.__first_name_used:
name_index = _NAME_INDEX_INCLUDE_NAME
self.__first_name_used = True
else:
name_index = _NAME_INDEX_EXCLUDE_NAME
gender = self.__person.get_gender()
text = ""
cplace = self.__empty_place
cdate = self.__empty_date
cdate_full = False
cdate_mod = False
christening = None
for event_ref in self.__person.get_event_ref_list():
event = self.__db.get_event_from_handle(event_ref.ref)
if event and event.type.value == EventType.CHRISTEN \
and event_ref.role.value == EventRoleType.PRIMARY:
christening = event
break
if christening:
if self.__use_fulldate :
cdate = self.__get_date(christening.get_date_object())
else:
cdate = christening.get_date_object().get_year()
cplace_handle = christening.get_place_handle()
if cplace_handle:
place = self.__db.get_place_from_handle(cplace_handle)
cplace = place.get_title()
cdate_obj = christening.get_date_object()
cdate_full = cdate_obj and cdate_obj.get_day_valid()
cdate_mod = cdate_obj and cdate_obj.get_modifier() != Date.MOD_NONE
else:
return text
value_map = {
'unknown_gender_name' : self.__first_name,
'male_name' : self.__first_name,
'name' : self.__first_name,
'female_name' : self.__first_name,
'christening_date' : cdate,
'christening_place' : cplace,
'month_year' : cdate,
'modified_date' : cdate,
'endnotes' : self.__get_endnote_numbers(event),
}
if cdate and cdate_mod and self.__verbose:
if cplace: #male, date, place
text = christened_modified_date_place[gender][name_index]
else: #male, date, no place
text = christened_modified_date_no_place[gender][name_index]
elif cdate and cdate_mod:
if cplace: #male, date, place
text = christened_modified_date_place['succinct']
else: #male, date, no place
text = christened_modified_date_no_place['succinct']
elif cdate and cdate_full and self.__verbose:
if cplace: #male, date, place
text = christened_full_date_place[gender][name_index]
else: #male, date, no place
text = christened_full_date_no_place[gender][name_index]
elif cdate and cdate_full:
if cplace: #male, date, place
text = christened_full_date_place['succinct']
else: #male, date, no place
text = christened_full_date_no_place['succinct']
elif cdate and self.__verbose:
if cplace: #male, month_year, place
text = christened_partial_date_place[gender][name_index]
else: #male, month_year, no place
text = christened_partial_date_no_place[gender][name_index]
elif cdate:
if cplace: #male, month_year, place
text = christened_partial_date_place['succinct']
else: #male, month_year, no place
text = christened_partial_date_no_place['succinct']
elif cplace and self.__verbose: #male, no date, place
text = christened_no_date_place[gender][name_index]
elif cplace: #male, no date, place
text = christened_no_date_place['succinct']
elif self.__verbose:
text = christened_no_date_no_place[gender][name_index]
else: #male, no date, no place
text = christened_no_date_no_place['succinct']
if text:
text = self.__translate_text(text) % value_map
text = text + " "
return text
def get_married_string(self, family, is_first=True, name_display=None):
    """
    Get a string narrating the marriage of the subject.

    Example sentences:
        Person was married to Spouse on Date.
        Person was married to Spouse.
        Person was also married to Spouse on Date.
        Person was also married to Spouse.

    :param family: The family that contains the Spouse for this marriage.
    :type family: :class:`~gen.lib.family.Family`
    :param is_first: Indicates whether this sentence represents the first
        marriage. If it is not the first marriage, the sentence will
        include "also".
    :type is_first: bool
    :param name_display: An object to be used for displaying names
    :type name_display: :class:`~gen.display.name.NameDisplay`
    :returns: A sentence about the subject's marriage.
    :rtype: unicode
    """
    spouse_handle = ReportUtils.find_spouse(self.__person, family)
    spouse = self.__db.get_person_from_handle(spouse_handle)
    event = ReportUtils.find_marriage(self.__db, family)
    # Defaults keep value_map substitution safe when date/place are unknown.
    date = self.__empty_date
    place = self.__empty_place
    if spouse:
        if not name_display:
            spouse_name = _nd.display(spouse)
        else:
            spouse_name = name_display.display(spouse)
    else:
        # not all families have a spouse.
        spouse_name = _("Unknown")
    if event:
        if self.__use_fulldate:
            mdate = self.__get_date(event.get_date_object())
        else:
            mdate = event.get_date_object().get_year()
        if mdate:
            date = mdate
        place_handle = event.get_place_handle()
        if place_handle:
            place_obj = self.__db.get_place_from_handle(place_handle)
            place = place_obj.get_title()
    relationship = family.get_relationship()

    value_map = {
        'spouse'        : spouse_name,
        'endnotes'      : self.__get_endnote_numbers(event),
        'full_date'     : date,
        'modified_date' : date,
        'partial_date'  : date,
        'place'         : place,
        }

    # date_full selects the phrase variant inside the date tables:
    # 0 = partial date (year only), 1 = full date (day valid),
    # 2 = modified date (about/before/after etc.).
    date_full = 0
    if event:
        dobj = event.get_date_object()
        if dobj.get_modifier() != Date.MOD_NONE:
            date_full = 2
        elif dobj and dobj.get_day_valid():
            date_full = 1

    gender = self.__person.get_gender()

    # This would be much simpler, excepting for translation considerations
    # Currently support FamilyRelType's:
    #     MARRIED       : civil and/or religious
    #     UNMARRIED
    #     CIVIL UNION   : described as a relationship
    #     UNKNOWN       : also described as a relationship
    #     CUSTOM        : also described as a relationship
    #
    # In the future, there may be a need to distinguish between
    # CIVIL UNION, UNKNOWN and CUSTOM relationship types
    # CUSTOM will be difficult as user can supply any arbitrary string to
    # describe type
    #
    # The original implementation repeated the MARRIED/UNMARRIED/other
    # triage for every date/place/verbosity combination (48 near-identical
    # branches); select the four phrase tables once instead.
    if is_first:
        if relationship == FamilyRelType.MARRIED:
            phrase_tables = (marriage_first_date_place, marriage_first_date,
                             marriage_first_place, marriage_first_only)
        elif relationship == FamilyRelType.UNMARRIED:
            phrase_tables = (unmarried_first_date_place, unmarried_first_date,
                             unmarried_first_place, unmarried_first_only)
        else:
            phrase_tables = (relationship_first_date_place,
                             relationship_first_date,
                             relationship_first_place,
                             relationship_first_only)
    else:
        if relationship == FamilyRelType.MARRIED:
            phrase_tables = (marriage_also_date_place, marriage_also_date,
                             marriage_also_place, marriage_also_only)
        elif relationship == FamilyRelType.UNMARRIED:
            phrase_tables = (unmarried_also_date_place, unmarried_also_date,
                             unmarried_also_place, unmarried_also_only)
        else:
            phrase_tables = (relationship_also_date_place,
                             relationship_also_date,
                             relationship_also_place,
                             relationship_also_only)
    date_place_table, date_table, place_table, only_table = phrase_tables

    # Verbose sentences are keyed by gender; terse ones by 'succinct'.
    style = gender if self.__verbose else 'succinct'
    if date and place:
        text = date_place_table[style][date_full]
    elif date:
        text = date_table[style][date_full]
    elif place:
        text = place_table[style]
    else:
        text = only_table[style]

    if text:
        text = self.__translate_text(text) % value_map
        text = text + " "
    return text
def get_child_string(self, father_name="", mother_name=""):
    """
    Get a string narrating the relationship to the parents of the subject.
    Missing information will be omitted without loss of readability.

    Example sentences:
        Person was the son of father_name and mother_name.
        Person was the daughter of father_name and mother_name.

    :param father_name: The name of the Subjects' father.
    :type father_name: unicode
    :param mother_name: The name of the Subjects' mother.
    :type mother_name: unicode
    :returns: A sentence about the subject's parents.
    :rtype: unicode
    """
    value_map = {
        'father'              : father_name,
        'mother'              : mother_name,
        'male_name'           : self.__first_name,
        'name'                : self.__first_name,
        'female_name'         : self.__first_name,
        'unknown_gender_name' : self.__first_name,
        }
    dead = not probably_alive(self.__person, self.__db)

    # Use the subject's first name only the first time it is narrated.
    if self.__first_name_used:
        index = _NAME_INDEX_EXCLUDE_NAME
    else:
        index = _NAME_INDEX_INCLUDE_NAME
        self.__first_name_used = True

    gender = self.__person.get_gender()
    text = ""

    # Pick the phrase table that matches which parent names are known.
    if father_name and mother_name:
        table = child_father_mother
    elif mother_name:
        table = child_mother
    elif father_name:
        table = child_father
    else:
        table = None

    if table is not None:
        if self.__verbose:
            text = table[gender][index][dead]
        else:
            text = table[gender][2]

    if text:
        text = self.__translate_text(text) % value_map
        text = text + " "
    return text
def __get_age_at_death(self):
    """
    Calculate the age the person died.

    Returns a tuple representing (age, age_index), where age is the span
    between birth and death dates when both carry a valid year, and
    age_index is _AGE_INDEX in that case; otherwise (0, _AGE_INDEX_NO_AGE).
    """
    # Fetch birth and death date objects, when the references exist.
    birth = None
    birth_ref = self.__person.get_birth_ref()
    if birth_ref:
        birth_event = self.__db.get_event_from_handle(birth_ref.ref)
        birth = birth_event.get_date_object()
    death = None
    death_ref = self.__person.get_death_ref()
    if death_ref:
        death_event = self.__db.get_event_from_handle(death_ref.ref)
        death = death_event.get_date_object()

    # Without at least a year for each event no age can be calculated.
    # (The original nested a redundant `if span:` inside
    # `if span and span.is_valid():` and repeated the no-age arm three
    # times; a single guard is equivalent.)
    if birth is not None and birth.get_year_valid() \
            and death is not None and death.get_year_valid():
        span = death - birth
        if span and span.is_valid():
            return span, _AGE_INDEX
    return 0, _AGE_INDEX_NO_AGE
|
Forage/Gramps
|
gramps/plugins/lib/libnarrate.py
|
Python
|
gpl-2.0
| 92,851
|
[
"Brian"
] |
752bef4a6abe7aaf8e65cf341e71917cad06464c5e5bacd4fe1b8f21df50ea08
|
"""
Copyright (c) 2015 Andreea Georgescu
Created on Wed Nov 19 00:18:55 2014
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from __future__ import division
import numpy as np
from interp import interp1d
from globalfnc import ConfidenceLevel, chi_squared1
pi = np.pi
name = "SuperCDMS"
modulated = False
energy_resolution_type = "Dirac"
def EnergyResolution(e):
return np.ones_like(e)
FFSD = 'GaussianFFSD'
FFSI = 'HelmFF'
FF = {'SI': FFSI,
'SDPS': FFSD,
'SDAV': FFSD,
}
target_nuclide_AZC_list = \
np.array([[70., 32., 0.19608], [72., 32., 0.27040], [73., 32., 0.07790],
[74., 32., 0.37378], [76., 32., 0.08184]])
target_nuclide_JSpSn_list = \
np.array([[0., 0., 0.], [0., 0., 0.],
[9./2, 0.0392517 * np.sqrt(((2*9./2 + 1)*(9./2 + 1))/(4*pi*9./2)),
.375312 * np.sqrt(((2*9./2 + 1)*(9./2 + 1))/(4*pi*9./2))],
[0., 0., 0.], [0., 0., 0.]])
target_nuclide_mass_list = np.array([65.134, 66.995, 67.9278, 68.8571, 70.7203])
num_target_nuclides = target_nuclide_mass_list.size
def QuenchingFactor(e):
return np.ones_like(e)
Ethreshold = 1.63799
Emaximum = 10.0011
ERmaximum = 10.0011
Efficiency_interp = \
interp1d(np.array([1.63799, 1.93525, 2.35928, 2.37871, 3.12938, 3.15831,
3.8895, 3.90877, 4.2841, 4.30358, 4.63016, 4.64942, 5.38539,
5.4095, 5.78968, 6.15036, 6.16481, 6.8911, 6.92511, 9.16257,
9.18213, 10.0011]),
np.array([0.044225, 0.071339, 0.086737, 0.105692, 0.112107, 0.196045,
0.19975, 0.260222, 0.26388, 0.268395, 0.275658, 0.339739, 0.366008,
0.43731, 0.44819, 0.459066, 0.506, 0.514216, 0.543101, 0.544292,
0.529854, 0.532668]))
def Efficiency(e, er):
return np.ones_like(er)
def Efficiency_ER(e):
return Efficiency_interp(e) if Ethreshold <= e < Emaximum else np.array(0.)
Exposure = 577.0 * (5./7.)
ERecoilList = np.array([1.7, 1.8, 1.9, 2.7])
# BinBkgr = np.array([0.03, 1.4, 1.8, 0.4, 1.7])
# BinEdges_left = np.array([1.64,1.64,1.64,1.64,1.64])
# BinEdges_right = np.array([10.0,10.0,10.0,10.0,10.0])
# BinSize = 8.4
# BinData = np.array([0, 2, 2, 0, 0])
# BinExposure = np.array([577./7.,577./7.,577./7.,577./7.,577./7.])
BinBkgr = np.array([5.33])
BinEdges_left = np.array([1.64])
BinEdges_right = np.array([10.0])
BinSize = 8.4
BinData = np.array([4])
BinExposure = np.array([577. * 5. / 7.])
Expected_limit = 3.32
|
SamWitte/Codds_DarkMatter
|
src/Data/SuperCDMSLessT5.py
|
Python
|
gpl-2.0
| 3,116
|
[
"DIRAC"
] |
084a41409f4f0e7e8fa44dbeb1dc010bef7b43a73536241abbfb2ffcd0dd5d10
|
from dateutil.relativedelta import relativedelta
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, tag
from edc_appointment.models import Appointment
from edc_base import get_utcnow
from edc_facility.import_holidays import import_holidays
from edc_visit_schedule.site_visit_schedules import site_visit_schedules
from edc_visit_tracking.constants import SCHEDULED
from .models import SubjectVisit, CrfOneInline, OtherModel
from .models import CrfOne, BadCrfOneInline
from .helper import Helper
from .visit_schedule import visit_schedule1, visit_schedule2
class TestVisit(TestCase):
    """Tests for CRF visit-model discovery, inline-model navigation and
    previous-visit lookup."""

    helper_cls = Helper

    def setUp(self):
        import_holidays()
        self.subject_identifier = '12345'
        self.helper = self.helper_cls(
            subject_identifier=self.subject_identifier)
        # Register a clean pair of visit schedules for every test.
        site_visit_schedules._registry = {}
        site_visit_schedules.register(visit_schedule=visit_schedule1)
        site_visit_schedules.register(visit_schedule=visit_schedule2)

    def test_crf_visit_model_attrs(self):
        """Assert models using the CrfModelMixin can determine which
        attribute points to the visit model foreignkey.
        """
        self.assertEqual(CrfOne().visit_model_attr(), 'subject_visit')
        self.assertEqual(CrfOne.objects.all().count(), 0)

    def test_crf_visit_model(self):
        """Assert models using the CrfModelMixin can determine which
        visit model is in use for the app_label.
        """
        self.assertEqual(CrfOne().visit_model(), SubjectVisit)
        self.assertEqual(CrfOne.objects.all().count(), 0)

    def test_crf_inline_model_attrs(self):
        """Assert inline model can find visit instance from parent.
        """
        self.helper.consent_and_put_on_schedule()
        appointment = Appointment.objects.all().order_by(
            'timepoint_datetime')[0]
        subject_visit = SubjectVisit.objects.create(
            appointment=appointment, reason=SCHEDULED)
        crf_one = CrfOne.objects.create(subject_visit=subject_visit)
        other_model = OtherModel.objects.create()
        crf_one_inline = CrfOneInline.objects.create(
            crf_one=crf_one, other_model=other_model)
        self.assertEqual(crf_one_inline.visit.pk, subject_visit.pk)

    def test_crf_inline_model_parent_model(self):
        """Assert inline model cannot find parent, raises exception.
        """
        self.helper.consent_and_put_on_schedule()
        # BUGFIX: an unordered ``.all()[0]`` is database-order dependent;
        # order by timepoint so the earliest appointment is used
        # deterministically (matches test_crf_inline_model_attrs).
        appointment = Appointment.objects.all().order_by(
            'timepoint_datetime')[0]
        subject_visit = SubjectVisit.objects.create(
            appointment=appointment,
            reason=SCHEDULED)
        crf_one = CrfOne.objects.create(subject_visit=subject_visit)
        other_model = OtherModel.objects.create()
        self.assertRaises(
            ImproperlyConfigured,
            BadCrfOneInline.objects.create,
            crf_one=crf_one,
            other_model=other_model)

    def test_crf_inline_model_attrs2(self):
        """Assert inline model can find visit instance from parent.
        """
        self.helper.consent_and_put_on_schedule()
        # BUGFIX: deterministic ordering, as above.
        appointment = Appointment.objects.all().order_by(
            'timepoint_datetime')[0]
        subject_visit = SubjectVisit.objects.create(
            appointment=appointment,
            reason=SCHEDULED)
        crf_one = CrfOne.objects.create(subject_visit=subject_visit)
        other_model = OtherModel.objects.create()
        crf_one_inline = CrfOneInline.objects.create(
            crf_one=crf_one,
            other_model=other_model)
        self.assertIsInstance(crf_one_inline.visit, SubjectVisit)

    def test_get_previous_model_instance(self):
        """Assert model can determine the previous.
        """
        self.helper.consent_and_put_on_schedule()
        # Create one visit per appointment, back-dated so they are ordered.
        for index, appointment in enumerate(Appointment.objects.all().order_by(
                'visit_code')):
            SubjectVisit.objects.create(
                appointment=appointment,
                report_datetime=get_utcnow() -
                relativedelta(months=10 - index),
                reason=SCHEDULED)
        subject_visits = SubjectVisit.objects.all().order_by(
            'appointment__timepoint_datetime')
        self.assertEqual(subject_visits.count(), 4)
        subject_visit = subject_visits[0]
        self.assertIsNone(subject_visit.previous_visit)
        subject_visit = subject_visits[1]
        self.assertEqual(subject_visit.previous_visit.pk, subject_visits[0].pk)
        subject_visit = subject_visits[2]
        self.assertEqual(subject_visit.previous_visit.pk, subject_visits[1].pk)
        subject_visit = subject_visits[3]
        self.assertEqual(subject_visit.previous_visit.pk, subject_visits[2].pk)
|
botswana-harvard/edc-visit-tracking
|
edc_visit_tracking/tests/test_visit.py
|
Python
|
gpl-2.0
| 4,695
|
[
"VisIt"
] |
ae7aafb578f76c3ac7a300e22a8a1afba36177fdcf2da24b5be08611441a7f6e
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffy(RPackage):
    """The package contains functions for exploratory oligonucleotide array
    analysis. The dependence on tkWidgets only concerns few convenience
    functions. 'affy' is fully functional without it."""

    homepage = "https://bioconductor.org/packages/affy/"
    git = "https://git.bioconductor.org/packages/affy.git"

    # Bioconductor releases are pinned by commit rather than tarball hash.
    version('1.54.0', commit='a815f02906fcf491b28ed0a356d6fce95a6bd20e')

    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-affyio', type=('build', 'run'))
    depends_on('r-biocinstaller', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-zlibbioc', type=('build', 'run'))
    # This affy release is constrained to the R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.54.0')
|
mfherbst/spack
|
var/spack/repos/builtin/packages/r-affy/package.py
|
Python
|
lgpl-2.1
| 2,060
|
[
"Bioconductor"
] |
a22a16c436729fcf1cc1866fb35de042926d3824d43d76c0041433ff05ae9354
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDeseq(RPackage):
    """Estimate variance-mean dependence in count data from
    high-throughput sequencing assays and test for differential
    expression based on a model using the negative binomial
    distribution."""

    homepage = "https://www.bioconductor.org/packages/DESeq/"
    git = "https://git.bioconductor.org/packages/DESeq.git"

    # Bioconductor release pinned by commit.
    version('1.28.0', commit='738371466e6ccf00179fd35b617c8ba0e1e91630')

    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-biobase', type=('build', 'run'))
    depends_on('r-locfit', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-genefilter', type=('build', 'run'))
    depends_on('r-geneplotter', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
    depends_on('r-rcolorbrewer', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-deseq/package.py
|
Python
|
lgpl-2.1
| 2,110
|
[
"Bioconductor"
] |
05abda210518ec4f32ad2676f1f405eb3df1390f7b8b8405ef877cdc2057c28f
|
import os
import pytest
from sphinx.errors import ConfigError, ExtensionError
import sphinx_gallery
from sphinx_gallery.gen_gallery import _complete_gallery_conf
from sphinx_gallery.scrapers import (figure_rst, mayavi_scraper, SG_IMAGE,
matplotlib_scraper, ImagePathIterator,
save_figures, _KNOWN_IMG_EXTS,
_reset_matplotlib)
from sphinx_gallery.utils import _get_image
@pytest.fixture(scope='function')
def gallery_conf(tmpdir):
    """Sets up a test sphinx-gallery configuration"""
    # Skip if numpy not installed
    pytest.importorskip("numpy")
    # Build a complete gallery configuration rooted at the pytest tmpdir,
    # then point both the examples and output dirs at that same tmpdir.
    gallery_conf = _complete_gallery_conf({}, str(tmpdir), True, False)
    gallery_conf.update(examples_dir=str(tmpdir), gallery_dir=str(tmpdir))
    return gallery_conf
class matplotlib_svg_scraper():
    """Callable scraper that delegates to matplotlib_scraper, forcing SVG
    output; used by the svg parametrization of the save-figures tests."""

    def __repr__(self):
        # The class name is what shows up in generated config dumps.
        return self.__class__.__name__

    def __call__(self, *args, **kwargs):
        # Forward all scraper arguments, overriding only the image format.
        return matplotlib_scraper(*args, format='svg', **kwargs)
@pytest.mark.parametrize('ext', ('png', 'svg'))
def test_save_matplotlib_figures(gallery_conf, ext):
    """Test matplotlib figure save."""
    if ext == 'svg':
        # Swap in the SVG-producing scraper for the svg parametrization.
        gallery_conf['image_scrapers'] = (matplotlib_svg_scraper(),)
    import matplotlib.pyplot as plt  # nest these so that Agg can be set
    plt.plot(1, 1)
    fname_template = os.path.join(gallery_conf['gallery_dir'], 'image{0}.png')
    image_path_iterator = ImagePathIterator(fname_template)
    block = ('',) * 3
    block_vars = dict(image_path_iterator=image_path_iterator)
    image_rst = save_figures(block, block_vars, gallery_conf)
    # One open figure -> exactly one image path consumed and referenced.
    assert len(image_path_iterator) == 1
    fname = '/image1.{0}'.format(ext)
    assert fname in image_rst
    fname = gallery_conf['gallery_dir'] + fname
    assert os.path.isfile(fname)
    # Test capturing 2 images with shifted start number
    image_path_iterator.next()
    image_path_iterator.next()
    plt.plot(1, 1)
    plt.figure()
    plt.plot(1, 1)
    image_rst = save_figures(block, block_vars, gallery_conf)
    # Two new figures after skipping two numbers -> images 4 and 5.
    assert len(image_path_iterator) == 5
    for ii in range(4, 6):
        fname = '/image{0}.{1}'.format(ii, ext)
        assert fname in image_rst
        fname = gallery_conf['gallery_dir'] + fname
        assert os.path.isfile(fname)
def test_save_matplotlib_figures_hidpi(gallery_conf):
    """Test matplotlib hidpi figure save."""
    ext = 'png'
    # Request a 2x (hiDPI) srcset variant in addition to the base image.
    gallery_conf['image_srcset'] = ["2x"]
    import matplotlib.pyplot as plt  # nest these so that Agg can be set
    plt.plot(1, 1)
    fname_template = os.path.join(gallery_conf['gallery_dir'], 'image{0}.png')
    image_path_iterator = ImagePathIterator(fname_template)
    block = ('',) * 3
    block_vars = dict(image_path_iterator=image_path_iterator)
    image_rst = save_figures(block, block_vars, gallery_conf)
    fname = f'/image1.{ext}'
    assert fname in image_rst
    # The 2.0x variant must be referenced in the generated srcset rst...
    assert f'/image1_2_0x.{ext} 2.0x' in image_rst
    assert len(image_path_iterator) == 1
    fname = gallery_conf['gallery_dir'] + fname
    fnamehi = gallery_conf['gallery_dir'] + f'/image1_2_0x.{ext}'
    # ...and both the base and hiDPI files must exist on disk.
    assert os.path.isfile(fname)
    assert os.path.isfile(fnamehi)
    # Test capturing 2 images with shifted start number
    image_path_iterator.next()
    image_path_iterator.next()
    plt.plot(1, 1)
    plt.figure()
    plt.plot(1, 1)
    image_rst = save_figures(block, block_vars, gallery_conf)
    assert len(image_path_iterator) == 5
    for ii in range(4, 6):
        fname = f'/image{ii}.{ext}'
        assert fname in image_rst
        fname = gallery_conf['gallery_dir'] + fname
        assert os.path.isfile(fname)
        fname = f'/image{ii}_2_0x.{ext}'
        assert fname in image_rst
        fname = gallery_conf['gallery_dir'] + fname
        assert os.path.isfile(fname)
def test_save_mayavi_figures(gallery_conf, req_mpl, req_pil):
    """Test file naming when saving figures. Requires mayavi."""
    import numpy as np
    Image = _get_image()
    try:
        from mayavi import mlab
    except ImportError:
        raise pytest.skip('Mayavi not installed')
    import matplotlib.pyplot as plt
    mlab.options.offscreen = True
    gallery_conf.update(
        image_scrapers=(matplotlib_scraper, mayavi_scraper))
    fname_template = os.path.join(gallery_conf['gallery_dir'], 'image{0}.png')
    image_path_iterator = ImagePathIterator(fname_template)
    block = ('',) * 3
    block_vars = dict(image_path_iterator=image_path_iterator)
    plt.axes([-0.1, -0.1, 1.2, 1.2])
    plt.pcolor([[0]], cmap='Greens')
    mlab.test_plot3d()
    image_rst = save_figures(block, block_vars, gallery_conf)
    # Both scrapers ran: one matplotlib image and one mayavi image,
    # numbered 1 and 2 (0 and 3 must not exist).
    assert len(plt.get_fignums()) == 0
    assert len(image_path_iterator) == 2
    assert '/image0.png' not in image_rst
    assert '/image1.png' in image_rst
    assert '/image2.png' in image_rst
    assert '/image3.png' not in image_rst
    # BUGFIX: the original repeated this assertion twice (before and after
    # the isfile checks on images 1 and 2); one occurrence suffices.
    assert not os.path.isfile(fname_template.format(0))
    assert os.path.isfile(fname_template.format(1))
    assert os.path.isfile(fname_template.format(2))
    with Image.open(fname_template.format(1)) as img:
        pixels = np.asarray(img.convert("RGB"))
    assert (pixels == [247, 252, 245]).all()  # plt first
    # Test next-value handling, plus image_scrapers modification
    gallery_conf.update(image_scrapers=(matplotlib_scraper,))
    mlab.test_plot3d()
    plt.axes([-0.1, -0.1, 1.2, 1.2])
    plt.pcolor([[0]], cmap='Reds')
    image_rst = save_figures(block, block_vars, gallery_conf)
    # Only the matplotlib scraper remains: a single new image, number 3.
    assert len(plt.get_fignums()) == 0
    assert len(image_path_iterator) == 3
    assert '/image1.png' not in image_rst
    assert '/image2.png' not in image_rst
    assert '/image3.png' in image_rst
    assert '/image4.png' not in image_rst
    assert not os.path.isfile(fname_template.format(0))
    for ii in range(3):
        assert os.path.isfile(fname_template.format(ii + 1))
    assert not os.path.isfile(fname_template.format(4))
    with Image.open(fname_template.format(3)) as img:
        pixels = np.asarray(img.convert("RGB"))
    assert (pixels == [255, 245, 240]).all()
def _custom_func(x, y, z):
return y['image_path_iterator'].next()
def test_custom_scraper(gallery_conf, monkeypatch):
    """Test custom scrapers."""
    # Test the API contract for custom scrapers
    complete_args = (gallery_conf, gallery_conf['gallery_dir'], True, False)
    with monkeypatch.context() as m:
        # Pretend the sphinx_gallery module itself exposes a scraper hook,
        # so both a callable and the module-name string are accepted.
        m.setattr(sphinx_gallery, '_get_sg_image_scraper',
                  lambda: _custom_func, raising=False)
        for cust in (_custom_func, 'sphinx_gallery'):
            gallery_conf.update(image_scrapers=[cust])
            # smoke test that it works
            _complete_gallery_conf(*complete_args, check_keys=False)
    # degenerate
    # without the monkey patch to add sphinx_gallery._get_sg_image_scraper,
    # we should get an error
    gallery_conf.update(image_scrapers=['sphinx_gallery'])
    with pytest.raises(ConfigError,
                       match="has no attribute '_get_sg_image_scraper'"):
        _complete_gallery_conf(*complete_args, check_keys=False)
    # other degenerate conditions
    gallery_conf.update(image_scrapers=['foo'])
    with pytest.raises(ConfigError, match='Unknown image scraper'):
        _complete_gallery_conf(*complete_args, check_keys=False)
    # A scraper that returns a path that was never produced must be caught.
    gallery_conf.update(image_scrapers=[_custom_func])
    fname_template = os.path.join(gallery_conf['gallery_dir'],
                                  'image{0}.png')
    image_path_iterator = ImagePathIterator(fname_template)
    block = ('',) * 3
    block_vars = dict(image_path_iterator=image_path_iterator)
    with pytest.raises(ExtensionError, match='did not produce expected image'):
        save_figures(block, block_vars, gallery_conf)
    # A scraper must return rst text (a string), not an arbitrary object.
    gallery_conf.update(image_scrapers=[lambda x, y, z: 1.])
    with pytest.raises(ExtensionError, match='was not a string'):
        save_figures(block, block_vars, gallery_conf)
    # degenerate string interface
    gallery_conf.update(image_scrapers=['sphinx_gallery'])
    with monkeypatch.context() as m:
        # Hook attribute exists but is not callable.
        m.setattr(sphinx_gallery, '_get_sg_image_scraper', 'foo',
                  raising=False)
        with pytest.raises(ConfigError, match='^Unknown image.*\n.*callable'):
            _complete_gallery_conf(*complete_args, check_keys=False)
    with monkeypatch.context() as m:
        # Hook is callable but returns something that is not a scraper.
        m.setattr(sphinx_gallery, '_get_sg_image_scraper', lambda: 'foo',
                  raising=False)
        with pytest.raises(ConfigError, match='^Scraper.*was not callable'):
            _complete_gallery_conf(*complete_args, check_keys=False)
@pytest.mark.parametrize('ext', _KNOWN_IMG_EXTS)
def test_figure_rst(ext):
    """Testing rst of images"""
    # A single figure produces the single-image directive.
    figure_list = ['sphx_glr_plot_1.' + ext]
    image_rst = figure_rst(figure_list, '.')
    single_image = f"""
.. image-sg:: /sphx_glr_plot_1.{ext}
:alt: pl
:srcset: /sphx_glr_plot_1.{ext}
:class: sphx-glr-single-img
"""
    assert image_rst == single_image
    # Two figures produce the horizontal multi-image layout.
    image_rst = figure_rst(figure_list + ['second.' + ext], '.')
    image_list_rst = f"""
.. rst-class:: sphx-glr-horizontal
*
.. image-sg:: /sphx_glr_plot_1.{ext}
:alt: pl
:srcset: /sphx_glr_plot_1.{ext}
:class: sphx-glr-multi-img
*
.. image-sg:: /second.{ext}
:alt: pl
:srcset: /second.{ext}
:class: sphx-glr-multi-img
"""
    assert image_rst == image_list_rst
    # test issue #229
    local_img = [os.path.join(os.getcwd(), 'third.' + ext)]
    image_rst = figure_rst(local_img, '.')
    single_image = SG_IMAGE % ("third." + ext, '', "/third." + ext)
    assert image_rst == single_image
@pytest.mark.parametrize('ext', ['png'])
def test_figure_rst_srcset(ext):
    """Testing rst of images"""
    figure_list = ['sphx_glr_plot_1.' + ext]
    # Map of resolution multiplier -> path for the srcset variants
    # (0 is the base-resolution image).
    hipaths = [{0: 'sphx_glr_plot_1.png', 2.0: 'sphx_glr_plot_1_2_0.png'}]
    image_rst = figure_rst(figure_list, '.', srcsetpaths=hipaths)
    single_image = f"""
.. image-sg:: /sphx_glr_plot_1.{ext}
:alt: pl
:srcset: /sphx_glr_plot_1.{ext}, /sphx_glr_plot_1_2_0.{ext} 2.0x
:class: sphx-glr-single-img
"""
    assert image_rst == single_image
    # Two figures, each with a 2.0x variant, use the horizontal layout.
    hipaths += [{0: 'second.png', 2.0: 'second_2_0.png'}]
    image_rst = figure_rst(figure_list + ['second.' + ext], '.',
                           srcsetpaths=hipaths+[])
    image_list_rst = f"""
.. rst-class:: sphx-glr-horizontal
*
.. image-sg:: /sphx_glr_plot_1.{ext}
:alt: pl
:srcset: /sphx_glr_plot_1.png, /sphx_glr_plot_1_2_0.png 2.0x
:class: sphx-glr-multi-img
*
.. image-sg:: /second.{ext}
:alt: pl
:srcset: /second.{ext}, /second_2_0.{ext} 2.0x
:class: sphx-glr-multi-img
"""
    assert image_rst == image_list_rst
    # test issue #229
    local_img = [os.path.join(os.getcwd(), 'third.' + ext)]
    image_rst = figure_rst(local_img, '.')
    single_image = SG_IMAGE % ("third." + ext, '', "/third." + ext)
    assert image_rst == single_image
def test_iterator():
    """ImagePathIterator refuses to yield more paths than its limit."""
    path_iter = ImagePathIterator('foo{0}')
    # Force the internal guard low so iteration trips it immediately.
    path_iter._stop = 10
    with pytest.raises(ExtensionError, match='10 images'):
        for _ in path_iter:
            pass
def test_reset_matplotlib(gallery_conf):
    """Test _reset_matplotlib."""
    import matplotlib
    # Perturb global matplotlib state: a non-default rcParam and an
    # emptied units registry.
    matplotlib.rcParams['lines.linewidth'] = 42
    matplotlib.units.registry.clear()
    _reset_matplotlib(gallery_conf, '')
    # The reset must restore rcParams defaults and re-register unit handlers.
    assert matplotlib.rcParams['lines.linewidth'] != 42
    assert len(matplotlib.units.registry) > 0
|
sphinx-gallery/sphinx-gallery
|
sphinx_gallery/tests/test_scrapers.py
|
Python
|
bsd-3-clause
| 11,618
|
[
"Mayavi"
] |
4d8beca938276b4f956843fa3697901e666a1036d59d993e02832cb06d5da917
|
from pvlib.iotools.tmy import read_tmy2, read_tmy3 # noqa: F401
from pvlib.iotools.epw import read_epw, parse_epw # noqa: F401
from pvlib.iotools.srml import read_srml # noqa: F401
from pvlib.iotools.srml import read_srml_month_from_solardat # noqa: F401
from pvlib.iotools.surfrad import read_surfrad # noqa: F401
from pvlib.iotools.midc import read_midc # noqa: F401
from pvlib.iotools.midc import read_midc_raw_data_from_nrel # noqa: F401
from pvlib.iotools.ecmwf_macc import read_ecmwf_macc # noqa: F401
from pvlib.iotools.ecmwf_macc import get_ecmwf_macc # noqa: F401
from pvlib.iotools.crn import read_crn # noqa: F401
from pvlib.iotools.solrad import read_solrad # noqa: F401
from pvlib.iotools.psm3 import get_psm3 # noqa: F401
from pvlib.iotools.psm3 import read_psm3 # noqa: F401
from pvlib.iotools.psm3 import parse_psm3 # noqa: F401
from pvlib.iotools.pvgis import get_pvgis_tmy, read_pvgis_tmy # noqa: F401
|
anomam/pvlib-python
|
pvlib/iotools/__init__.py
|
Python
|
bsd-3-clause
| 935
|
[
"EPW"
] |
90e17f8a1353c7b80cd4ce05446c5fdbd4071a8577d83d0bb0939c9cff6cf507
|
'''
This file in tracpy is an example init file.
Functions to initialize various numerical experiments.
Contains:
test1
test2
galveston
hab1b
Make a new init_* for your application.
loc Path to directory of grid and output files
nsteps Number of steps to do between model outputs (iter in tracmass)
ndays number of days to track the particles from start date
ff ff=1 to go forward in time and ff=-1 for backward in time
date Start date in datetime object
tseas Time between outputs in seconds
ah Horizontal diffusion in m^2/s.
See project values of 350, 100, 0, 2000. For -turb,-diffusion
av Vertical diffusion in m^2/s.
do3d for 3d flag, do3d=0 makes the run 2d and do3d=1 makes the run 3d
doturb turbulence/diffusion flag.
doturb=0 means no turb/diffusion,
doturb=1 means adding parameterized turbulence
doturb=2 means adding diffusion on a circle
doturb=3 means adding diffusion on an ellipse (anisodiffusion)
lon0 Drifter starting locations in x/zonal direction.
lat0 Drifter starting locations in y/meridional direction.
z0/zpar For 3D drifter movement, turn off twodim flag in makefile.
Then z0 should be an array of initial drifter depths.
The array should be the same size as lon0 and be negative
for under water. Currently drifter depths need to be above
the seabed for every x,y particle location for the script to run.
To do 3D but start at surface, use z0=zeros(ia.shape) and have
either zpar='fromMSL'
choose fromMSL to have z0 starting depths be for that depth below the base
time-independent sea level (or mean sea level).
choose 'fromZeta' to have z0 starting depths be for that depth below the
time-dependent sea surface. Haven't quite finished the 'fromZeta' case.
For 2D drifter movement, turn on twodim flag in makefile.
Then:
set z0 to 's' for 2D along a terrain-following slice
and zpar to be the index of s level you want to use (0 to km-1)
set z0 to 'rho' for 2D along a density surface
and zpar to be the density value you want to use
Can do the same thing with salinity ('salt') or temperature ('temp')
The model output doesn't currently have density though.
set z0 to 'z' for 2D along a depth slice
and zpar to be the constant (negative) depth value you want to use
To simulate drifters at the surface, set z0 to 's'
and zpar = grid['km']-1 to put them in the upper s level
z0='s' is currently not working correctly!!!
In the meantime, do surface using the 3d set up option but with 2d flag set
xp x-locations in x,y coordinates for drifters
yp y-locations in x,y coordinates for drifters
zp z-locations (depths from mean sea level) for drifters
t time for drifter tracks
name Name of simulation to be used for netcdf file containing final tracks
'''
import numpy as np
import os
import netCDF4 as netCDF
import pdb
import glob
from datetime import datetime, timedelta
from matplotlib.mlab import *
import inout
import tools
def galveston():
    '''
    Start drifters outside Galveston Bay and see where they move backward in time.
    '''
    # Location of TXLA model output, selected by hostname.
    # NOTE(review): if neither hostname matches, `loc` is never bound and the
    # return statement below raises NameError -- confirm the expected hosts.
    if 'rainier' in os.uname():
        loc = '/Users/kthyng/Documents/research/postdoc/' # for model outputs
    elif 'hafen.tamu.edu' in os.uname():
        loc = '/home/kthyng/shelf/' # for model outputs

    # Initialize parameters
    nsteps = 10  # steps between model outputs (iter in tracmass)
    ndays = 2  # days to track the particles
    ff = -1  # backward in time
    # Start date
    date = datetime(2009,11, 30, 0)
    # Time between outputs
    # Dt = 14400. # in seconds (4 hours), nc.variables['dt'][:]
    tseas = 4*3600 # 4 hours between outputs, in seconds, time between model outputs
    ah = 100.  # horizontal diffusion, m^2/s
    av = 1.e-5 # m^2/s, or try 5e-6 (vertical diffusion)

    ## Input starting locations as real space lon,lat locations
    lon0,lat0 = np.meshgrid(np.linspace(-95.3,-94.3,10),
                            np.linspace(28.6,29.6,10))
    # pdb.set_trace()
    lon0 = lon0.flatten()
    lat0 = lat0.flatten()

    ## Choose method for vertical placement of drifters
    # Also update makefile accordingly. Choose the twodim flag for isoslice.
    # See above for more notes, but do the following two lines for an isoslice
    z0 = 's' #'z' #'salt' #'s'
    zpar = 29 #-10 #grid['km']-1 # 30 #grid['km']-1
    # Do the following two for a 3d simulation
    # z0 = np.ones(xstart0.shape)*-40 # below the surface
    # zpar = 'fromMSL'
    # pdb.set_trace()

    ## Set flags
    # for 3d flag, do3d=0 makes the run 2d and do3d=1 makes the run 3d
    do3d = 0
    # turbulence/diffusion flag. doturb=0 means no turb/diffusion,
    # doturb=1 means adding parameterized turbulence
    # doturb=2 means adding diffusion on a circle
    # doturb=3 means adding diffusion on an ellipse (anisodiffusion)
    doturb = 3
    # simulation name, used for saving results into netcdf file
    name = 'galveston'

    return loc,nsteps,ndays,ff,date,tseas,ah,av,lon0,lat0,z0,zpar,do3d,doturb,name
def test1(loc=None, nsteps=None, ff=None, ah=None, grid=None, nlon=None, nlat=None, doturb=None, name=None):
    '''
    A drifter test using TXLA model output.

    The comparison case for this simulation is 2D (do3d=0)
    with no turbulence/diffusion (doturb=0).
    Drifters are started at the surface and run forward
    for ten days (ndays=10) from 11/25/09 (in date). Compare results with
    figure in examples/test1.png.

    Optional inputs for making tests easy to run:
        loc         'thredds' or 'local', default = 'thredds'
        nsteps      Number of particle steps to record between model outputs
                    Default = 5
        ff          Backward (-1) or forward (1) in time. Default is forward (1).
        ah          Horizontal viscosity, default = 5
        grid        If input, will not redo this step. Default is to load in grid.
        nlon, nlat  Number of drifters to use in the lon/lat direction in seed array
                    Default = 110, 98 (10 km spacing)
        doturb      What, if any, subgrid parameterization to use. Default is 'none'
        name        Specific name for track and figure files. Default is 'temp'
    '''
    # Location of TXLA model output: thredds server by default, or a
    # machine-dependent local path.
    # 0150 file goes from (2009, 11, 19, 12, 0) to (2009, 12, 6, 0, 0)
    if loc is None or loc == 'thredds':
        loc = 'http://barataria.tamu.edu:8080/thredds/dodsC/NcML/txla_nesting6.nc'
    # BUG FIX: was `loc is 'local'` -- identity comparison against a string
    # literal, which is not guaranteed to be True even when loc == 'local'.
    elif loc == 'local':
        if 'rainier' in os.uname():
            loc = '/Users/kthyng/Documents/research/postdoc/' # for model outputs
        elif 'hafen.tamu.edu' in os.uname():
            loc = '/home/kthyng/shelf/' # for model outputs

    # Fill in defaults for any parameters not supplied by the caller.
    if nsteps is None:
        nsteps = 5
    ndays = .5 #1 #16
    if ff is None:
        ff = 1
    # Start date
    date = datetime(2009,11, 25, 0)
    # Time between model outputs: 4 hours, in seconds
    tseas = 4*3600
    if ah is None:
        ah = 5. #100.
    av = 1.e-5 # m^2/s, or try 5e-6
    # Load the model grid unless the caller already has one.
    if grid is None:
        grid = inout.readgrid(loc)

    ## Input starting locations as real space lon,lat locations.
    # Seed array defaults give roughly 10 km spacing over the whole domain.
    if nlon is None:
        nlon = 110
    if nlat is None:
        nlat = 98
    lon0,lat0 = np.meshgrid(np.linspace(-98.5,-87.5,nlon),np.linspace(22.5,31,nlat)) # whole domain, 10 km

    # Eliminate points that are outside domain or in masked areas
    lon0,lat0 = tools.check_points(lon0,lat0,grid)

    ## Choose method for vertical placement of drifters.
    # Isoslice along the top s level (surface); update makefile (twodim flag)
    # accordingly. For a 3d run use e.g. z0 = depths array, zpar = 'fromMSL'.
    z0 = 's' #'salt' #'s' #'z' #'salt' #'s'
    zpar = 29 #30 #29 #-10 #grid['km']-1 # 30 #grid['km']-1

    # for 3d flag, do3d=0 makes the run 2d and do3d=1 makes the run 3d
    do3d = 0
    # turbulence/diffusion flag. doturb=0 means no turb/diffusion,
    # doturb=1 means adding parameterized turbulence
    # doturb=2 means adding diffusion on a circle
    # doturb=3 means adding diffusion on an ellipse (anisodiffusion)
    if doturb is None:
        doturb = 0
    # simulation name, used for saving results into netcdf file
    if name is None:
        name = 'temp' #'5_5_D5_F'

    return loc,nsteps,ndays,ff,date,tseas,ah,av,lon0,lat0,z0,zpar,do3d,doturb,name,grid
def test2():
    '''
    A drifter test using TXLA model output.
    This simulation is 3D (do3d=1) with turbulence (doturb=1) added in.
    Drifters are started at 10 meters below the mean sea level and run backward (ff=-1)
    for five days from 11/25/09. Compare results with figure in examples/test2.png.
    '''
    # Model output: history file first, then the grid file, both on thredds.
    loc = ['http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6/ocean_his_0150.nc', \
        'http://barataria.tamu.edu:8080//thredds/dodsC/txla_nesting6_grid/txla_grd_v4_new.nc']

    # Run parameters: steps per output, tracking days, backward in time.
    nsteps, ndays, ff = 10, 5, -1
    # Start date
    date = datetime(2009,11, 25, 0)
    # Seconds between model outputs (4 hours).
    tseas = 4*3600
    # Horizontal and vertical diffusion, m^2/s.
    ah, av = 100., 1.e-5

    # Seed a 5x5 lon/lat grid of drifters and flatten to 1-D arrays.
    seed_lon = np.linspace(-94,-93,5)
    seed_lat = np.linspace(28,29,5)
    lon0, lat0 = np.meshgrid(seed_lon, seed_lat)
    lon0, lat0 = lon0.flatten(), lat0.flatten()

    # 3D vertical placement: every drifter starts 10 m below mean sea level.
    z0 = np.ones(lon0.shape)*-10
    zpar = 'fromMSL'

    do3d = 1    # 3d run
    doturb = 1  # parameterized turbulence
    name = 'test2'  # netcdf output name

    return loc,nsteps,ndays,ff,date,tseas,ah,av,lon0,lat0,z0,zpar,do3d,doturb,name
def hab1b():
    '''
    Initialize a drifter run using the starting locations from
    HAB experiment 1b.
    '''
    # Model output location, selected by hostname.
    # NOTE(review): if neither hostname matches, `loc` stays unbound and the
    # np.load below raises NameError -- confirm the expected hosts.
    if 'rainier' in os.uname():
        loc = '/Users/kthyng/Documents/research/postdoc/' # for model outputs
    elif 'hafen.tamu.edu' in os.uname():
        loc = '/home/kthyng/shelf/' # for model outputs

    # Initialize parameters
    nsteps = 10  # steps between model outputs
    ndays = 10  # days to track the particles
    ff = 1  # forward in time
    # Start date
    date = datetime(2009,11, 30, 0)
    # Time between outputs
    # Dt = 14400. # in seconds (4 hours), nc.variables['dt'][:]
    tseas = 4*3600 # 4 hours between outputs, in seconds, time between model outputs
    ah = 100.  # horizontal diffusion, m^2/s
    av = 1.e-5 # m^2/s, or try 5e-6 (vertical diffusion)

    ## Input starting locations as real space lon,lat locations
    # Read in starting locations from HAB experiment to test
    d = np.load(loc + 'hab/data/exp1b/starting_locations.npz')
    lon0 = d['lon0']
    lat0 = d['lat0']

    ## Choose method for vertical placement of drifters
    # Also update makefile accordingly. Choose the twodim flag for isoslice.
    # See above for more notes, but do the following two lines for an isoslice
    z0 = 's' #'salt' #'s'
    zpar = 29 #grid['km']-1 # 30 #grid['km']-1
    # Do the following two for a 3d simulation
    # z0 = np.ones(xstart0.shape)*-40 # below the surface
    # zpar = 'fromMSL'

    # for 3d flag, do3d=0 makes the run 2d and do3d=1 makes the run 3d
    do3d = 0
    # turbulence/diffusion flag. doturb=0 means no turb/diffusion,
    # doturb=1 means adding parameterized turbulence
    # doturb=2 means adding diffusion on a circle
    # doturb=3 means adding diffusion on an ellipse (anisodiffusion)
    doturb = 0
    # simulation name, used for saving results into netcdf file
    name = 'hab1b'

    return loc,nsteps,ndays,ff,date,tseas,ah,av,lon0,lat0,z0,zpar,do3d,doturb,name
|
dcherian/tracpy
|
tests/init.py
|
Python
|
mit
| 12,965
|
[
"NetCDF"
] |
83adcce25ec37f484836a38ae67fc683827f03ec8b50df959765df084f74fb8d
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
import unittest
from django.contrib.auth.models import User
from authentication.models import FacebookBackend
from profiles.models import UserProfile
class FacebookBackendTestCase(unittest.TestCase):
    """Tests for authentication.models.FacebookBackend."""

    def setUp(self):
        # Fixture user; the username doubles as a Facebook uid in these tests.
        self.test_user = User.objects.create(username='4321', password='test')

    def tearDown(self):
        self.test_user.delete()

    def test_get_user(self):
        """get_user returns None for unknown ids and the User otherwise."""
        fbb = FacebookBackend()
        #user does not exist
        user = fbb.get_user(1234)
        self.assertEqual(user, None)
        #user exists
        user = fbb.get_user(self.test_user.id)
        self.assertEqual(user, self.test_user)

    def test_random_password(self):
        """The private password generator yields an 8-character password."""
        fbb = FacebookBackend()
        # Name-mangled access to the double-underscore private helper.
        password = fbb._FacebookBackend__random_password()
        self.assertEqual(len(password), 8)

    def test_get_or_create_user(self):
        """__get_or_create_user returns an existing user or creates a new one."""
        fbb = FacebookBackend()
        #user exists
        facebook_info = {
            'uid': self.test_user.username,
            'proxied_email': 'dna@douglasadams.com',
        }
        user = fbb._FacebookBackend__get_or_create_user(facebook_info)
        self.assertEqual(user, self.test_user)
        #user does not exist and must be created
        #does it return the new user
        facebook_info = {
            'uid': '666',
            'proxied_email': 'dna@douglasadams.com',
        }
        user = fbb._FacebookBackend__get_or_create_user(facebook_info)
        self.assertEqual(user.username, facebook_info['uid'])
        self.assertEqual(user.email, facebook_info['proxied_email'])
        self.assertEqual(str(user.get_profile().facebook_id), facebook_info['uid'])
        #did it create the user in the database correctly?
        user = User.objects.get(username=facebook_info['uid'])
        self.assertEqual(user.username, facebook_info['uid'])
        self.assertEqual(user.email, facebook_info['proxied_email'])
        self.assertEqual(str(user.get_profile().facebook_id), facebook_info['uid'])
|
brianboyer/newsmixer
|
pie/authentication/tests.py
|
Python
|
gpl-3.0
| 2,908
|
[
"Brian"
] |
85ac72f67264691a1016c35e517674deb9b3a091b69f4e0fa740653951e4d960
|
"""
Test functions for models.GLM
"""
from statsmodels.compat import range
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
assert_allclose, assert_, assert_array_less, dec)
from scipy import stats
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.discrete import discrete_model as discrete
from nose import SkipTest
import warnings
# Test Precisions: decimal places passed to assert_almost_equal by the
# comparison mixins; test classes override per-statistic attributes below.
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0

try:
    import matplotlib.pyplot as plt #makes plt available for test functions
    have_matplotlib = True
except Exception:
    # Was a bare `except:`; narrowed to Exception so KeyboardInterrupt and
    # SystemExit are no longer swallowed, while still tolerating any
    # import/backend failure (pyplot can raise more than ImportError).
    have_matplotlib = False

# Flip to True to collect every figure into test_glm.pdf for visual review.
pdf_output = False

if pdf_output:
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages("test_glm.pdf")
else:
    pdf = None
def close_or_save(pdf, fig):
    """Record *fig* in the module PDF report when enabled, then close it."""
    if pdf_output:
        pdf.savefig(fig)
    plt.close(fig)
def teardown_module():
    """Close all open figures and, if enabled, the PDF report."""
    if not have_matplotlib:
        return
    plt.close('all')
    if pdf_output:
        pdf.close()
class CheckModelResultsMixin(object):
    '''
    res2 should be either the results from RModelWrap
    or the results as defined in model_results_data
    '''
    # Default comparison precisions; subclasses override these class
    # attributes to loosen individual checks where reference values differ.
    decimal_params = DECIMAL_4
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                self.decimal_params)

    decimal_bse = DECIMAL_4
    def test_standard_errors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)

    decimal_resids = DECIMAL_4
    def test_residuals(self):
        # Stack the five residual flavors column-wise in the same order used
        # by the stored reference results.
        resids = np.column_stack((self.res1.resid_pearson,
                self.res1.resid_deviance, self.res1.resid_working,
                self.res1.resid_anscombe, self.res1.resid_response))
        assert_almost_equal(resids, self.res2.resids, self.decimal_resids)

    decimal_aic_R = DECIMAL_4
    def test_aic_R(self):
        # R includes the estimation of the scale as a lost dof
        # Doesn't with Gamma though
        if self.res1.scale != 1:
            dof = 2
        else:
            dof = 0
        assert_almost_equal(self.res1.aic+dof, self.res2.aic_R,
                self.decimal_aic_R)

    decimal_aic_Stata = DECIMAL_4
    def test_aic_Stata(self):
        # Stata uses the below llf for aic definition for these families
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                    self.res1.mu, scale=1)
            aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
        else:
            aic = self.res1.aic/self.res1.nobs
        assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)

    decimal_deviance = DECIMAL_4
    def test_deviance(self):
        assert_almost_equal(self.res1.deviance, self.res2.deviance,
                self.decimal_deviance)

    decimal_scale = DECIMAL_4
    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                self.decimal_scale)

    decimal_loglike = DECIMAL_4
    def test_loglike(self):
        # Stata uses the below llf for these families
        # We differ with R for them
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                    self.res1.mu, scale=1)
        else:
            llf = self.res1.llf
        assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)

    decimal_null_deviance = DECIMAL_4
    def test_null_deviance(self):
        assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
                self.decimal_null_deviance)

    decimal_bic = DECIMAL_4
    def test_bic(self):
        # Compared against Stata's BIC definition (see attribute name).
        assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
                self.decimal_bic)

    def test_degrees(self):
        assert_equal(self.res1.model.df_resid,self.res2.df_resid)

    decimal_fittedvalues = DECIMAL_4
    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                self.decimal_fittedvalues)

    def test_tpvalues(self):
        # test comparing tvalues and pvalues with normal implementation
        # make sure they use normal distribution (inherited in results class)
        params = self.res1.params
        tvalues = params / self.res1.bse
        pvalues = stats.norm.sf(np.abs(tvalues)) * 2
        half_width = stats.norm.isf(0.025) * self.res1.bse
        conf_int = np.column_stack((params - half_width, params + half_width))

        assert_almost_equal(self.res1.tvalues, tvalues)
        assert_almost_equal(self.res1.pvalues, pvalues)
        assert_almost_equal(self.res1.conf_int(), conf_int)
class CheckComparisonMixin(object):
    # Cross-checks a GLM fit (self.res1) against the equivalent model from
    # statsmodels.discrete (self.resd): llf, score, and hessian must agree.

    def test_compare_discrete(self):
        res1 = self.res1
        resd = self.resd

        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        score_obs1 = res1.model.score_obs(res1.params)
        score_obsd = resd.model.score_obs(resd.params)
        assert_allclose(score_obs1, score_obsd, rtol=1e-10)

        # score
        score1 = res1.model.score(res1.params)
        assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
        # At the MLE the score should be (numerically) zero.
        assert_allclose(score1, np.zeros(score_obs1.shape[1]), atol=1e-7)

        hessian1 = res1.model.hessian(res1.params, observed=False)
        hessiand = resd.model.hessian(resd.params)
        assert_allclose(hessian1, hessiand, rtol=1e-10)

        hessian1 = res1.model.hessian(res1.params, observed=True)
        hessiand = resd.model.hessian(resd.params)
        assert_allclose(hessian1, hessiand, rtol=1e-9)

    def test_score_test(self):
        res1 = self.res1
        # fake example, should be zero, k_constraint should be 0
        st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
        assert_allclose(st, 0, atol=1e-20)
        assert_allclose(pv, 1, atol=1e-10)
        assert_equal(df, 1)

        st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
        assert_allclose(st, 0, atol=1e-20)
        assert_(np.isnan(pv), msg=repr(pv))
        assert_equal(df, 0)

        # TODO: no verified numbers largely SMOKE test
        exog_extra = res1.model.exog[:,1]**2
        st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
        assert_array_less(0.1, st)
        assert_array_less(0.1, pv)
        assert_equal(df, 1)
class TestGlmGaussian(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_params = DECIMAL_2
        self.decimal_bic = DECIMAL_0
        self.decimal_bse = DECIMAL_3

        from statsmodels.datasets.longley import load
        self.data = load()
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Gaussian()).fit()
        from .results.results_glm import Longley
        self.res2 = Longley()

    def test_compare_OLS(self):
        # Gaussian GLM with identity link coincides with OLS, so llf,
        # score_obs and hessian must agree between the two implementations.
        res1 = self.res1
        # OLS doesn't define score_obs
        from statsmodels.regression.linear_model import OLS
        resd = OLS(self.data.endog, self.data.exog).fit()
        self.resd = resd  # attach to access from the outside

        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        score_obs1 = res1.model.score_obs(res1.params, scale=None)
        score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
        # low precision because of badly scaled exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)

        score_obs1 = res1.model.score_obs(res1.params, scale=1)
        score_obsd = resd.resid[:, None] * resd.model.exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)

        hess_obs1 = res1.model.hessian(res1.params, scale=None)
        hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
        # low precision because of badly scaled exog
        assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     Gauss = r.gaussian
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
    #     self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
    #     self.res2.null_deviance = 185008826 # taken from R. Rpy bug?
class TestGaussianLog(CheckModelResultsMixin):
    # Gaussian family with (non-canonical) log link on simulated data.
    def __init__(self):
        # Test Precision
        self.decimal_aic_R = DECIMAL_0
        self.decimal_aic_Stata = DECIMAL_2
        self.decimal_loglike = DECIMAL_0
        self.decimal_null_deviance = DECIMAL_1

        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs,1)),x,x**2]
        self.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
                        0.001 * np.random.randn(nobs)

        GaussLog_Model = GLM(self.lny, self.X, \
                family=sm.families.Gaussian(sm.families.links.log))
        self.res1 = GaussLog_Model.fit()
        from .results.results_glm import GaussianLog
        self.res2 = GaussianLog()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     GaussLogLink = r.gaussian(link = "log")
    #     GaussLog_Res_R = RModel(self.lny, self.X, r.glm, family=GaussLogLink)
    #     self.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
    # Gaussian family with (non-canonical) inverse-power link, simulated data.
    def __init__(self):
        # Test Precisions
        self.decimal_bic = DECIMAL_1
        self.decimal_aic_R = DECIMAL_1
        self.decimal_aic_Stata = DECIMAL_3
        self.decimal_loglike = DECIMAL_1
        self.decimal_resids = DECIMAL_3

        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # NOTE(review): `y` looks unused, but it consumes 100 draws from the
        # seeded RNG, so removing it would change y_inv and invalidate the
        # stored reference results -- keep it.
        y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs,1)),x,x**2]
        self.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
        InverseLink_Model = GLM(self.y_inv, self.X,
                family=sm.families.Gaussian(sm.families.links.inverse_power))
        InverseLink_Res = InverseLink_Model.fit()
        self.res1 = InverseLink_Res
        from .results.results_glm import GaussianInverse
        self.res2 = GaussianInverse()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     InverseLink = r.gaussian(link = "inverse")
    #     InverseLink_Res_R = RModel(self.y_inv, self.X, r.glm, family=InverseLink)
    #     self.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Binomial family with canonical logit link using star98 dataset.
        '''
        self.decimal_resids = DECIMAL_1
        self.decimal_bic = DECIMAL_2

        from statsmodels.datasets.star98 import load
        from .results.results_glm import Star98
        data = load()
        data.exog = add_constant(data.exog, prepend=False)
        self.res1 = GLM(data.endog, data.exog, \
                family=sm.families.Binomial()).fit()
        #NOTE: if you want to replicate with RModel
        #res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
        #        family=r.binomial, weights=trials)

        self.res2 = Star98()
#TODO:
#Non-Canonical Links for the Binomial family require the algorithm to be
#slightly changed
#class TestGlmBinomialLog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialProbit(CheckModelResultsMixin):
# pass
#class TestGlmBinomialCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialPower(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLoglog(CheckModelResultsMixin):
# pass
#class TestGlmBinomialLogc(CheckModelResultsMixin):
#TODO: need include logc link
# pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
    # Binomial family on 0/1 outcomes (low birth weight data); also compared
    # against discrete.Logit via CheckComparisonMixin.
    def __init__(self):
        from .results.results_glm import Lbw
        self.res2 = Lbw()
        self.res1 = GLM(self.res2.endog, self.res2.exog,
                family=sm.families.Binomial()).fit()

        modd = discrete.Logit(self.res2.endog, self.res2.exog)
        # Start near the GLM solution to save fitting time.
        self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)

    # NOTE(review): no `test_` prefix, so test collectors skip this R
    # comparison; presumably intentional -- confirm before renaming.
    def score_test_r(self):
        res1 = self.res1
        res2 = self.res2
        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 1]**2)
        st_res = 0.2837680293459376  # (-0.5326988167303712)**2
        assert_allclose(st, st_res, rtol=1e-4)

        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 0]**2)
        st_res = 0.6713492821514992  # (-0.8193590679009413)**2
        assert_allclose(st, st_res, rtol=1e-4)

        select = list(range(9))
        select.pop(7)
        res1b = GLM(res2.endog, res2.exog[:, select],
                    family=sm.families.Binomial()).fit()
        tres = res1b.model.score_test(res1b.params,
                                      exog_extra=res1.model.exog[:, -2])
        tres = np.asarray(tres[:2]).ravel()
        tres_r = (2.7864148487452, 0.0950667)
        assert_allclose(tres, tres_r, rtol=1e-4)

        # R commands used to produce the reference numbers above.
        cmd_r = """\
        data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
        data["race_black"] = data["race"] == "black"
        data["race_other"] = data["race"] == "other"
        mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
        options(digits=16)
        anova(mod, test="Rao")
        library(statmod)
        s = glm.scoretest(mod, data["age"]**2)
        s**2
        s = glm.scoretest(mod, data["lwt"]**2)
        s**2
        """
#class TestGlmBernoulliIdentity(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliProbit(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliCloglog(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliPower(CheckModelResultsMixin):
# pass
#class TestGlmBernoulliLoglog(CheckModelResultsMixin):
# pass
#class test_glm_bernoulli_logc(CheckModelResultsMixin):
# pass
class TestGlmGamma(CheckModelResultsMixin):

    def __init__(self):
        '''
        Tests Gamma family with canonical inverse link (power -1)
        '''
        # Test Precisions
        self.decimal_aic_R = -1 #TODO: off by about 1, we are right with Stata
        self.decimal_resids = DECIMAL_2

        from statsmodels.datasets.scotland import load
        from .results.results_glm import Scotvote
        data = load()
        data.exog = add_constant(data.exog, prepend=False)
        # Fitting this model is known to emit warnings; silence them here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res1 = GLM(data.endog, data.exog,
                       family=sm.families.Gamma()).fit()
        self.res1 = res1
        # res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
        res2 = Scotvote()
        res2.aic_R += 2 # R doesn't count degree of freedom for scale with gamma
        self.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
    # Gamma family with (non-canonical) log link.
    def __init__(self):
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_aic_R = DECIMAL_0
        self.decimal_fittedvalues = DECIMAL_3

        from .results.results_glm import CancerLog
        res2 = CancerLog()
        self.res1 = GLM(res2.endog, res2.exog,
            family=sm.families.Gamma(link=sm.families.links.log)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #         family=r.Gamma(link="log"))
    #     self.res2.null_deviance = 27.92207137420696 # From R (bug in rpy)
    #     self.res2.bic = -154.1582089453923 # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
    # Gamma family with (non-canonical) identity link.
    def __init__(self):
        # Test Precisions
        self.decimal_resids = -100 #TODO Very off from Stata?
        self.decimal_params = DECIMAL_2
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_1

        from .results.results_glm import CancerIdentity
        res2 = CancerIdentity()
        # Fitting emits warnings with this link; silence them here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.res1 = GLM(res2.endog, res2.exog,
                family=sm.families.Gamma(link=sm.families.links.identity)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #         family=r.Gamma(link="identity"))
    #     self.res2.null_deviance = 27.92207137420696 # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
    def __init__(self):
        '''
        Tests Poisson family with canonical log link.

        Test results were obtained by R.
        '''
        from .results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        self.data = load()
        # Log-transform the 4th regressor, as in the reference model.
        self.data.exog[:,3] = np.log(self.data.exog[:,3])
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Poisson()).fit()
        self.res2 = Cpunish()
        # compare with discrete, start close to save time
        modd = discrete.Poisson(self.data.endog, self.data.exog)
        self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)
#class TestGlmPoissonIdentity(CheckModelResultsMixin):
# pass
#class TestGlmPoissonPower(CheckModelResultsMixin):
# pass
class TestGlmInvgauss(CheckModelResultsMixin):
    def __init__(self):
        '''
        Tests the Inverse Gaussian family in GLM.

        Notes
        -----
        Used the rndivgx.ado file provided by Hardin and Hilbe to
        generate the data. Results are read from model_results, which
        were obtained by running R_ig.s
        '''
        # Test Precisions
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_0

        from .results.results_glm import InvGauss
        res2 = InvGauss()
        res1 = GLM(res2.endog, res2.exog, \
                family=sm.families.InverseGaussian()).fit()
        self.res1 = res1
        self.res2 = res2
class TestGlmInvgaussLog(CheckModelResultsMixin):
    # Inverse Gaussian family with (non-canonical) log link.
    def __init__(self):
        # Test Precisions
        self.decimal_aic_R = -10 # Big difference vs R.
        self.decimal_resids = DECIMAL_3

        from .results.results_glm import InvGaussLog
        res2 = InvGaussLog()
        self.res1 = GLM(res2.endog, res2.exog,
            family=sm.families.InverseGaussian(link=\
            sm.families.links.log)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #         family=r.inverse_gaussian(link="log"))
    #     self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
    #     self.res2.llf = -12162.72308 # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
    # Inverse Gaussian family with (non-canonical) identity link.
    def __init__(self):
        # Test Precisions
        self.decimal_aic_R = -10 #TODO: Big difference vs R
        self.decimal_fittedvalues = DECIMAL_3
        self.decimal_params = DECIMAL_3

        from .results.results_glm import Medpar1
        data = Medpar1()
        # Fitting emits warnings with this link; silence them here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.res1 = GLM(data.endog, data.exog,
                family=sm.families.InverseGaussian(
                    link=sm.families.links.identity)).fit()
        from .results.results_glm import InvGaussIdentity
        self.res2 = InvGaussIdentity()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #         family=r.inverse_gaussian(link="identity"))
    #     self.res2.null_deviance = 335.1539777981053 # from R, Rpy bug
    #     self.res2.llf = -12163.25545 # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
    def __init__(self):
        """Check the negative binomial family with canonical log link."""
        # Loosened test precisions.
        self.decimal_resid = DECIMAL_1
        self.decimal_params = DECIMAL_3
        self.decimal_resids = -1  # 1 % mismatch at 0
        self.decimal_fittedvalues = DECIMAL_1
        from statsmodels.datasets.committee import load
        self.data = load()
        # Log-transform column 2 and add its interaction with column 1.
        self.data.exog[:, 2] = np.log(self.data.exog[:, 2])
        interaction = self.data.exog[:, 2] * self.data.exog[:, 1]
        self.data.exog = np.column_stack((self.data.exog, interaction))
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.NegativeBinomial()).fit()
        from .results.results_glm import Committee
        expected = Committee()
        # R does not count a degree of freedom for the scale parameter.
        expected.aic_R += 2
        self.res2 = expected

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     r.library('MASS')  # this doesn't work when done in rmodelwrap?
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.negative_binomial(1))
    #     self.res2.null_deviance = 27.8110469364343
#class TestGlmNegbinomial_log(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_power(CheckModelResultsMixin):
# pass
#class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
# pass
#NOTE: hacked together version to test poisson offset
class TestGlmPoissonOffset(CheckModelResultsMixin):
    """Poisson GLM with an `exposure` argument, compared against the
    Cpunish reference results (the exposure is folded back into the
    constant so both fits are comparable)."""
    @classmethod
    def setupClass(cls):
        from .results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        data = load()
        data.exog[:,3] = np.log(data.exog[:,3])
        data.exog = add_constant(data.exog, prepend=False)
        # Constant exposure of 100 for every observation.
        exposure = [100] * len(data.endog)
        cls.data = data
        cls.exposure = exposure
        cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
                       exposure=exposure).fit()
        cls.res1.params[-1] += np.log(100) # add exposure back in to param
                                           # to make the results the same
        cls.res2 = Cpunish()

    def test_missing(self):
        # make sure offset is dropped correctly
        endog = self.data.endog.copy()
        endog[[2,4,6,8]] = np.nan
        mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
                  exposure=self.exposure, missing='drop')
        # 17 rows minus the 4 NaNs that must be dropped from exposure too.
        assert_equal(mod.exposure.shape[0], 13)

    def test_offset_exposure(self):
        # exposure=x and offset=log(x) should have the same effect
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100,3))
        exposure = np.random.uniform(1, 2, 100)
        offset = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset, exposure=exposure).fit()
        offset2 = offset + np.log(exposure)
        mod2 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset2).fit()
        assert_almost_equal(mod1.params, mod2.params)

        # test recreating model: _get_init_kwds must round-trip both
        # offset and exposure so the clone matches the original model.
        mod1_ = mod1.model
        kwds = mod1_._get_init_kwds()
        assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
        assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
        mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
        assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
        assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)

    def test_predict(self):
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100,3))
        exposure = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   exposure=exposure).fit()
        exog1 = np.random.normal(size=(10,3))
        exposure1 = np.random.uniform(1, 2, 10)

        # Doubling exposure time should double expected response
        pred1 = mod1.predict(exog=exog1, exposure=exposure1)
        pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
        assert_almost_equal(pred2, 2*pred1)

        # Check exposure defaults
        pred3 = mod1.predict()
        pred4 = mod1.predict(exposure=exposure)
        pred5 = mod1.predict(exog=exog, exposure=exposure)
        assert_almost_equal(pred3, pred4)
        assert_almost_equal(pred4, pred5)

        # Check offset defaults
        offset = np.random.uniform(1, 2, 100)
        mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
        pred1 = mod2.predict()
        pred2 = mod2.predict(offset=offset)
        pred3 = mod2.predict(exog=exog, offset=offset)
        assert_almost_equal(pred1, pred2)
        assert_almost_equal(pred2, pred3)

        # Check that offset shifts the linear predictor
        mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
        offset = np.random.uniform(1, 2, 10)
        pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
        pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
        assert_almost_equal(pred2, pred1+offset)
def test_prefect_pred():
    """A binomial GLM on perfectly separated iris data must raise."""
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    iris_path = os.path.join(cur_dir, 'results', 'iris.csv')
    iris = np.genfromtxt(iris_path, delimiter=",", skip_header=1)
    # Drop class 2 so the remaining two classes are perfectly separable.
    keep = iris[:, -1] != 2
    y = iris[keep, -1]
    X = add_constant(iris[keep, :-1], prepend=True)
    glm = GLM(y, X, family=sm.families.Binomial())
    assert_raises(PerfectSeparationError, glm.fit)
def test_score_test_OLS():
    """GLM score_test should agree with the OLS LM test (up to scale)."""
    # nicer example than Longley
    from statsmodels.regression.linear_model import OLS
    np.random.seed(5)
    nobs = 100
    sige = 0.5
    x = np.random.uniform(0, 1, size=(nobs, 5))
    x[:, 0] = 1
    beta = 1. / np.arange(1., x.shape[1] + 1)
    y = x.dot(beta) + sige * np.random.randn(nobs)

    res_full = OLS(y, x).fit()
    res_restricted = OLS(y, x[:, :-2]).fit()
    lm_ols = res_full.compare_lm_test(res_restricted, demean=False)

    res_glm = GLM(y, x[:, :-2], family=sm.families.Gaussian()).fit()
    lm_glm = res_glm.model.score_test(res_glm.params, exog_extra=x[:, -2:])
    # difference in df_resid versus nobs in scale see #1786
    assert_allclose(lm_ols[0] * 97 / 100., lm_glm[0], rtol=1e-13)
def test_attribute_writable_resettable():
    """Mutating a family link on one model must not leak into new models."""
    # Regression test for mutables and class constructors.
    data = sm.datasets.longley.load()
    endog, exog = data.endog, data.exog
    first = sm.GLM(endog, exog)
    assert_equal(first.family.link.power, 1.0)
    first.family.link.power = 2.
    assert_equal(first.family.link.power, 2.0)
    # A freshly constructed model must see the default power again.
    second = sm.GLM(endog, exog)
    assert_equal(second.family.link.power, 1.0)
class Test_start_params(CheckModelResultsMixin):
    def __init__(self):
        """Gaussian GLM seeded with OLS start_params must match Longley."""
        # Loosened test precisions.
        self.decimal_resids = DECIMAL_3
        self.decimal_params = DECIMAL_2
        self.decimal_bic = DECIMAL_0
        self.decimal_bse = DECIMAL_3
        from statsmodels.datasets.longley import load
        from .results.results_glm import Longley
        self.data = load()
        self.data.exog = add_constant(self.data.exog, prepend=False)
        ols_params = sm.OLS(self.data.endog, self.data.exog).fit().params
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Gaussian()).fit(
                            start_params=ols_params)
        self.res2 = Longley()
def test_glm_start_params():
    """Fitting from explicit start_params reaches the known optimum (gh-1604)."""
    counts = np.array('0 1 0 0 0 1'.split(), int)
    weights = np.array([50, 1, 50, 1, 5, 10])
    y2 = np.repeat(counts, weights)
    x2 = np.repeat([0, 0, 0.001, 100, -1, -1], weights)
    mod = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
    res = mod.fit(start_params=[-4, -5])
    np.testing.assert_almost_equal(res.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
    """llf after zero iterations equals the loglike at start_params (gh-1728)."""
    y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
    x = np.arange(10, dtype=np.float64)

    def llf(params):
        # Hand-computed Bernoulli log-likelihood for a logit model.
        lin_pred = params[0] + params[1] * x
        pr = 1 / (1 + np.exp(-lin_pred))
        return np.sum(y * np.log(pr) + (1 - y) * np.log(1 - pr))

    for params in ([0, 0], [0, 1], [0.5, 0.5]):
        mod = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
        res = mod.fit(start_params=params, maxiter=0)
        assert_almost_equal(llf(params), res.llf)
def test_formula_missing_exposure():
    """Exposure must stay aligned when rows with NaN are dropped (gh-2083)."""
    import statsmodels.formula.api as smf
    import pandas as pd

    df = pd.DataFrame({'Foo': [1, 2, 10, 149],
                       'Bar': [1, 2, 3, np.nan],
                       'constant': [1] * 4,
                       'exposure': np.random.uniform(size=4),
                       'x': [1, 3, 2, 1.5]})
    family = sm.families.Gaussian(link=sm.families.links.log)
    mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
                  family=family)
    assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')

    # An exposure whose length mismatches the data must raise in both
    # the formula and the array interfaces.
    exposure = pd.Series(np.random.uniform(size=5))
    assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
                  exposure=exposure, family=family)
    assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
                  exposure=exposure, family=family)
@dec.skipif(not have_matplotlib)
def test_plots():
    """Smoke test added-variable / partial-residual / CERES plots for a
    binomial GLM, via both the array and the formula interfaces.

    NOTE(review): `pdf` and `close_or_save` are presumably module-level
    fixtures defined earlier in this file — confirm before reuse.
    """
    np.random.seed(378)
    n = 200
    exog = np.random.normal(size=(n, 2))
    lin_pred = exog[:, 0] + exog[:, 1]**2
    prob = 1 / (1 + np.exp(-lin_pred))
    endog = 1 * (np.random.uniform(size=n) < prob)

    model = sm.GLM(endog, exog, family=sm.families.Binomial())
    result = model.fit()

    import matplotlib.pyplot as plt
    import pandas as pd
    from statsmodels.graphics.regressionplots import add_lowess

    # array interface
    for j in 0,1:
        fig = result.plot_added_variable(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_partial_residuals(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_ceres_residuals(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)

    # formula interface
    data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
    model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
    result = model.fit()

    for j in 0,1:
        xname = ["x1", "x2"][j]
        fig = result.plot_added_variable(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_partial_residuals(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_ceres_residuals(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
    """Simulate a response for `family_class` at the given linear predictor.

    The RNG is re-seeded on every call, so repeated calls with the same
    arguments are reproducible. For the binomial family, binom_version=1
    returns the two-column (successes, failures) form with n = 10 trials.
    """
    np.random.seed(872)
    fam = sm.families
    mu = link().inverse(lin_pred)

    if family_class == fam.Binomial:
        if binom_version == 0:
            return 1 * (np.random.uniform(size=len(lin_pred)) < mu)
        n = 10
        endog = np.empty((len(lin_pred), 2))
        endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n))
                       < mu[:, None]).sum(1)
        endog[:, 1] = n - endog[:, 0]
        return endog
    if family_class == fam.Poisson:
        return np.random.poisson(mu)
    if family_class == fam.Gamma:
        return np.random.gamma(2, mu)
    if family_class == fam.Gaussian:
        return mu + np.random.normal(size=len(lin_pred))
    if family_class == fam.NegativeBinomial:
        from scipy.stats.distributions import nbinom
        return nbinom.rvs(mu, 0.5)
    if family_class == fam.InverseGaussian:
        from scipy.stats.distributions import invgauss
        return invgauss.rvs(mu)
    raise ValueError
def test_summary():
    """Smoke test: summary() works after both IRLS and gradient fits."""
    np.random.seed(4323)
    n = 100
    exog = np.random.normal(size=(n, 2))
    exog[:, 0] = 1
    endog = np.random.normal(size=n)
    for method in ("irls", "cg"):
        model = sm.GLM(endog, exog, family=sm.families.Gaussian())
        rslt = model.fit(method=method)
        rslt.summary()
def test_gradient_irls():
    """
    Compare the results when using gradient optimization and IRLS.

    For every (family, link) pair a data set is simulated on a linear
    predictor chosen so the fit converges, then the Newton fit must
    match the IRLS fit in params, llf, scale, and bse.
    """
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)

    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
                (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
                (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
                (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
                (fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
                (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]

    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1

    for family_class, family_links in families:
        for link in family_links:
            for binom_version in 0,1:

                # The two-column form only exists for the binomial family.
                if family_class != fam.Binomial and binom_version == 1:
                    continue

                # Per-combination linear predictors chosen so the mean
                # stays in the family/link domain and the fit converges.
                if (family_class, link) == (fam.Poisson, lnk.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.log):
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)

                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog, family=family_class(link=link))
                rslt_irls = mod_irls.fit(method="IRLS")

                # Try with and without starting values.
                for max_start_irls, start_params in (0, rslt_irls.params), (1, None):
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog, family=family_class(link=link))
                    rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
                                                     start_params=start_params,
                                                     method="newton")

                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params, rtol=1e-6, atol=1e-6)

                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)

                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)

                    # Get the standard errors using expected information.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))

                    assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=1e-6)
if __name__=="__main__":
    #run_module_suite()
    #taken from Fernando Perez:
    # Run this module's tests under nose: verbose, stop at first failure,
    # and drop into pdb on errors.
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
                   exit=False)
|
DonBeo/statsmodels
|
statsmodels/genmod/tests/test_glm.py
|
Python
|
bsd-3-clause
| 37,718
|
[
"Gaussian"
] |
ac3262475ffb23ff71d02277932505630edf3ab171eaa7ce90c1f8a5a6446d5f
|
import vtk
from vmtk import vmtkscripts
from vmtk import pypes
########################################################
# #
# Author: Noel Conlisk #
# Email: noecon@gmail.com #
# Script function: Creates a volume image from #
# a stack of dicom files #
# #
# Prerequisites: VMTK and VTK must be installed #
# #
########################################################
# Input file format
# NOTE(review): `image_type` is assigned but never used below — the
# reader format is hard-coded as "-f dicom" in the pipe string instead.
image_type = 'dicom'

# path to files
path = 'C:\\Some\\folder\\folder' # Enter path to dicom files

# read in images: build a vmtk pipe that reads the DICOM stack from
# `path` and hands the resulting volume image to the viewer.
myArguments = 'vmtkimagereader -f dicom -d %s --pipe vmtkimageviewer' % path
myPype = pypes.PypeRun(myArguments)
|
nconlisk/python
|
VTK/vol_from_images.py
|
Python
|
gpl-3.0
| 919
|
[
"VTK"
] |
2a33b5303b8c0804ee19695e43d10e23c7a1b7f982861805f5c2339031616628
|
#!/usr/bin/env python
'''
NN modules
Based on the paper:
2012 Nov IEEE Signal Processing Magazine
"Deep Neural Networks for Acoustic Modeling in Speech Recognition
-The shared views of four research groups"
'''
class layer():
    """A single network layer: depth index, width, and a weight matrix."""

    kind = 'layer' # property shared by all instances of this class

    def __init__(self, depth=0, breadth=1, weights=None):
        """Create a layer.

        depth   -- position of the layer in the network (0 = input)
        breadth -- number of units in this layer
        weights -- (breadth, 1) array; zero-initialized when omitted

        The original defaults referenced the undefined names N and w and
        used an eagerly-evaluated array default; None is used as the
        sentinel instead.
        """
        self.depth = depth
        self.breadth = breadth
        self.weights = np.zeros((breadth, 1)) if weights is None else weights
class neuron():
    """Placeholder for a single unit.

    The original stub nested a `logistic` function whose only statement
    was the undefined name `bla`, which raised NameError when called;
    replaced with a valid no-op constructor.
    """

    def __init__(self):
        pass
def sigmoid(x):
    """Elementwise logistic non-linearity: 1 / (1 + exp(-x)).

    x -- scalar or array-like of pre-activations; returns an ndarray
    (or 0-d array for a scalar) of values in (0, 1).

    The original loop version depended on the undefined globals y, w, b
    and J and contained the invalid token `1 ./`; NumPy broadcasting
    vectorizes the computation directly.
    """
    return 1.0 / (1.0 + np.exp(-np.asarray(x, dtype=float)))
def softmax(x, K):
    """Multi-class non-linearity: class probabilities for the first K scores.

    p_k = exp(x_k) / sum_j exp(x_j) over k = 0..K-1. The scores are
    shifted by their maximum before exponentiation so exp() cannot
    overflow; the shift cancels in the ratio.

    The original body referenced the undefined names sumk, p, j and the
    placeholder `x[???]`, and never returned a value.
    """
    scores = np.asarray(x, dtype=float)[:K]
    shifted = np.exp(scores - scores.max())
    return shifted / shifted.sum()
def crossentropy(d, p):
    """Cross-entropy cost C = -sum_j d_j * log(p_j).

    d -- 0/1 target indicators; p -- predicted class probabilities.
    Returns the scalar cost. The original accumulated into an undefined
    name `acc` and assigned C without returning it.
    """
    d = np.asarray(d, dtype=float)
    p = np.asarray(p, dtype=float)
    return -np.sum(d * np.log(p))
def minibatch(x, B):
    """Split x into B equal, contiguous mini-batches.

    Returns a (B, len(x)//B) array; any trailing remainder is dropped.

    The original indexed the undefined name `sz`, allocated a single-row
    buffer but wrote B rows into it, and sliced overlapping windows
    x[b:b+sz_batch] instead of disjoint batches.
    """
    x = np.asarray(x).ravel()
    sz_batch = int(x.size // B)
    batches = np.zeros((B, sz_batch))
    for b in range(B):
        batches[b] = x[b * sz_batch:(b + 1) * sz_batch]
    return batches
def update():
    # NOTE(review): unfinished stub — `weights`, `biases`, `d_w` and `d_b`
    # are all undefined here, so this function raises NameError if called.
    # It only sketches the intended parameter-update step.
    if weights:
        d_w
    if biases:
        # update rule for weights can be derived by treating them as
        # weights on connections coming from units that always have value=1
        d_b
# Script entry point: generate random data to play with.
# Fixes in this guard: `=` -> `==` (the original was a syntax error),
# seeding via np.random (the `random` module was never imported), and
# np.random.rand (np.rand does not exist).
if __name__ == '__main__':
    np.random.seed(1)
    x = np.random.rand(100, 1)
|
yunque/PyML
|
pynn.py
|
Python
|
gpl-2.0
| 1,960
|
[
"NEURON"
] |
2227ada9aef7784cb1fe858f5af39671c0c24bdbd8273a4cc157ea879bdd5aae
|
import sys
import os
import copy
from subprocess import call
from rdkit import Chem
from rdkit.Chem import AllChem
import coot_git
import pyrogen_swig as pysw
import pyrogen_boost
import atom_types
from optparse import OptionParser
import tautomer
import urllib
from jay_util import *
# Module-wide state. NOTE(review): `global` at module scope is a no-op in
# Python; the statements are kept byte-identical and only documented here.
global pyrogen_version
pyrogen_version = "0.0-pre"

global run_mogul
global smiles_dict
run_mogul = True     # when True, pyrogen shells out to the CSD mogul program
smiles_dict = False  # lazy cache for smiles.tab; False means "not loaded yet"
def make_mogul_ins_file(mogul_ins_file_name, mogul_out_file_name, sdf_file_name):
    """Write a mogul instruction file and return a truthy success flag.

    mogul_ins_file_name -- path of the .ins file to create
    mogul_out_file_name -- path mogul should write its output to
    sdf_file_name       -- molecule file mogul should analyse

    The original returned the *closed* file handle (truthy but useless);
    callers only test truthiness, so True is returned instead, and the
    handle is now closed via a context manager. Raises IOError/OSError
    when the file cannot be opened.
    """
    with open(mogul_ins_file_name, 'w') as f:
        f.write('mogul molecule file ')
        f.write(sdf_file_name)
        f.write('\n')
        f.write('mogul output   file ')
        f.write(mogul_out_file_name)
        f.write('\n')
        f.write('mogul output distribution all on\n')
        f.write('bond all\n')
        f.write('angle all\n')
        # f.write('torsion all\n')
        # f.write('ring all\n')
        f.write('config output format CSV\n')
        f.write('config output items fragment_type atom_indices query_value nhits mean median sd z-score dmin\n')
        f.write('config search all filter exclude_solvents\n')
        f.write('config output invalid_fragments exclude\n')
    return True
# return True for good, False for bad/not-run
#
def execute_mogul(sdf_file_name, mogul_ins_file_name, mogul_out_file_name):
    """Run mogul on sdf_file_name; return True for good, False for bad/not-run."""
    ins_ok = make_mogul_ins_file(mogul_ins_file_name, mogul_out_file_name,
                                 sdf_file_name)
    if not ins_ok:
        return False
    if not run_mogul:
        return False
    # mogul signals success with a zero exit status.
    return call(['mogul', '-ins', mogul_ins_file_name]) == 0
def atom_name_from_atomic_number_and_count(element, count):
    """Build an atom name such as 'C2' from an element symbol and a counter."""
    return '%s%s' % (element, count)
def add_atom_names(mol):
    """Ensure every atom in mol carries a 'name' property; return all names.

    Atoms that already have a name keep it. Atoms without one get a name
    built from the element symbol plus a per-element counter (C1, C2,
    N1, ...), padded to PDB-style width and stored back on the atom.
    """
    counts = {}
    atom_names = []
    for atom in mol.GetAtoms():
        try:
            atom_names.append(atom.GetProp('name'))
        except KeyError:
            z = atom.GetAtomicNum()
            counts[z] = counts.get(z, 0) + 1
            ele = atom.GetSymbol().upper()
            fresh = atom_name_from_atomic_number_and_count(ele, counts[z])
            padded = pad_atom_name(fresh, ele)
            atom.SetProp("name", padded)
            atom_names.append(padded)
    return atom_names
def convert_to_coot_bond_type(rdkit_type):
    """Map an RDKit bond type onto the coot/mmCIF bond-type string.

    Unrecognized bond types fall back to 'single', as before.
    """
    mapping = {Chem.rdchem.BondType.SINGLE:      'single',
               Chem.rdchem.BondType.AROMATIC:    'aromatic',
               Chem.rdchem.BondType.DOUBLE:      'double',
               Chem.rdchem.BondType.TRIPLE:      'triple',
               Chem.rdchem.BondType.ONEANDAHALF: 'deloc'}
    return mapping.get(rdkit_type, 'single')
def pad_atom_name(name, element):
    """Pad an atom name toward the 4-column PDB convention.

    One-letter elements get a leading blank (and a trailing one for
    2-character names); two-letter elements are left-aligned with a
    trailing blank. Names of other lengths are returned unchanged.
    """
    if len(element) == 1:
        if len(name) == 2:
            return ' ' + name + ' '
        if len(name) == 3:
            return ' ' + name
    elif len(element) == 2:
        if len(name) in (2, 3):
            return name + ' '
    return name
def is_smiles_file(file_name):
    """Return True when file_name has a .smi extension.

    Bug fix: the original tested bits[1] after an *unbounded* rsplit, so
    a name with more than one dot (e.g. 'a.b.smi') checked the wrong
    component; the last component is the extension.
    """
    bits = file_name.rsplit(".")
    return len(bits) > 1 and bits[-1] == 'smi'
def is_comp_id(comp_id):
    """A chemical component id is exactly three characters long."""
    return 3 == len(comp_id)
def is_mdl_file(file_name):
    """Return True when file_name ends in a .mol or .mdl extension."""
    bits = file_name.rsplit(".")
    if len(bits) < 2:
        return False
    # The last component is the extension.
    return bits[-1] in ('mol', 'mdl')
# return the contents of file_name
def read_file(file_name):
    """Return the entire contents of file_name.

    Uses a context manager so the handle is closed; the original leaked
    the open file object.
    """
    with open(file_name) as f:
        return f.read()
# return False or a file_name
#
def get_pdbe_cif_for_comp_id(comp_id):
    # Download the PDBe chemical-component mmCIF for comp_id over FTP and
    # save it locally as "PDBe-<comp_id>.cif"; returns the local file name.
    # NOTE(review): Python 2 code (print statements, urllib.urlretrieve).
    # On download failure it exits the process with status 2 rather than
    # returning False as the comment above the function suggests.
    try:
        file_name = "PDBe-" + comp_id + ".cif"
        url = 'ftp://ftp.ebi.ac.uk/pub/databases/msd/pdbechem/files/mmcif/' + comp_id + '.cif'
        status = urllib.urlretrieve(url, file_name)
        return file_name
    except IOError as e:
        print e
        print "Failed: Can't ftp fr", url, "and write file", file_name
        exit(2)
def make_restraints_for_bond_orders(mol):
    """Build a minimal restraints dict (_chem_comp_bond and _chem_comp) from mol.

    Each bond contributes [name_1, name_2, coot_bond_type, 1.0, 1.0];
    the distance/sigma entries are placeholders.
    """
    bond_list = []
    for bond in mol.GetBonds():
        coot_type = convert_to_coot_bond_type(bond.GetBondType())
        begin_name = bond.GetBeginAtom().GetProp('name')
        end_name = bond.GetEndAtom().GetProp('name')
        bond_list.append([begin_name, end_name, coot_type, 1.0, 1.0])
    return {'_chem_comp_bond': bond_list,
            '_chem_comp': [mol.GetProp('comp_id'),
                           mol.GetProp('comp_id'),
                           mol.GetProp('name'),
                           'non-polymer',
                           mol.GetNumAtoms(),
                           mol.GetNumAtoms(),
                           '.']}
# return True if mogul is not run or mogul exe is in place.
# return False if mogul is expected but not found.
def test_for_mogul():
    # Return True when mogul is not wanted (run_mogul is False) or the
    # mogul executable is on PATH; return False when mogul is expected
    # but cannot be found. `which` presumably comes from the jay_util
    # star import at the top of the file — confirm.
    if run_mogul:
        mogol_exe = which('mogul')
        if (mogol_exe == None):
            print "mogul not found in path"
            return False
        else:
            return True
    else:
        return True # OK, really
# this can throw a TypeError
#
def get_smiles_from_comp_id(comp_id):
    # Look up the SMILES string for comp_id in the smiles.tab table,
    # loading the table lazily on first use. Raises TypeError when the
    # table file was missing (smiles_dict is then True, not a dict) and
    # KeyError when comp_id has no entry.
    global smiles_dict
    if (not smiles_dict):
        read_smiles_tab('smiles.tab')
    return smiles_dict[comp_id]
# return a dictionary or False (if the file does not exist)
# (can this go inside get_smiles_from_comp_id?)
#
def read_smiles_tab(file_name):
    """Populate the global smiles_dict from a whitespace-separated table.

    Each line is "<comp_id> <...> <smiles> ...": column 0 keys the dict
    and column 2 is the stored SMILES string. Returns True on success;
    returns False when the file is missing, in which case smiles_dict is
    set to True so the lookup is not retried.

    The original leaked the open file handle; a context manager closes
    it now.
    """
    global smiles_dict
    try:
        smiles_dict = {}
        with open(file_name) as f:
            for line in f:
                bits = line.rstrip().rsplit()
                smiles_dict[bits[0]] = bits[2]
        return True
    except IOError as e:
        smiles_dict = True # we've tested for it
        return False
# return a pair, the smiles string and the molecule name (which might be blank)
#
def get_smiles_from_file(file_name):
    """Return (smiles, molecule_name) from the first line of a SMILES file.

    The name part may be '' when the line holds only the SMILES string.
    Returns (False, False) when the file does not exist. The original
    leaked the file handle; a context manager closes it now.
    """
    if not os.path.exists(file_name):
        return False, False
    with open(file_name) as f:
        parts = f.readline().split()
    return parts[0], ' '.join(parts[1:])
def make_picture(mol, conf_id, comp_id, output_postfix):
    """Render mol (conformer conf_id) to "<comp_id>-<output_postfix>.png"."""
    png_name = '%s-%s.png' % (comp_id, output_postfix)
    make_picture_to_file(mol, conf_id, png_name)
def make_picture_to_file(mol, conf_id, output_file_name):
    # Draw mol's conformer conf_id to a 300x300 PNG. Import errors
    # (RDKit Draw / PIL missing) and drawing ValueErrors are reported
    # but not raised, so callers continue without a picture.
    # NOTE(review): Python 2 code (print statements, PIL's old `Image`
    # module name).
    try:
        from rdkit.Chem import Draw
        import Image
        state = Draw.MolToFile(mol, size=(300,300), fileName=output_file_name, confId=conf_id)
        # print 'INFO:: wrote PNG "' + output_file_name + '"'
        # img = Draw.MolToImage(mol, fitImage=True, size=(900,900))
        # img2 = img.resize((300, 300), Image.ANTIALIAS)
        # img2.save(output_file_name + "resampled.png")
    except ImportError as e:
        print 'ImportError:', e
    except ValueError as e:
        print 'ValueError in make_picture():', e
def make_restraints_from_smiles(smiles_string, comp_id, compound_name, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name, quartet_planes, quartet_hydrogen_planes, use_mmff, match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_file_for_names_match):
    """Build restraints for a molecule given as a SMILES string.

    Exits the process when mogul is required but not available
    (mirroring the other make_restraints_from_* entry points).
    """
    if not test_for_mogul():
        # return False
        exit(1)
    mol = Chem.MolFromSmiles(smiles_string)
    if compound_name:
        mol.SetProp('_Name', compound_name)
    return make_restraints(mol, comp_id, mogul_dir, name_stub,
                           pdb_out_file_name, mmcif_dict_name,
                           quartet_planes, quartet_hydrogen_planes,
                           use_mmff, match_atom_names_to_dict_flag,
                           comp_id_list_for_names_match,
                           dict_file_for_names_match)
# return the molecule and return value from make_restraints
#
def make_restraints_from_mdl(mol_file_name, comp_id, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name, quartet_planes, quartet_hydrogen_planes, use_mmff, match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_files_for_names_match):
    # Build restraints for a molecule read from an MDL mol file; returns
    # (mol, make_restraints() result). Exits the process when mogul is
    # required but unavailable, or when the input file does not exist.
    # NOTE(review): Python 2 print statement below.
    if (not (test_for_mogul())):
        # return False, False
        exit(1)
    if not os.path.exists(mol_file_name):
        print "No such file:", mol_file_name
        exit(1)
    # compound_name placeholder; the real name comes from the mol block.
    compound_name = '.'
    m = Chem.MolFromMolFile(mol_file_name)
    return m, make_restraints(m, comp_id, mogul_dir, name_stub, pdb_out_file_name, mmcif_dict_name,
                              quartet_planes, quartet_hydrogen_planes, use_mmff,
                              match_atom_names_to_dict_flag, comp_id_list_for_names_match,
                              dict_files_for_names_match)
# return a list of (mol, comp_id) pairs for every ligand in the cif
# file. Often only one of course.
#
def make_restraints_from_mmcif_dict(cif_file_name_in, comp_id, mogul_dir,
                                    output_dir, output_postfix,
                                    quartet_planes, quartet_hydrogen_planes, use_mmff,
                                    pdb_out_file_name, mmcif_restraints_out_file_name):
    # Build restraints for one or all components of an mmCIF dictionary.
    # Returns a list of (mol, comp_id) pairs — often just one.
    # NOTE(review): the TRY_ALL_COMP_IDS branch reads the *global*
    # `options.output_dir` (from the __main__ OptionParser block) instead
    # of the `output_dir` parameter, which is otherwise unused — confirm
    # whether the parameter was meant to be used here.
    if not test_for_mogul():
        return [(None, None)]

    if comp_id == "TRY_ALL_COMP_IDS":
        types = pysw.types_from_mmcif_dictionary(cif_file_name_in)
        l = []
        for r_type in types:
            # Per-component output file names: <type>-<postfix>.pdb/.cif
            file_name_stub = r_type + "-" + output_postfix
            if options.output_dir != ".":
                file_name_stub = os.path.join(options.output_dir, file_name_stub)
            pdb_out_file_name_local = file_name_stub + ".pdb"
            mmcif_restraints_out_file_name_local = file_name_stub + ".cif"
            #
            t_mol = make_restraints_from_mmcif_dict_single(cif_file_name_in, r_type, mogul_dir,
                                                           output_postfix,
                                                           quartet_planes,
                                                           quartet_hydrogen_planes, use_mmff,
                                                           pdb_out_file_name_local,
                                                           mmcif_restraints_out_file_name_local)
            l.append((t_mol, r_type))
        return l
    else:
        # just the one
        m = make_restraints_from_mmcif_dict_single(cif_file_name_in, comp_id, mogul_dir, output_postfix,
                                                   quartet_planes, quartet_hydrogen_planes, use_mmff,
                                                   pdb_out_file_name, mmcif_restraints_out_file_name)
        return [(m, comp_id)]
# return a mol, given a sensible comp_id.
#
# Return None on failure
#
def make_restraints_from_mmcif_dict_single(cif_file_name_in, comp_id, mogul_dir, output_postfix,
                                           quartet_planes, quartet_hydrogen_planes, use_mmff,
                                           pdb_out_file_name, mmcif_restraints_out_file_name):
    # Build restraints for a single comp_id taken from an mmCIF
    # dictionary. Returns the molecule from make_restraints(), or False
    # when the dictionary has no atoms for comp_id.
    # NOTE(review): Python 2 print statements; on the mogul-missing path
    # this returns [(None, None)] (a list), inconsistent with the
    # "return None on failure" comment above the function.
    # print 'in make_restraints_from_mmcif_dict_single() comp_id is ', comp_id
    # print 'in make_restraints_from_mmcif_dict_single() cif_file_name_in is ', cif_file_name_in
    if not test_for_mogul():
        return [(None, None)]

    mogul_file_name_stub = comp_id + '-' + output_postfix # file component of files within mogul_dir

    m = pyrogen_boost.rdkit_mol_chem_comp_pdbx(cif_file_name_in, comp_id)

    if False: # debugging
        for atom in m.GetAtoms():
            try:
                name = atom.GetProp('name')
                chir = atom.GetProp('_CIPCode')
                print ' atom', atom, 'name', name, 'chir', chir
            except KeyError as e:
                print 'pyrogen.py:: atom', atom, " with name ", name, ' has no _CIPCode property'
                pass

    # maybe user didn't select the correct comp_id for the given dictionary mmcif
    if m.GetNumAtoms() == 0:
        print 'No atoms for comp_id', comp_id
        return False
    else :

        name = ''
        try:
            name = m.GetProp('_Name')
        except KeyError:
            print 'caught KeyError in make_restraints_from_mmcif_dict_single() trying GetProp _Name'

        return make_restraints(m, comp_id, mogul_dir, mogul_file_name_stub,
                               pdb_out_file_name, mmcif_restraints_out_file_name,
                               quartet_planes, quartet_hydrogen_planes, use_mmff, False, False, False)
def n_hydrogens(mol):
    """Count the hydrogen atoms (atomic number 1) in mol."""
    return sum(1 for atom in mol.GetAtoms() if atom.GetAtomicNum() == 1)
# return sane_H_mol
#
def make_restraints(m, comp_id, mogul_dir, mogul_file_name_stub, pdb_out_file_name, mmcif_dict_name,
                    quartet_planes, quartet_hydrogen_planes, use_mmff,
                    match_atom_names_to_dict_flag,
                    comp_id_list_for_names_match,
                    dict_files_for_names_match):
    # Core restraint generation: add hydrogens, apply pH-style H shifts,
    # embed and optimize a 3D conformer, type the atoms, optionally run
    # mogul, then write the mmCIF restraints and a regularized PDB.
    # Returns the sane (H-shifted) molecule on success, False on failure.
    # NOTE(review): Python 2 code throughout (print statements and the
    # `print >> file(...)` idiom).

    # test here (or in calling functions) if m is sane (i.e. is an rdkit molecule)
    if not isinstance(m, Chem.rdchem.Mol):
        print 'ERROR:: not a molecule'
        return False

    n_attempts = 20 * m.GetNumAtoms() # default is 10 * number of atoms.

    # pH-dependent protonation or deprotonation
    #
    do_hydrogen_atoms_shift = True

    try:
        compound_name = m.GetProp('_Name');
    except KeyError:
        # this happens all the time when we start from a SMILES, users don't need to see it.
        # print 'caught key error in trying to get _Name in make_restraints() for m'
        compound_name = '.'
    except AttributeError as e:
        # Do we need to see this? Perhaps make_restraints() needs to return a status.
        # print 'AttributeError: problem with molecule in make_restraints()', e, ' on object:', m
        return

    m_H = m
    if n_hydrogens(m) == 0:
        m_H = AllChem.AddHs(m)

    if do_hydrogen_atoms_shift:
        # simple sane pH H-exchanges
        sane_H_mol = pyrogen_boost.hydrogen_transformations(m_H)
        # print >>file('sane_H.mol','w+'),Chem.MolToMolBlock(sane_H_mol)
    else:
        sane_H_mol = m_H

    # This makes UFF types, which can fail sometimes.
    conf_id = AllChem.EmbedMolecule(sane_H_mol, maxAttempts=n_attempts)

    if use_mmff:
        AllChem.MMFFOptimizeMolecule(sane_H_mol, confId=conf_id)
        if False: # debugging output
            ba = pyrogen_boost.mmff_bonds_and_angles(sane_H_mol) # uses _forcefield_ of the molecule
            n_bonds = ba.bonds_size()
            if n_bonds > 0:
                for i_bond in range(n_bonds):
                    bond = ba.get_bond(i_bond)
                    print bond.get_idx_1(), bond.get_idx_2(), bond.get_type(), \
                          bond.get_resting_bond_length(), bond.get_sigma()
            n_angles = ba.angles_size()
            if n_angles > 0:
                for i_angle in range(n_angles):
                    angle = ba.get_angle(i_angle)
                    print angle.get_idx_1(), angle.get_idx_2(), angle.get_idx_3(), \
                          angle.get_resting_angle(), angle.get_sigma()
    else:
        AllChem.UFFOptimizeMolecule(sane_H_mol, confId=conf_id)
        # AllChem.UFFOptimizeMolecule(sane_H_mol)

    atom_names = add_atom_names(sane_H_mol)
    all_set = atom_types.set_atom_types(sane_H_mol) # has deloc bonds now, potentially

    # debug sane_H_mol
    if True:
        molblock = Chem.MolToMolBlock(sane_H_mol)
        print >> file("sane_H_mol.mol",'w'), molblock

    if (all_set != True):
        return False
    else:
        sane_H_mol.SetProp('comp_id', comp_id)
        sane_H_mol.SetProp('name', compound_name)

        # The mogul input/output files all live in mogul_dir.
        sd_local = mogul_file_name_stub + ".sdf"
        sdf_file_name       = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.sdf')
        mogul_ins_file_name = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.ins')
        mogul_out_file_name = os.path.join(mogul_dir, mogul_file_name_stub + '-mogul.out')
        Chem.AllChem.ComputeGasteigerCharges(sane_H_mol)

        moguled_mol = pyrogen_boost.mogulify(sane_H_mol) # Nitro bond orders (and other things?)
        if not os.path.isdir(mogul_dir):
            checked_mkdir(mogul_dir)
            if os.path.isdir(mogul_dir):
                mb = Chem.MolToMolBlock(moguled_mol)
                print >> file(sdf_file_name,'w'), mb
        else:
            mb = Chem.MolToMolBlock(moguled_mol)
            print >> file(sdf_file_name,'w'), mb

        bor = make_restraints_for_bond_orders(sane_H_mol)

        # print out the set types:
        print_atom_props = False
        if print_atom_props:
            print '--- Atom Props ---'
        for atom in sane_H_mol.GetAtoms():
            charge = atom.GetProp('_GasteigerCharge') # string?
            name = atom.GetProp('name')
            try:
                atom_type = atom.GetProp('atom_type')
                is_aromatic = atom.GetIsAromatic()
                hybrid = atom.GetHybridization()
                f_charge = float(charge)
                if print_atom_props:
                    print " atom: %s %s type: %s arom: %s hybrid: %s charge: %6.3f" % (name, atom.GetSymbol(),
                                                                                       atom_type.ljust(4),
                                                                                       str(is_aromatic).ljust(5),
                                                                                       str(hybrid).rjust(3),
                                                                                       f_charge)
            except KeyError:
                print "miss", name, atom.GetSymbol(), charge

        #
        replace_with_mmff_b_a_restraints = False
        if use_mmff:
            replace_with_mmff_b_a_restraints = True

        # execute_mogul() tests if mogul is executable
        #
        mogul_state = execute_mogul(sdf_file_name, mogul_ins_file_name, mogul_out_file_name)
        if mogul_state:

            # Here we need to think about matching to reference
            # dictionary of amino acids (for standard atom names).
            # That function takes a dictionary and a mmdb::Residue.
            # How does that fit in here?
            #
            restraints = pysw.mogul_out_to_mmcif_dict_by_mol(mogul_out_file_name, comp_id,
                                                             compound_name, sane_H_mol, bor,
                                                             mmcif_dict_name, # not used
                                                             quartet_planes,
                                                             quartet_hydrogen_planes,
                                                             replace_with_mmff_b_a_restraints)

            # match_atom_names_to_dict_flag, comp_id_list_for_names_match, dict_file_for_names_match
            if match_atom_names_to_dict_flag:

                restraints = atom_match_dictionary(restraints, sane_H_mol,
                                                   comp_id_list_for_names_match,
                                                   dict_files_for_names_match)

            pysw.write_restraints(restraints, mmcif_dict_name)
            pysw.regularize_and_write_pdb(sane_H_mol, restraints, comp_id, pdb_out_file_name)

        else:
            # mogul failed or was not in the path:

            if run_mogul == False:
                # ... but that's OK if we told pyrogen to run without mogul

                # sane_H_mol:
                # print >>file('debug_sane_H.mol','w+'),Chem.MolToMolBlock(sane_H_mol)

                restraints = pysw.mmcif_dict_from_mol(comp_id, compound_name, sane_H_mol,
                                                      mmcif_dict_name,
                                                      quartet_planes, quartet_hydrogen_planes,
                                                      replace_with_mmff_b_a_restraints)
                if restraints == None:
                    print "No restraints"
                    return True # hacked in value

                if match_atom_names_to_dict_flag:

                    restraints = atom_match_dictionary(restraints, sane_H_mol,
                                                       comp_id_list_for_names_match,
                                                       dict_files_for_names_match)

                pysw.write_restraints(restraints, mmcif_dict_name)
                pysw.write_pdb_from_mol(sane_H_mol, comp_id, pdb_out_file_name)

            else:
                # ... but not if we wanted to use mogul.
                # (We get here if there is a licence error for mogul)
                exit(1)

        return sane_H_mol
def atom_match_dictionary(restraints, sane_H_mol, comp_id_list_for_names_match, dict_files_for_names_match):
    """Try to rename the atoms of restraints and sane_H_mol to match
    reference dictionaries.

    comp_id_list_for_names_match: comma-separated comp-ids (string) or False
    dict_files_for_names_match:   comma-separated cif file names (string) or False
    Returns the (possibly atom-renamed) restraints.
    """
    # built-in set of reference comp-ids, used when the caller gives us nothing
    comp_ids = ['CYS', 'ASP', 'GLU', 'HIS', 'ILE', 'LYS', 'LEU', 'MET',
                'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR',
                'G', 'C', 'GLC', 'MAN']
    if isinstance(comp_id_list_for_names_match, basestring):
        comp_ids = comp_id_list_for_names_match.split(',')
    cif_file_names = []
    if isinstance(dict_files_for_names_match, basestring):
        cif_file_names = dict_files_for_names_match.split(',')
        # explicit dictionary files override the comp-id list
        comp_ids = []
    success, new_restraints, at_name_list = pysw.match_restraints_to_dictionaries(restraints,
                                                                                 comp_ids,
                                                                                 cif_file_names)
    if success:
        n_atoms = len(sane_H_mol.GetAtoms())
        # only accept the matched names when the atom counts agree
        if len(restraints['_chem_comp_atom']) == n_atoms:
            restraints = new_restraints
            for idx in range(n_atoms):
                atom = sane_H_mol.GetAtomWithIdx(idx)
                matched_name = restraints['_chem_comp_atom'][idx][0]
                if atom.GetProp('name') != matched_name:
                    # print "   changing name from", atom.GetProp('name'), "to", matched_name
                    atom.SetProp('name', matched_name)
    return restraints
def score_and_print_tautomers(mol, comp_id, output_postfix, do_drawings):
results = tautomer.enumerate_tautomers(mol)
for i in range(len(results)):
m = results[i]
s = Chem.MolToSmiles(m)
print "comp_id :", comp_id, ": SMILES", s, 'score:', tautomer.tautomer_score(m)
if do_drawings:
file_name = comp_id + '-tautomer-' + str(i)
file_name += '-' + options.output_postfix + '.png'
n = m.GetNumConformers()
conf_id = 0
if n == 0:
conf_id = AllChem.Compute2DCoords(m)
conf = m.GetConformer(conf_id)
if conf.Is3D():
mol_for_drawing = Chem.RemoveHs(m, implicitOnly=False)
conf2D_id = AllChem.Compute2DCoords(mol_for_drawing)
make_picture_to_file(mol_for_drawing, conf2D_id, file_name)
else:
make_picture_to_file(m, -1, file_name)
if __name__ == "__main__":

    def checked_mkdir(dirname):
        # Make dirname (with parents) if it does not exist.  An existing
        # directory is fine; an existing plain file of that name is reported,
        # but NOTE(review): execution continues regardless - confirm that is
        # intended.
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        else:
            if os.path.isdir(dirname):
                pass # this happens most of the time, I imagine
            else:
                print 'Stop:: File', dirname, 'exists but is not a directory'

    def smiles_and_name_from(smi_raw):
        # smi_raw is either a raw SMILES string or the name of a .smi/.smiles
        # file.  Return a (smiles_string, name) pair; name is '' unless it
        # came from a SMILES file.
        extension = os.path.splitext(smi_raw)[1]
        smiles_string = ''
        name=''
        if extension == '.smi' or extension == '.smiles':
            if not os.path.exists(smi_raw):
                print "File not found:", smi_raw
                exit(1)
            else:
                smiles_string,name = get_smiles_from_file(smi_raw)
        else:
            smiles_string = smi_raw
        return smiles_string,name

    # ---------------- command line interface ----------------
    parser = OptionParser(usage='pyrogen [options] file-or-SMILES'+
                          '\n if file-or-SMILES has extension ".smi" or ".smiles" ' +
                          'then it is treated as a file')
    parser.add_option("-c", "--mmcif", dest="mmcif_file_name",
                      help="Make restraints from input mmcif FILE", metavar="FILE")
    parser.add_option("-m", "--mol", dest="sdf_file",
                      help="Make restraints from input sdf/mol FILE", metavar="FILE")
    parser.add_option("-r", "--residue-type", dest="comp_id", default='default',
                      help="Create restraints for this type. Default is LIG")
    parser.add_option("-4", "--quartet-planes", dest="quartet_planes",
                      default=False,
                      help="Use 4-atom plane restraints,\n " +
                      "forces --quartet-hydrogens", action="store_true")
    parser.add_option("-H", "--quartet-hydrogens", dest="quartet_hydrogen_planes",
                      default=False,
                      help="Use 4-atom hydrogen plane restraints",
                      action="store_true")
    parser.add_option("-n", "--no-mogul", dest="use_mogul",
                      default=True, action="store_false",
                      help='Don\'t run CSD Mogul to update bond and angle restraints')
    parser.add_option("-N", '--name', dest='compound_name', default=False,
                      help='Compound name')
    parser.add_option('-S', '--smiles', dest="show_smiles",
                      default=False, action="store_true", help="Write the SMILES for the input molecule")
    parser.add_option("-t", "--tautomers", dest="show_tautomers",
                      default=False, action="store_true",
                      help='Show SMILES for tautomers, don\'t generate restraints')
    parser.add_option("-T", '--tmp-directory', dest='mogul_dir',
                      help='Directory into which the tmp files (e.g. for mogul) are written',
                      default='pyrogen-mogul')
    parser.add_option("-d", '--directory', dest='output_dir',
                      help='Directory into which the output files (e.g. mmCIF and PDB) are written',
                      default='.')
    parser.add_option('-o', '--output-postfix', default='pyrogen',
                      dest='output_postfix',
                      help='string to add to output file names, default is "pyrogen"')
    parser.add_option('-p', '--picture', dest='drawing',
                      help='Additionally output a chemical diagram PNG',
                      action='store_true', default=False)
    parser.add_option('-v', '--version', dest='show_version', default=False,
                      action='store_true', help='Print version information')
    parser.add_option('-M', '--MMFF', dest='use_mmff', default=False,
                      action='store_true', help='Use MMFF fallbacks for bonds and angles')
    parser.add_option('-a', '--no-match-vs-reference-dictionaries', default=False,
                      action='store_true', dest='no_match_names_flag',
                      help="Don't match atom names vs. dictionary molecules (default False)")
    parser.add_option('-R', '--reference-dictionary-files', dest='dict_files_for_names_match',
                      help='Try to match the atom names of the output molecule '+
                      'to this dictionary in these files (comma-separated list)', default=False)
    parser.add_option('-C', '--reference-dictionary-comp-ids', dest='comp_id_list_for_names_match',
                      help='Try to match the atom names of the output molecule to these comp-ids' +
                      ' (comma-separated list)',
                      default=False)
    parser.add_option('-w', '--wwPDB', default=False, dest="wwPDB", action="store_true",
                      help='Fetch the wwPDB ligand definition and use that')
    parser.add_option("-q", "--quiet",
                      action="store_false", dest="verbose", default=True,
                      help="print less messages")

    (options, args) = parser.parse_args()
    # print 'DEBUG:: options:', options

    if options.show_version:
        print 'pyrogen-' + pyrogen_version, "revision", coot_git.revision_count()

    # resolve comp-id: 'LIG' unless given, or all comp-ids for an mmcif input
    comp_id = options.comp_id
    if options.comp_id == 'default':
        comp_id = 'LIG'
    if options.mmcif_file_name != None:
        if options.comp_id == 'default':
            comp_id = 'TRY_ALL_COMP_IDS'

    # output files are <comp-id>-<postfix>.pdb/.cif in the output directory
    file_name_stub = comp_id + '-' + options.output_postfix
    if options.output_dir != ".":
        file_name_stub = os.path.join(options.output_dir, file_name_stub)
    pdb_out_file_name = file_name_stub + '.pdb'
    mmcif_restraints_out_file_name = file_name_stub + '.cif'

    # this is a bit ugly, perhaps. this value is inspected inside
    # the following functions
    #
    # NOTE(review): when --no-mogul is NOT given, run_mogul is presumably a
    # module-level default assigned earlier in this file - confirm.
    if options.use_mogul == False:
        run_mogul = False
    if run_mogul:
        if len(options.mogul_dir) > 0:
            # guard against the next option being swallowed as the directory
            if options.mogul_dir[0] == '-':
                print 'Stop:: you probably didn\'t mean that you wanted',options.mogul_dir, 'as your tmp directory.'
                exit(1)
            checked_mkdir(options.mogul_dir)

    if options.show_tautomers or options.show_smiles:
        # ------------------------ Tautomers and SMILES ---------------------------------------------
        mol = False
        if len(args) > 0:
            smi_raw = args[0]
            smiles,compound_name = smiles_and_name_from(smi_raw)
            mol = Chem.MolFromSmiles(smiles)
        else:
            if options.sdf_file != None:
                mol = Chem.MolFromMolFile(options.sdf_file)
            else:
                if options.mmcif_file_name != None:
                    # mmcif input: handle every comp-id in the dictionary here
                    types = pysw.types_from_mmcif_dictionary(options.mmcif_file_name)
                    print '-- tautomer mode: mmcif file types:', types
                    for type in types:
                        mol_local = pyrogen_boost.rdkit_mol_chem_comp_pdbx(options.mmcif_file_name, type)
                        score_and_print_tautomers(mol_local, type, options.output_postfix, options.drawing)
        if mol:
            if options.show_tautomers:
                score_and_print_tautomers(mol, comp_id, options.output_postfix, options.drawing)
            if options.show_smiles:
                s = Chem.MolToSmiles(mol);
                print s
    else:
        # ------------------------ dict-build-mode ---------------------------------------------------
        mmcif_file_name = options.mmcif_file_name
        # shall we go get the dictionary?
        if options.wwPDB:
            mmcif_file_name = get_pdbe_cif_for_comp_id(comp_id)
            if os.path.isfile(mmcif_file_name):
                pass # good
            else:
                print "Missing downloaded file for comp-id:", comp_id
                exit(2)
        # JED mode for hydrogen planes
        #
        quartet_hydrogen_planes = options.quartet_hydrogen_planes
        if options.quartet_planes:
            quartet_hydrogen_planes = True
        match_names_flag = True
        if options.no_match_names_flag:
            match_names_flag = False
        if mmcif_file_name:
            # mmcif input may contain several comp-ids; we get one (mol, comp_id)
            # pair per type found
            mol_pairs = make_restraints_from_mmcif_dict(mmcif_file_name,
                                                        comp_id,
                                                        options.mogul_dir,
                                                        options.output_dir,
                                                        options.output_postfix,
                                                        options.quartet_planes,
                                                        quartet_hydrogen_planes,
                                                        options.use_mmff,
                                                        pdb_out_file_name,
                                                        mmcif_restraints_out_file_name)
            # this needs to be in a try block, I suppose, for example if the mmcif file
            # does not exist.
            for mol_info in mol_pairs:
                (mol, comp_id) = mol_info
                if not mol:
                    print 'No molecule'
                else:
                    # Happy path
                    if options.drawing:
                        # make_picture() by default draws the first conformer in the given molecule.
                        # For mol, that is a 3D conformer. We want to draw a nice 2D diagram
                        #
                        mol_for_drawing = Chem.RemoveHs(mol, implicitOnly=False)
                        conf2D_id = AllChem.Compute2DCoords(mol_for_drawing)
                        make_picture(mol_for_drawing, conf2D_id, comp_id, options.output_postfix)
        else:
            if options.sdf_file != None:
                # sdf/mol file input
                (mol, results) = make_restraints_from_mdl(options.sdf_file, comp_id,
                                                          options.mogul_dir, file_name_stub,
                                                          pdb_out_file_name,
                                                          mmcif_restraints_out_file_name,
                                                          options.quartet_planes,
                                                          quartet_hydrogen_planes,
                                                          options.use_mmff,
                                                          match_names_flag,
                                                          options.comp_id_list_for_names_match,
                                                          options.dict_files_for_names_match)
                if options.drawing:
                    make_picture(mol, -1, comp_id, options.output_postfix)
            else:
                if len(args) > 0:
                    # SMILES (string or file) input; the --name option, when
                    # given, beats the name found in a SMILES file
                    smi_raw = args[0]
                    smiles,compound_name_from_file = smiles_and_name_from(smi_raw)
                    compound_name=False
                    if len(compound_name_from_file) > 0:
                        compound_name = compound_name_from_file
                    if isinstance(options.compound_name, basestring):
                        compound_name = options.compound_name
                    status = make_restraints_from_smiles(smiles, comp_id, compound_name,
                                                         options.mogul_dir, file_name_stub,
                                                         pdb_out_file_name,
                                                         mmcif_restraints_out_file_name,
                                                         options.quartet_planes,
                                                         quartet_hydrogen_planes,
                                                         options.use_mmff,
                                                         match_names_flag,
                                                         options.comp_id_list_for_names_match,
                                                         options.dict_files_for_names_match)
                    if options.drawing:
                        mol = Chem.MolFromSmiles(smiles)
                        make_picture(mol, -1, comp_id, options.output_postfix)
|
jlec/coot
|
pyrogen/pyrogen.py
|
Python
|
gpl-3.0
| 32,455
|
[
"RDKit"
] |
a2fc23d3105214ac3eb0fdcc5e4f44b2080d82244616ae876f973ae431edc1fe
|
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://svn.ecdf.ed.ac.uk/repo/inf/dnn_tts/
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import math
import ConfigParser
import os
import logging
import StringIO
import sys
import textwrap
import datetime
class configuration(object):
"""Configuration settings. Any user-specific values are read from an external file
and parsed by an instance of the built-in ConfigParser class"""
    def __init__(self):
        """Create an unconfigured instance; call configure() to populate it."""
        # doesn't do anything
        pass
    def configure(self, configFile=None, use_logging=True):
        """Populate this object: built-in defaults, then the user's config
        file, then (optionally) the separate logging configuration, then all
        derived values.

        configFile:  path of the user's config file, or None for defaults only
        use_logging: if True, also load the logging configuration
        """
        # get a logger
        logger = logging.getLogger("configuration")
        # this (and only this) logger needs to be configured immediately, otherwise it won't work
        # we can't use the full user-supplied configuration mechanism in this particular case,
        # because we haven't loaded it yet!
        #
        # so, just use simple console-only logging
        logger.setLevel(logging.DEBUG) # this level is hardwired here - should change it to INFO
        # add a handler & its formatter - will write only to console
        ch = logging.StreamHandler()
        logger.addHandler(ch)
        formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
        ch.setFormatter(formatter)
        # first, set up some default configuration values
        self.initial_configuration()
        # next, load in any user-supplied configuration values
        # that might over-ride the default values
        self.user_configuration(configFile)
        # now that we have loaded the user's configuration, we can load the
        # separate config file for logging (the name of that file will be specified in the config file)
        # NOTE(review): logging_configuration() is defined elsewhere in this file
        if use_logging:
            self.logging_configuration()
        # finally, set up all remaining configuration values
        # that depend upon either default or user-supplied values
        self.complete_configuration()
        logger.debug('configuration completed')
def initial_configuration(self):
# to be called before loading any user specific values
# things to put here are
# 1. variables that the user cannot change
# 2. variables that need to be set before loading the user's config file
UTTID_REGEX = '(.*)\..*'
    def user_configuration(self,configFile=None):
        """Read the user's config file (if given) and set the corresponding
        attributes on this object, falling back to built-in defaults.

        Each entry of user_options below is a tuple of
        (attribute name, default value, config-file section, option name);
        the TYPE of the default value controls the type the attribute gets.
        Raises if the file cannot be read or a required value is missing.
        """
        # get a logger
        logger = logging.getLogger("configuration")
        # load and parse the provided configFile, if provided
        if not configFile:
            logger.warn('no user configuration file provided; using only built-in default settings')
            return
        # load the config file
        try:
            configparser = ConfigParser.ConfigParser()
            configparser.readfp(open(configFile))
            logger.debug('successfully read and parsed user configuration file %s' % configFile)
        except:
            logger.fatal('error reading user configuration file %s' % configFile)
            raise
        #work_dir must be provided before initialising other directories
        # NOTE(review): work_dir is unconditionally reset to None here, so the
        # value always comes from the Paths:work option of the config file
        self.work_dir = None
        if self.work_dir == None:
            try:
                self.work_dir = configparser.get('Paths', 'work')
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                if self.work_dir == None:
                    logger.critical('Paths:work has no value!')
                    raise Exception
        # look for those items that are user-configurable, and get their values
        # sptk_bindir= ....
        # a list instead of a dict because OrderedDict is not available until 2.7
        # and I don't want to import theano here just for that one class
        # each entry is a tuple of (variable name, default value, section in config file, option name in config file)
        #
        # the type of the default value is important and controls the type that the corresponding
        # variable will have
        #
        # to set a default value of 'undefined' use an empty string
        # or the special value 'impossible', as appropriate
        #
        impossible_int=int(-99999)
        impossible_float=float(-99999.0)
        user_options = [
            ('work_dir', self.work_dir, 'Paths','work'),
            ('data_dir', '', 'Paths','data'),
            ('plot_dir', '', 'Paths','plot'),
            ('plot', True, 'Utility', 'plot'),
            ('profile', False, 'Utility', 'profile'),
            ('file_id_scp' , os.path.join(self.work_dir, 'data/file_id_list.scp') , 'Paths', 'file_id_list'),
            ('test_id_scp' , os.path.join(self.work_dir, 'data/test_id_list.scp') , 'Paths', 'test_id_list'),
            ('GV_dir' , os.path.join(self.work_dir, 'data/GV' ) , 'Paths', 'GV_dir'),
            ('in_stepw_dir' , os.path.join(self.work_dir, 'data/stepw'), 'Paths', 'in_stepw_dir'),
            ('in_mgc_dir' , os.path.join(self.work_dir, 'data/mgc') , 'Paths', 'in_mgc_dir'),
            ('in_fft_dir' , os.path.join(self.work_dir, 'data/fft') , 'Paths', 'in_fft_dir'),
            ('in_samp_dir' , os.path.join(self.work_dir, 'data/samp') , 'Paths', 'in_samp_dir'),
            ('in_lf0_dir' , os.path.join(self.work_dir, 'data/lf0') , 'Paths', 'in_lf0_dir'),
            ('in_bap_dir' , os.path.join(self.work_dir, 'data/bap') , 'Paths', 'in_bap_dir'),
            ('in_sp_dir' , os.path.join(self.work_dir, 'data/sp' ) , 'Paths', 'in_sp_dir'),
            ('in_seglf0_dir', os.path.join(self.work_dir, 'data/lf03') , 'Paths', 'in_seglf0_dir'),
            ## for glottHMM
            ('in_F0_dir' , os.path.join(self.work_dir, 'data/F0') , 'Paths', 'in_F0_dir'),
            ('in_Gain_dir' , os.path.join(self.work_dir, 'data/Gain') , 'Paths', 'in_Gain_dir'),
            ('in_HNR_dir' , os.path.join(self.work_dir, 'data/HNR') , 'Paths', 'in_HNR_dir'),
            ('in_LSF_dir' , os.path.join(self.work_dir, 'data/LSF') , 'Paths', 'in_LSF_dir'),
            ('in_LSFsource_dir' , os.path.join(self.work_dir, 'data/LSFsource') , 'Paths', 'in_LSFsource_dir'),
            ## for joint duration
            ('in_seq_dur_dir' , os.path.join(self.work_dir, 'data/S2S_dur') , 'Paths', 'in_seq_dur_dir'),
            ('in_dur_dir' , os.path.join(self.work_dir, 'data/dur') , 'Paths', 'in_dur_dir'),
            ('nn_norm_temp_dir', os.path.join(self.work_dir, 'data/step_hidden9'), 'Paths', 'nn_norm_temp_dir'),
            ('process_labels_in_work_dir', False, 'Labels', 'process_labels_in_work_dir'),
            ('label_style' , 'HTS' , 'Labels', 'label_style'),
            ('label_type' , 'state_align' , 'Labels', 'label_type'),
            ('in_label_align_dir' , os.path.join(self.work_dir, 'data/label_state_align') , 'Labels', 'label_align'),
            ('question_file_name' , os.path.join(self.work_dir, 'data/questions.hed') , 'Labels', 'question_file_name'),
            ('silence_pattern' , ['*-#+*'] , 'Labels', 'silence_pattern'),
            ('subphone_feats' , 'full' , 'Labels', 'subphone_feats'),
            ('additional_features', {} , 'Labels', 'additional_features'),
            ('xpath_file_name', os.path.join(self.work_dir, 'data/xml_labels/xpaths.txt'), 'Labels', 'xpath_file_name'),
            ('label_config_file', 'configuration/examplelabelconfigfile.py', 'Labels', 'label_config'),
            ('add_frame_features', True, 'Labels', 'add_frame_features'),
            ('fill_missing_values', False, 'Labels', 'fill_missing_values'),
            ('xpath_label_align_dir', os.path.join(self.work_dir, 'data/label_state_align'), 'Labels', 'xpath_label_align'),
            ('enforce_silence', False, 'Labels', 'enforce_silence'),
            ('remove_silence_using_binary_labels', False, 'Labels', 'remove_silence_using_binary_labels'),
            ('precompile_xpaths', True, 'Labels', 'precompile_xpaths'),
            ('iterate_over_frames', True, 'Labels', 'iterate_over_frames'),
            ('appended_input_dim' , 0 , 'Labels' , 'appended_input_dim'),
            ('buffer_size', 200000, 'Data', 'buffer_size'),
            ('train_file_number', impossible_int, 'Data','train_file_number'),
            ('valid_file_number', impossible_int, 'Data','valid_file_number'),
            ('test_file_number' , impossible_int, 'Data','test_file_number'),
            ('log_path', os.path.join(self.work_dir, 'log'), 'Paths', 'log_path'),
            ('log_file', '', 'Paths','log_file'),
            ('log_config_file', 'configuration/exampleloggingconfigfile.conf', 'Paths', 'log_config_file'),
            ('sptk_bindir', 'tools/bin/SPTK-3.9', 'Paths','sptk'),
            ('straight_bindir', 'tools/bin/straight', 'Paths','straight'),
            ('world_bindir', 'tools/bin/WORLD', 'Paths','world'),
            ('network_type' , 'RNN' , 'Architecture', 'network_type'),
            ('model_type' , 'DNN' , 'Architecture', 'model_type'),
            ('hidden_layer_type' , ['TANH', 'TANH', 'TANH', 'TANH', 'TANH', 'TANH'] , 'Architecture', 'hidden_layer_type'),
            ('output_layer_type' , 'LINEAR' , 'Architecture', 'output_layer_type'),
            ('sequential_training' , False , 'Architecture', 'sequential_training'),
            ('dropout_rate' , 0.0 , 'Architecture', 'dropout_rate'),
            ## some config variables for token projection DNN
            ('scheme' , 'stagewise' , 'Architecture', 'scheme'),
            ('index_to_project' , 0 , 'Architecture', 'index_to_project'),
            ('projection_insize' , 10000 , 'Architecture', 'projection_insize'),
            ('projection_outsize' , 10 , 'Architecture', 'projection_outsize'),
            ('initial_projection_distrib' , 'gaussian' , 'Architecture', 'initial_projection_distrib'),
            ('projection_weights_output_dir' , 'some_path', 'Architecture', 'projection_weights_output_dir'),
            ('layers_with_projection_input' , [0], 'Architecture', 'layers_with_projection_input'),
            ('projection_learning_rate_scaling' , 1.0, 'Architecture', 'projection_learning_rate_scaling'),
            ('learning_rate' , 0.0002 , 'Architecture', 'learning_rate'),
            ('l2_reg' , 0.00001 , 'Architecture', 'L2_regularization'),
            ('l1_reg' , 0.0 , 'Architecture', 'L1_regularization'),
            ('batch_size' , 16 , 'Architecture', 'batch_size'),
            ('training_epochs' , 25 , 'Architecture', 'training_epochs'),
            ('hidden_activation' , 'tanh' , 'Architecture', 'hidden_activation'),
            ('output_activation' , 'linear' , 'Architecture', 'output_activation'),
            ('do_pretraining' , False , 'Architecture', 'do_pretraining'),
            ('pretraining_epochs' , 10 , 'Architecture', 'pretraining_epochs'),
            ('pretraining_lr' , 0.0001 , 'Architecture', 'pretraining_lr'),
            ('hidden_layer_size' , [1024, 1024, 1024, 1024, 1024, 1024], 'Architecture', 'hidden_layer_size'),
            ('private_hidden_sizes' , [1024] , 'Architecture', 'private_hidden_sizes'),
            ('stream_weights' , [1.0] , 'Architecture', 'stream_weights'),
            ('private_l2_reg' , 0.00001 , 'Architecture', 'private_l2_reg'),
            ('warmup_epoch' , 5 , 'Architecture', 'warmup_epoch'),
            ('warmup_momentum' , 0.3 , 'Architecture', 'warmup_momentum'),
            ('momentum' , 0.9 , 'Architecture', 'momentum'),
            # NOTE(review): duplicate of the 'warmup_epoch' entry above - the
            # second assignment harmlessly overwrites the first
            ('warmup_epoch' , 5 , 'Architecture', 'warmup_epoch'),
            ('mdn_component', 1 , 'Architecture', 'mdn_component'),
            ('var_floor', 0.01 , 'Architecture', 'var_floor'),
            ('beta_opt', False , 'Architecture', 'beta_opt'),
            ('eff_sample_size', 0.8 , 'Architecture', 'eff_sample_size'),
            ('mean_log_det', -100.0 , 'Architecture', 'mean_log_det'),
            ('start_from_trained_model', '_' , 'Architecture', 'start_from_trained_model'),
            ('use_rprop', 0 , 'Architecture', 'use_rprop'),
            ('mgc_dim' ,60 ,'Outputs','mgc'),
            ('fft_dim' ,512 ,'Outputs','fft'),
            ('samp_dim' ,180 ,'Outputs','samp'),
            ('dmgc_dim',60 * 3 ,'Outputs','dmgc'),
            ('vuv_dim' ,1 ,'Outputs','vuv'),
            ('lf0_dim' ,1 ,'Outputs','lf0'),
            ('dlf0_dim',1 * 3 ,'Outputs','dlf0'),
            ('bap_dim' ,25 ,'Outputs','bap'),
            ('dbap_dim',25 * 3 ,'Outputs','dbap'),
            ('cmp_dim' ,(60 * 3) + 1 + (1 * 3) + (25 * 3) ,'Outputs','cmp'),
            ('stepw_dim' , 55, 'Outputs', 'stepw_dim'),
            ('temp_sp_dim' , 1025, 'Outputs', 'temp_sp_dim'),
            ('seglf0_dim' , 7 , 'Outputs', 'seglf0_dim'),
            ('delta_win' , [-0.5, 0.0, 0.5] , 'Outputs', 'delta_win'),
            ('acc_win' , [1.0, -2.0, 1.0] , 'Outputs', 'acc_win'),
            ('do_MLPG' , True , 'Outputs', 'do_MLPG'),
            ## for GlottHMM
            ('F0_dim' ,1 ,'Outputs','F0'),
            ('dF0_dim',1 * 3 ,'Outputs','dF0'),
            ('Gain_dim' ,1 ,'Outputs','Gain'),
            ('dGain_dim',1 * 3 ,'Outputs','dGain'),
            ('HNR_dim' ,5 ,'Outputs','HNR'),
            ('dHNR_dim',5 * 3 ,'Outputs','dHNR'),
            ('LSF_dim' ,30 ,'Outputs','LSF'),
            ('dLSF_dim',30 * 3 ,'Outputs','dLSF'),
            ('LSFsource_dim' ,10 ,'Outputs','LSFsource'),
            ('dLSFsource_dim',10 * 3 ,'Outputs','dLSFsource'),
            ## for joint dur:-
            ('seq_dur_dim' ,1 ,'Outputs','seq_dur'),
            ('remove_silence_from_dur' , True , 'Outputs', 'remove_silence_from_dur'),
            ('dur_dim' ,5 ,'Outputs','dur'),
            ('dur_feature_type' , 'numerical' , 'Outputs', 'dur_feature_type'),
            ('output_feature_normalisation', 'MVN', 'Outputs', 'output_feature_normalisation'),
            ('multistream_switch' , False , 'Streams', 'multistream_switch'),
            # ('use_private_hidden' , False, 'Streams', 'use_private_hidden'),
            ('output_features' , ['mgc','lf0', 'vuv', 'bap'], 'Streams', 'output_features'),
            ('gen_wav_features', ['mgc', 'bap', 'lf0'] , 'Streams', 'gen_wav_features'),
            # ('stream_mgc_hidden_size' , 192 , 'Streams', 'stream_mgc_hidden_size'),
            # ('stream_lf0_hidden_size' , 32 , 'Streams', 'stream_lf0_hidden_size'),
            # ('stream_vuv_hidden_size' , 32 , 'Streams', 'stream_vuv_hidden_size'),
            # ('stream_bap_hidden_size' , 128 , 'Streams', 'stream_bap_hidden_size'),
            # ('stream_stepw_hidden_size' , 64 , 'Streams', 'stream_stepw_hidden_size'),
            # ('stream_seglf0_hidden_size', 64 , 'Streams', 'stream_seglf0_hidden_size'),
            # ('stream_cmp_hidden_size' , 256 , 'Streams', 'stream_cmp_hidden_size'), #when multi-stream is disabled, use this to indicate the final hidden layer size
            #if this is also not provided, use the top common hidden layer size
            ## Glott HMM -- dummy values -- haven't used private streams:--
            # ('stream_F0_hidden_size' , 192 , 'Streams', 'stream_F0_hidden_size'),
            # ('stream_Gain_hidden_size' , 192 , 'Streams', 'stream_Gain_hidden_size'),
            # ('stream_HNR_hidden_size' , 192 , 'Streams', 'stream_HNR_hidden_size'),
            # ('stream_LSF_hidden_size' , 192 , 'Streams', 'stream_LSF_hidden_size'),
            # ('stream_LSFsource_hidden_size' , 192 , 'Streams', 'stream_LSFsource_hidden_size'),
            ## joint dur -- dummy values -- haven't used private streams:--
            # ('stream_dur_hidden_size' , 192 , 'Streams', 'stream_dur_hidden_size'),
            # ('stream_sp_hidden_size' , 1024, 'Streams', 'stream_sp_hidden_size'),
            # ('stream_weight_mgc' , 1.0, 'Streams', 'stream_weight_mgc'),
            # ('stream_weight_lf0' , 3.0, 'Streams', 'stream_weight_lf0'),
            # ('stream_weight_vuv' , 1.0, 'Streams', 'stream_weight_vuv'),
            # ('stream_weight_bap' , 1.0, 'Streams', 'stream_weight_bap'),
            # ('stream_weight_stepw' , 0.0, 'Streams', 'stream_weight_stepw'),
            # ('stream_weight_seglf0', 1.0, 'Streams', 'stream_weight_seglf0'),
            # ('stream_weight_sp' , 1.0, 'Streams', 'stream_weight_sp'),
            ## Glott HMM - unused?
            # ('stream_weight_F0' , 1.0, 'Streams', 'stream_weight_F0'),
            # ('stream_weight_Gain' , 1.0, 'Streams', 'stream_weight_Gain'),
            # ('stream_weight_HNR' , 1.0, 'Streams', 'stream_weight_HNR'),
            # ('stream_weight_LSF' , 1.0, 'Streams', 'stream_weight_LSF'),
            # ('stream_weight_LSFsource' , 1.0, 'Streams', 'stream_weight_LSFsource'),
            ## dur - unused?
            # ('stream_weight_dur' , 1.0, 'Streams', 'stream_weight_dur'),
            # ('stream_lf0_lr' , 0.5, 'Streams', 'stream_lf0_lr'),
            # ('stream_vuv_lr' , 0.5, 'Streams', 'stream_vuv_lr'),
            ('vocoder_type' ,'STRAIGHT' ,'Waveform' , 'vocoder_type'),
            ('sr' ,48000 ,'Waveform' , 'samplerate'),
            ('fl' ,4096 ,'Waveform' , 'framelength'),
            ('shift' ,1000 * 240 / 48000 ,'Waveform' , 'frameshift'),
            ('sp_dim' ,(4096 / 2) + 1 ,'Waveform' , 'sp_dim'),
            # fw_alpha: 'Bark' or 'ERB' allowing deduction of alpha, or explicity float value (e.g. 0.77)
            ('fw_alpha' ,0.77 ,'Waveform' , 'fw_alpha'),
            ('pf_coef' ,1.4 ,'Waveform' , 'postfilter_coef'),
            ('co_coef' ,2047 ,'Waveform' , 'minimum_phase_order'),
            ('use_cep_ap' ,True ,'Waveform' , 'use_cep_ap'),
            ('do_post_filtering',True ,'Waveform' , 'do_post_filtering'),
            ('apply_GV' ,False ,'Waveform' , 'apply_GV'),
            ('test_synth_dir' ,'test_synthesis/wav' ,'Waveform' , 'test_synth_dir'),
            ('DurationModel' , False, 'Processes', 'DurationModel'),
            ('AcousticModel' , False, 'Processes', 'AcousticModel'),
            ('GenTestList' , False, 'Processes', 'GenTestList'),
            ('NORMLAB' , False, 'Processes', 'NORMLAB'),
            ('MAKEDUR' , False, 'Processes', 'MAKEDUR'),
            ('MAKECMP' , False, 'Processes', 'MAKECMP'),
            ('NORMCMP' , False, 'Processes', 'NORMCMP'),
            ('TRAINDNN' , False, 'Processes', 'TRAINDNN'),
            ('DNNGEN' , False, 'Processes', 'DNNGEN'),
            ('GENWAV' , False, 'Processes', 'GENWAV'),
            ('CALMCD' , False, 'Processes', 'CALMCD'),
            ('NORMSTEP' , False, 'Processes', 'NORMSTEP'),
            ('GENBNFEA' , False, 'Processes', 'GENBNFEA'),
            ('mgc_ext' , '.mgc' , 'Extensions', 'mgc_ext'),
            ('bap_ext' , '.bap' , 'Extensions', 'bap_ext'),
            ('lf0_ext' , '.lf0' , 'Extensions', 'lf0_ext'),
            ('cmp_ext' , '.cmp' , 'Extensions', 'cmp_ext'),
            ('lab_ext' , '.lab' , 'Extensions', 'lab_ext'),
            ('utt_ext' , '.utt' , 'Extensions', 'utt_ext'),
            ('stepw_ext' , '.stepw' , 'Extensions', 'stepw_ext'),
            ('sp_ext' , '.sp' , 'Extensions', 'sp_ext'),
            ##Ashish
            ('fft_ext' , '.fft' , 'Extensions', 'fft_ext'),
            ('samp_ext' , '.samp' , 'Extensions', 'samp_ext'),
            ## GlottHMM
            ('F0_ext' , '.F0' , 'Extensions', 'F0_ext'),
            ('Gain_ext' , '.Gain' , 'Extensions', 'Gain_ext'),
            ('HNR_ext' , '.HNR' , 'Extensions', 'HNR_ext'),
            ('LSF_ext' , '.LSF' , 'Extensions', 'LSF_ext'),
            ('LSFsource_ext' , '.LSFsource' , 'Extensions', 'LSFsource_ext'),
            ## joint dur
            ('dur_ext' , '.dur' , 'Extensions', 'dur_ext'),
        ]
        # this uses exec(...) which is potentially dangerous since arbitrary code could be executed
        for (variable,default,section,option) in user_options:
            value=None
            try:
                # first, look for a user-set value for this variable in the config file
                value = configparser.get(section,option)
                user_or_default='user'
            except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
                # use default value, if there is one
                if (default == None) or \
                   (default == '') or \
                   ((type(default) == int) and (default == impossible_int)) or \
                   ((type(default) == float) and (default == impossible_float)) :
                    logger.critical('%20s has no value!' % (section+":"+option) )
                    raise Exception
                else:
                    value = default
                    user_or_default='default'
            # coerce the raw value to the type of the default by generating
            # the assignment statement and exec-ing it
            if type(default) == str:
                exec('self.%s = "%s"' % (variable,value))
            elif type(default) == int:
                exec('self.%s = int(%s)' % (variable,value))
            elif type(default) == float:
                exec('self.%s = float(%s)' % (variable,value))
            elif type(default) == bool:
                exec('self.%s = bool(%s)' % (variable,value))
            elif type(default) == list:
                exec('self.%s = list(%s)' % (variable,value))
            elif type(default) == dict:
                exec('self.%s = dict(%s)' % (variable,value))
            else:
                logger.critical('Variable %s has default value of unsupported type %s',variable,type(default))
                raise Exception('Internal error in configuration settings: unsupported default type')
            logger.info('%20s has %7s value %s' % (section+":"+option,user_or_default,value) )
        # derived names built from the configured feature/layer lists
        self.combined_feature_name = ''
        for feature_name in self.output_features:
            self.combined_feature_name += '_'
            self.combined_feature_name += feature_name
        self.combined_model_name = self.model_type
        for hidden_type in self.hidden_layer_type:
            self.combined_model_name += '_' + hidden_type
        self.combined_model_name += '_' + self.output_layer_type
def complete_configuration(self):
    """Derive every setting that depends on user-specific configuration.

    Must be called after any user-specific settings have been read,
    because the values computed here (external tool paths, feature
    dimensionalities, DNN hyper-parameters, label-style bookkeeping)
    depend on them.

    Raises:
        Exception: if a requested output feature is unsupported, lacks an
            input directory, has inconsistent dimensionality settings, or
            if the label style is unknown.
    """
    # get a logger
    logger = logging.getLogger("configuration")

    # paths to the external SPTK signal-processing tools
    self.SPTK = {
        'X2X': os.path.join(self.sptk_bindir, 'x2x'),
        'MERGE': os.path.join(self.sptk_bindir, 'merge'),
        'BCP': os.path.join(self.sptk_bindir, 'bcp'),
        'MLPG': os.path.join(self.sptk_bindir, 'mlpg'),
        'MGC2SP': os.path.join(self.sptk_bindir, 'mgc2sp'),
        'VSUM': os.path.join(self.sptk_bindir, 'vsum'),
        'VSTAT': os.path.join(self.sptk_bindir, 'vstat'),
        'SOPR': os.path.join(self.sptk_bindir, 'sopr'),
        'VOPR': os.path.join(self.sptk_bindir, 'vopr'),
        'FREQT': os.path.join(self.sptk_bindir, 'freqt'),
        'C2ACR': os.path.join(self.sptk_bindir, 'c2acr'),
        'MC2B': os.path.join(self.sptk_bindir, 'mc2b'),
        'B2MC': os.path.join(self.sptk_bindir, 'b2mc')
    }

    # paths to the STRAIGHT and WORLD vocoder binaries
    self.STRAIGHT = {
        'SYNTHESIS_FFT': os.path.join(self.straight_bindir, 'synthesis_fft'),
        'BNDAP2AP': os.path.join(self.straight_bindir, 'bndap2ap'),
    }
    self.WORLD = {
        'SYNTHESIS': os.path.join(self.world_bindir, 'synth'),
        'ANALYSIS': os.path.join(self.world_bindir, 'analysis'),
    }
    # STILL TO DO - test that all the above tools exist and are executable

    # Dimensions for the output features.
    # Key names must follow self.in_dimension_dict.  If dynamic features
    # are not wanted, out_dimension is simply the same as in_dimension.
    # When lf0 is one of the acoustic features, out_dimension_dict must
    # have an additional 'vuv' key.
    self.in_dir_dict = {}  # directory for each raw acoustic (output of NN) feature
    self.out_dimension_dict = {}
    self.in_dimension_dict = {}
    self.private_hidden_sizes = []
    self.stream_weights = []

    logger.debug('setting up output features')
    self.cmp_dim = 0
    for feature_name in self.output_features:
        logger.debug(' %s' % feature_name)
        in_dimension = 0
        out_dimension = 0
        in_directory = ''
        if feature_name == 'mgc':
            in_dimension = self.mgc_dim
            out_dimension = self.dmgc_dim
            in_directory = self.in_mgc_dir
        elif feature_name == 'fft':
            in_dimension = self.fft_dim
            out_dimension = self.fft_dim
            in_directory = self.in_fft_dir
        elif feature_name == 'samp':
            in_dimension = self.samp_dim
            out_dimension = self.samp_dim
            in_directory = self.in_samp_dir
        elif feature_name == 'bap':
            in_dimension = self.bap_dim
            out_dimension = self.dbap_dim
            in_directory = self.in_bap_dir
        elif feature_name == 'lf0':
            in_dimension = self.lf0_dim
            out_dimension = self.dlf0_dim
            in_directory = self.in_lf0_dir
        elif feature_name == 'vuv':
            # voiced/unvoiced flag is derived, so it has no input side
            out_dimension = 1
        elif feature_name == 'stepw':
            in_dimension = self.stepw_dim
            out_dimension = self.stepw_dim
            in_directory = self.in_stepw_dir
        elif feature_name == 'sp':
            in_dimension = self.sp_dim
            out_dimension = self.sp_dim
            in_directory = self.in_sp_dir
        elif feature_name == 'seglf0':
            in_dimension = self.seglf0_dim
            out_dimension = self.seglf0_dim
            in_directory = self.in_seglf0_dir
        ## for GlottHMM (start)
        elif feature_name == 'F0':
            in_dimension = self.F0_dim
            out_dimension = self.dF0_dim
            in_directory = self.in_F0_dir
        elif feature_name == 'Gain':
            in_dimension = self.Gain_dim
            out_dimension = self.dGain_dim
            in_directory = self.in_Gain_dir
        elif feature_name == 'HNR':
            in_dimension = self.HNR_dim
            out_dimension = self.dHNR_dim
            in_directory = self.in_HNR_dir
        elif feature_name == 'LSF':
            in_dimension = self.LSF_dim
            out_dimension = self.dLSF_dim
            in_directory = self.in_LSF_dir
        elif feature_name == 'LSFsource':
            in_dimension = self.LSFsource_dim
            out_dimension = self.dLSFsource_dim
            in_directory = self.in_LSFsource_dir
        ## for GlottHMM (end)
        ## for joint dur (start)
        elif feature_name == 'dur':
            in_dimension = self.dur_dim
            out_dimension = self.dur_dim
            in_directory = self.in_dur_dir
        ## for joint dur (end)
        else:
            logger.critical('%s feature is not supported right now. Please change the configuration.py to support it' % (feature_name))
            # was a bare `raise` (invalid outside an except block); raise explicitly
            raise Exception('unsupported feature: %s' % feature_name)

        logger.info(' in_dimension: %d' % in_dimension)
        logger.info(' out_dimension : %d' % out_dimension)
        logger.info(' in_directory : %s' % in_directory)

        if in_dimension > 0:
            self.in_dimension_dict[feature_name] = in_dimension
            if in_directory == '':
                logger.critical('please provide the path for %s feature' % (feature_name))
                raise Exception('missing input directory for %s feature' % feature_name)
            if out_dimension < in_dimension:
                logger.critical('the dimensionality setting for %s feature is not correct!' % (feature_name))
                raise Exception('bad dimensionality setting for %s feature' % feature_name)
            self.in_dir_dict[feature_name] = in_directory
        if out_dimension > 0:
            self.out_dimension_dict[feature_name] = out_dimension
        self.cmp_dim += out_dimension

    # per-stream output sizes for multi-stream models
    self.stream_lr_weights = []
    self.multistream_outs = []
    if self.multistream_switch:
        for feature_name in self.out_dimension_dict.keys():
            self.multistream_outs.append(self.out_dimension_dict[feature_name])
    else:
        ### the new cmp is not the one for HTS, it includes all the features, such as that for main tasks and that for additional tasks
        self.multistream_outs.append(self.cmp_dim)
    logger.info('multistream dimensions: %s' % (self.multistream_outs))

    # file extensions for every input and output feature
    self.file_extension_dict = {}
    self.file_extension_dict['mgc'] = self.mgc_ext
    self.file_extension_dict['samp'] = self.samp_ext
    self.file_extension_dict['fft'] = self.fft_ext
    self.file_extension_dict['lf0'] = self.lf0_ext
    self.file_extension_dict['bap'] = self.bap_ext
    self.file_extension_dict['stepw'] = self.stepw_ext
    self.file_extension_dict['cmp'] = self.cmp_ext
    # NOTE: seglf0 deliberately shares the lf0 extension (as in the original)
    self.file_extension_dict['seglf0'] = self.lf0_ext
    ## gHMM:
    self.file_extension_dict['F0'] = self.F0_ext
    self.file_extension_dict['Gain'] = self.Gain_ext
    self.file_extension_dict['HNR'] = self.HNR_ext
    self.file_extension_dict['LSF'] = self.LSF_ext
    self.file_extension_dict['LSFsource'] = self.LSFsource_ext
    ## joint dur
    self.file_extension_dict['dur'] = self.dur_ext

    # Hyper parameters for the DNN; set by the user, as they depend on
    # the architecture.  (The original built a dict of placeholder
    # defaults and then overwrote every entry except 'early_stop_epochs';
    # this builds the final dict directly.)
    self.hyper_params = {
        'learning_rate': self.learning_rate,
        'l2_reg': self.l2_reg,
        'l1_reg': self.l1_reg,
        'batch_size': self.batch_size,
        'training_epochs': self.training_epochs,
        'early_stop_epochs': '5',  # only value that was never user-overridden
        'hidden_activation': self.hidden_activation,
        'output_activation': self.output_activation,
        'do_pretraining': self.do_pretraining,
        'pretraining_epochs': self.pretraining_epochs,
        'pretraining_lr': self.pretraining_lr,
        'warmup_momentum': self.warmup_momentum,
        'momentum': self.momentum,
        'warmup_epoch': self.warmup_epoch,
        'hidden_layer_size': self.hidden_layer_size,
        'use_rprop': self.use_rprop,
        'model_type': self.model_type,
        'hidden_layer_type': self.hidden_layer_type,
        'index_to_project': self.index_to_project,
        'projection_insize': self.projection_insize,
        'projection_outsize': self.projection_outsize,
        'initial_projection_distrib': self.initial_projection_distrib,
        'layers_with_projection_input': self.layers_with_projection_input,
        'projection_learning_rate_scaling': self.projection_learning_rate_scaling,
        'sequential_training': self.sequential_training,
        'dropout_rate': self.dropout_rate,
    }

    # NOTE(review): this loop is a no-op -- it re-assigns the value already
    # stored above.  It very likely was meant to force sequential training
    # ON for recurrent layer types; left unchanged to preserve behaviour.
    for hidden_type in self.hidden_layer_type:
        if 'LSTM' in hidden_type or 'RNN' in hidden_type or 'GRU' in hidden_type:
            self.hyper_params['sequential_training'] = self.sequential_training

    # record all hyper parameters in the logging file for reference
    for param_name in self.hyper_params.keys():
        logger.info('%s : %s' % (param_name, str(self.hyper_params[param_name])))

    # set up the label processing; currently must be one of these styles
    if self.label_style == 'HTS':
        # xpath_file_name is now obsolete - to remove
        self.xpath_file_name = None
    elif self.label_style == 'HTS_duration':
        self.xpath_file_name = None
    elif self.label_style == 'composed':
        self.question_file_name = None
    else:
        logger.critical('unsupported label style requested: %s' % self.label_style)
        raise Exception
def logging_configuration(self):
    """Configure Python logging from the file named by self.log_config_file.

    A [handler_file] section pointing at a run-specific log file (named
    from the model/feature combination) is appended to the configuration
    before it is applied.  Falls back to console-only DEBUG logging when
    no configuration file was provided; exits the process if the final
    configuration cannot be applied.
    """
    # get a logger
    logger = logging.getLogger("configuration")

    # logging configuration, see here for format description
    # https://docs.python.org/2/library/logging.config.html#logging-config-fileformat
    # dictionary-based configuration (logging.config.dictConfig) would be
    # preferable, but it is only available from Python 2.7 onwards, so we
    # settle for this file-based configuration procedure instead
    try:
        # open the logging configuration file
        fp = open(self.log_config_file, 'r')
        logger.debug("loading logging configuration from %s" % self.log_config_file)
        # load the logging configuration file into a string
        config_string = fp.read()
        fp.close()
    except ValueError:
        # this means that cfg.log_config_file does not exist and that no default was provided
        # NOTE: currently this will never run
        logging.warn('no logging configuration file provided - using default (console only, DEBUG level)')
        # set up a default level and default handlers; first get the root
        # logger - all other loggers will inherit its configuration.
        # (fixed: this previously assigned to the misspelled name
        # 'rootogger', so the lines below raised NameError)
        rootlogger = logging.getLogger("")
        # default logging level is DEBUG (a highly-verbose level)
        rootlogger.setLevel(logging.DEBUG)
        # add a handler to write to console
        ch = logging.StreamHandler()
        rootlogger.addHandler(ch)
        # and a formatter
        formatter = logging.Formatter('%(asctime)s %(levelname)8s%(name)15s: %(message)s')
        ch.setFormatter(formatter)
    except IOError:
        # this means that open(...) threw an error
        logger.critical('could not load logging configuration file %s' % self.log_config_file)
        raise
    else:
        # inject the config lines for the file handler, now that we know the name of the file it will write to
        if not os.path.exists(self.log_path):
            # 0o755 spelling is valid on both Python 2.6+ and Python 3
            os.makedirs(self.log_path, 0o755)
        log_file_name = '%s_%s_%d_%d_%d_%d_%f_%s.log' % (self.combined_model_name, self.combined_feature_name, self.train_file_number,
                                                         self.cmp_dim, len(self.hidden_layer_size),
                                                         self.hidden_layer_size[-1], self.learning_rate,
                                                         datetime.datetime.now().strftime("%I_%M%p_%B_%d_%Y"))
        self.log_file = os.path.join(self.log_path, log_file_name)
        to_inject = """
        [handler_file]
        class=FileHandler
        formatter=file
        args=('""" + self.log_file + """', 'w')
        """
        # config file format doesn't allow leading white space on lines, so remove it with dedent
        config_string = config_string + textwrap.dedent(to_inject)
        try:
            # pass that string as a filehandle
            fh = StringIO.StringIO(config_string)
            logging.config.fileConfig(fh)
            fh.close()
            logger.info("logging is now fully configured")
        except IOError:
            logger.critical('could not configure logging: perhaps log file path is wrong?')
            sys.exit(1)
|
ashmanmode/TTSDNNRepo
|
src/configuration/configuration.py
|
Python
|
apache-2.0
| 45,317
|
[
"Gaussian"
] |
fbab3f5873e246926585aefc57802c8d5f78ec4348c5b51e3ba865bf4a9077d3
|
# ===============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of eos.
#
# eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with eos. If not, see <http://www.gnu.org/licenses/>.
# ===============================================================================
import datetime
import time
from copy import deepcopy
from itertools import chain
from math import log, sqrt
from logbook import Logger
from sqlalchemy.orm import reconstructor, validates
import eos.db
from eos import capSim
from eos.calc import calculateLockTime, calculateMultiplier
from eos.const import CalcType, FitSystemSecurity, FittingHardpoint, FittingModuleState, FittingSlot, ImplantLocation
from eos.effectHandlerHelpers import (
HandledBoosterList, HandledDroneCargoList, HandledImplantList,
HandledModuleList, HandledProjectedDroneList, HandledProjectedModList)
from eos.saveddata.character import Character
from eos.saveddata.citadel import Citadel
from eos.saveddata.damagePattern import DamagePattern
from eos.saveddata.module import Module
from eos.saveddata.ship import Ship
from eos.saveddata.targetProfile import TargetProfile
from eos.utils.stats import DmgTypes, RRTypes
pyfalog = Logger(__name__)
class FitLite:
    """Lightweight stand-in for a Fit: only identifiers and display names.

    Used where a full Fit (with modules, ship, calculation state) would be
    wasteful to load.
    """

    def __init__(self, id=None, name=None, shipID=None, shipName=None, shipNameShort=None):
        # Mirror the constructor arguments onto public attributes
        # (note the upper-cased ID attribute).
        self.ID, self.name = id, name
        self.shipID, self.shipName, self.shipNameShort = shipID, shipName, shipNameShort

    def __repr__(self):
        return 'FitLite(ID={})'.format(self.ID)
class Fit:
"""Represents a fitting, with modules, ship, implants, etc."""
PEAK_RECHARGE = 0.25
def __init__(self, ship=None, name=""):
    """Initialize a fit from the program (as opposed to the database).

    :param ship: optional Ship/Citadel wrapper; this fit takes ownership.
    :param name: display name for the fit.
    """
    self.__ship = None
    self.__mode = None
    # use @mode.setter's to set __attr and IDs. This will set mode as well
    self.ship = ship
    if self.ship:
        self.ship.owner = self
    # Handled* containers wrap plain collections with eos bookkeeping
    self.__modules = HandledModuleList()
    self.__drones = HandledDroneCargoList()
    self.__fighters = HandledDroneCargoList()
    self.__cargo = HandledDroneCargoList()
    self.__implants = HandledImplantList()
    self.__boosters = HandledBoosterList()
    # self.__projectedFits = {}
    self.__projectedModules = HandledProjectedModList()
    self.__projectedDrones = HandledProjectedDroneList()
    self.__projectedFighters = HandledProjectedDroneList()
    self.__character = None
    self.__owner = None
    self.projected = False
    self.name = name
    self.timestamp = time.time()
    self.created = None
    self.modified = None
    self.modeID = None
    # initialise all cached calculation state
    self.build()
@reconstructor
def init(self):
    """Initialize a fit loaded from the database and validate its ship.

    Runs as the SQLAlchemy reconstructor in place of __init__.  Leaves
    the fit in the "invalid" state (no ship) when the stored shipID no
    longer resolves to a Ship or Citadel item.
    """
    self.__ship = None
    self.__mode = None
    if self.shipID:
        item = eos.db.getItem(self.shipID)
        if item is None:
            pyfalog.error("Item (id: {0}) does not exist", self.shipID)
            return
        try:
            try:
                # try wrapping as a regular ship first...
                self.__ship = Ship(item, self)
            except ValueError:
                # ...and fall back to a structure
                self.__ship = Citadel(item, self)
            # @todo extra attributes is now useless, however it set to be
            # the same as ship attributes for ease (so we don't have to
            # change all instances in source). Remove this at some point
            self.extraAttributes = self.__ship.itemModifiedAttributes
        except ValueError:
            # the item is neither a Ship nor a Citadel
            pyfalog.error("Item (id: {0}) is not a Ship", self.shipID)
            return
    if self.modeID and self.__ship:
        item = eos.db.getItem(self.modeID)
        # Don't need to verify if it's a proper item, as validateModeItem assures this
        self.__mode = self.ship.validateModeItem(item, owner=self)
    else:
        # NOTE(review): if shipID was falsy, self.ship is None here and this
        # call would raise -- presumably DB fits always have a shipID; verify
        self.__mode = self.ship.validateModeItem(None, owner=self)
    # reset all cached calculation state
    self.build()
def build(self):
    """Reset every cached/derived value to its 'not yet calculated' state."""
    # map-style caches (keyed by spool options, sim parameters, etc.)
    self.__weaponDpsMap = {}
    self.__weaponVolleyMap = {}
    self.__remoteRepMap = {}
    self.__savedCapSimData = {}
    # list accumulators
    self.__extraDrains = []
    self.__calculatedTargets = []
    # scalar caches: None means "needs recalculation"
    self.__ehp = None
    self.__minerYield = None
    self.__droneDps = None
    self.__droneVolley = None
    self.__droneYield = None
    self.__sustainableTank = None
    self.__effectiveSustainableTank = None
    self.__effectiveTank = None
    self.__capStable = None
    self.__capState = None
    self.__capUsed = None
    self.__capRecharge = None
    # plain state flags and accumulators
    self.__calculated = False
    self.factorReload = False
    self.boostsFits = set()
    self.gangBoosts = None
    self.ecmProjectedStr = 1
    self.commandBonuses = {}
def clearFactorReloadDependentData(self):
    """Drop every cached value known to rely on cycle parameters.

    Cycle parameters in turn rely on the factor-reload flag, so this must
    run whenever that flag changes.
    """
    # damage and remote-rep results are cycle-dependent
    self.__weaponDpsMap.clear()
    self.__remoteRepMap.clear()
    self.__droneDps = None
    # capacitor simulation results as well
    self.__savedCapSimData.clear()
    self.__capStable = None
    self.__capState = None
    self.__capUsed = None
    self.__capRecharge = None
    # ancillary tank modules consume charges, so tank figures depend on reload
    self.__sustainableTank = None
    self.__effectiveSustainableTank = None
@property
def targetProfile(self):
    """Active target profile: a user-assigned one wins over a builtin; None if neither is set."""
    if self.__userTargetProfile is not None:
        return self.__userTargetProfile
    if self.__builtinTargetProfileID is not None:
        return TargetProfile.getBuiltinById(self.__builtinTargetProfileID)
    return None

@targetProfile.setter
def targetProfile(self, targetProfile):
    # Persist either the user profile object or just the builtin's ID, never both.
    if targetProfile is None:
        self.__userTargetProfile = None
        self.__builtinTargetProfileID = None
    elif targetProfile.builtin:
        self.__userTargetProfile = None
        self.__builtinTargetProfileID = targetProfile.ID
    else:
        self.__userTargetProfile = targetProfile
        self.__builtinTargetProfileID = None
    # invalidate damage caches computed against the previous profile
    self.__weaponDpsMap = {}
    self.__weaponVolleyMap = {}
    self.__droneDps = None
    self.__droneVolley = None
@property
def damagePattern(self):
    """Incoming damage pattern: user-assigned, then builtin, then the default builtin."""
    if self.__userDamagePattern is not None:
        return self.__userDamagePattern
    if self.__builtinDamagePatternID is not None:
        pattern = DamagePattern.getBuiltinById(self.__builtinDamagePatternID)
        if pattern is not None:
            return pattern
    return DamagePattern.getDefaultBuiltin()

@damagePattern.setter
def damagePattern(self, damagePattern):
    # Persist either the user pattern object or just the builtin's ID, never both.
    if damagePattern is None:
        self.__userDamagePattern = None
        self.__builtinDamagePatternID = None
    elif damagePattern.builtin:
        self.__userDamagePattern = None
        self.__builtinDamagePatternID = damagePattern.ID
    else:
        self.__userDamagePattern = damagePattern
        self.__builtinDamagePatternID = None
    # invalidate tank caches computed against the previous pattern
    self.__ehp = None
    self.__effectiveTank = None
@property
def isInvalid(self):
    # a fit whose ship could not be resolved is unusable
    return self.__ship is None

@property
def mode(self):
    return self.__mode

@mode.setter
def mode(self, mode):
    # detach the old mode, attach the new one, keep modeID in sync for the DB
    if self.__mode is not None:
        self.__mode.owner = None
    self.__mode = mode
    self.modeID = mode.item.ID if mode is not None else None
    if mode is not None:
        mode.owner = self
@property
def modifiedCoalesce(self):
    """Return whichever date is available for the fit: modified, created,
    or the legacy timestamp converted to a datetime.

    @todo: migrate old timestamp data and ensure created / modified are
    set in database to get rid of this
    """
    return self.modified or self.created or datetime.datetime.fromtimestamp(self.timestamp)
@property
def character(self):
    # fall back to the shared "All 0" character when none is assigned
    return self.__character if self.__character is not None else Character.getAll0()

@character.setter
def character(self, char):
    self.__character = char

@property
def calculated(self):
    # True once calculation results for this fit are up to date
    return self.__calculated

@calculated.setter
def calculated(self, bool):
    # todo: brief explanation of how this works
    self.__calculated = bool
@property
def ship(self):
    return self.__ship

@ship.setter
def ship(self, ship):
    # detach the old hull, attach the new one, keep shipID in sync for the DB
    if self.__ship is not None:
        self.__ship.owner = None
    self.__ship = ship
    self.shipID = ship.item.ID if ship is not None else None
    if ship is not None:
        ship.owner = self
    # set mode of new ship
    self.mode = self.ship.validateModeItem(None, owner=self) if ship is not None else None
    # set fit attributes the same as ship
    self.extraAttributes = self.ship.itemModifiedAttributes

@property
def isStructure(self):
    # structures (citadels) follow different fitting/behaviour rules
    return isinstance(self.ship, Citadel)
# Read-only accessors for the fit's contained collections.
@property
def drones(self):
    return self.__drones

@property
def fighters(self):
    return self.__fighters

@property
def cargo(self):
    return self.__cargo

@property
def modules(self):
    return self.__modules

@property
def implants(self):
    return self.__implants

@property
def boosters(self):
    return self.__boosters

@property
def projectedModules(self):
    return self.__projectedModules

@property
def projectedFits(self):
    # only in extreme edge cases will the fit be invalid, but to be sure do
    # not return them.
    return [fit for fit in list(self.projectedFitDict.values()) if not fit.isInvalid]

@property
def commandFits(self):
    # same filtering rationale as projectedFits
    return [fit for fit in list(self.commandFitDict.values()) if not fit.isInvalid]

def getProjectionInfo(self, fitID):
    # NOTE(review): projectedOnto appears to map fitID -> projection info;
    # it is defined outside this view - confirm
    return self.projectedOnto.get(fitID, None)

def getCommandInfo(self, fitID):
    return self.boostedOnto.get(fitID, None)

@property
def projectedDrones(self):
    return self.__projectedDrones

@property
def projectedFighters(self):
    return self.__projectedFighters
def getWeaponDps(self, spoolOptions=None):
    """Weapon DPS for the given spool options; computed lazily and cached per option key."""
    if spoolOptions not in self.__weaponDpsMap:
        self.calculateWeaponDmgStats(spoolOptions)
    return self.__weaponDpsMap[spoolOptions]

def getWeaponVolley(self, spoolOptions=None):
    """Weapon volley (alpha) for the given spool options; lazily cached."""
    if spoolOptions not in self.__weaponVolleyMap:
        self.calculateWeaponDmgStats(spoolOptions)
    return self.__weaponVolleyMap[spoolOptions]

def getDroneDps(self):
    """Drone DPS; lazily computed and cached."""
    if self.__droneDps is None:
        self.calculateDroneDmgStats()
    return self.__droneDps

def getDroneVolley(self):
    """Drone volley (alpha); lazily computed and cached."""
    if self.__droneVolley is None:
        self.calculateDroneDmgStats()
    return self.__droneVolley

def getTotalDps(self, spoolOptions=None):
    # drones + weapons
    return self.getDroneDps() + self.getWeaponDps(spoolOptions=spoolOptions)

def getTotalVolley(self, spoolOptions=None):
    # drones + weapons
    return self.getDroneVolley() + self.getWeaponVolley(spoolOptions=spoolOptions)
@property
def minerYield(self):
    # lazily computed, cached until clear()/build()
    if self.__minerYield is None:
        self.calculateMiningStats()
    return self.__minerYield

@property
def droneYield(self):
    # lazily computed, cached until clear()/build()
    if self.__droneYield is None:
        self.calculateMiningStats()
    return self.__droneYield

@property
def totalYield(self):
    # combined mining yield of drones and miner modules
    return self.droneYield + self.minerYield

@property
def maxTargets(self):
    # both the character's skills and the hull cap the locked-target count
    return min(self.extraAttributes["maxTargetsLockedFromSkills"],
               self.ship.getModifiedItemAttr("maxLockedTargets"))

@property
def maxTargetRange(self):
    return self.ship.getModifiedItemAttr("maxTargetRange")

@property
def scanStrength(self):
    # strongest of the four sensor-strength flavours
    return max([self.ship.getModifiedItemAttr("scan%sStrength" % scanType)
                for scanType in ("Magnetometric", "Ladar", "Radar", "Gravimetric")])
@property
def scanType(self):
    """Name of the strongest sensor type, or 'Multispectral' on a tie."""
    # gather (strength, name) readings for every sensor flavour
    readings = [(self.ship.getModifiedItemAttr("scan%sStrength" % sensor), sensor)
                for sensor in ("Magnetometric", "Ladar", "Radar", "Gravimetric")]
    top = max(value for value, _ in readings)
    winners = [name for value, name in readings if value == top]
    # a unique maximum names the sensor type; a shared one is multispectral
    return winners[0] if len(winners) == 1 else "Multispectral"
@property
def jamChance(self):
    # ecmProjectedStr holds the remaining lock chance (1 = unjammed),
    # so this converts it to a jam probability percentage
    return (1 - self.ecmProjectedStr) * 100

@property
def maxSpeed(self):
    # an active speedLimit attribute caps maxVelocity when present
    speedLimit = self.ship.getModifiedItemAttr("speedLimit")
    if speedLimit and self.ship.getModifiedItemAttr("maxVelocity") > speedLimit:
        return speedLimit
    return self.ship.getModifiedItemAttr("maxVelocity")

@property
def alignTime(self):
    # -ln(0.25) * agility * mass / 1e6: time to reach 75% of max speed
    agility = self.ship.getModifiedItemAttr("agility") or 0
    mass = self.ship.getModifiedItemAttr("mass")
    return -log(0.25) * agility * mass / 1000000
@property
def implantSource(self):
    # alias for implantLocation (character- vs fit-level implants)
    return self.implantLocation

@implantSource.setter
def implantSource(self, source):
    self.implantLocation = source

@property
def appliedImplants(self):
    """Implant set in effect: the character's or the fit's own, per implantLocation."""
    if self.implantLocation == ImplantLocation.CHARACTER:
        return self.character.implants
    else:
        return self.implants
@validates("ID", "ownerID", "shipID")
def validator(self, key, val):
    """SQLAlchemy validator for the integer ID columns.

    ID must be an int; ownerID and shipID may also be None.

    :raises ValueError: when *val* fails the check for *key*.
    :returns: *val* unchanged when valid.
    """
    # named 'checks' rather than 'map' so the builtin is not shadowed
    checks = {
        "ID": lambda _val: isinstance(_val, int),
        "ownerID": lambda _val: isinstance(_val, int) or _val is None,
        "shipID": lambda _val: isinstance(_val, int) or _val is None
    }
    if not checks[key](val):
        raise ValueError(str(val) + " is not a valid value for " + key)
    return val
def canFit(self, item):
    """Return True if *item* is allowed on this hull at all.

    Whereas Module.fits() deals with current state of the fit in order to
    determine if something fits (for example maxGroupFitted which can be
    modified by effects), this function should be used against Items to
    see if the item is even allowed on the fit with rules that don't change.
    """
    fitsOnType = set()
    fitsOnGroup = set()
    # explicit single-hull restriction
    shipType = item.attributes.get("fitsToShipType", None)
    if shipType is not None:
        fitsOnType.add(shipType.value)
    # multi-hull and hull-group restrictions
    fitsOnType.update([item.attributes[attr].value for attr in item.attributes if attr.startswith("canFitShipType")])
    fitsOnGroup.update([item.attributes[attr].value for attr in item.attributes if attr.startswith("canFitShipGroup")])
    # if the item carries any restriction, this hull must match one of them
    if (len(fitsOnGroup) > 0 or len(fitsOnType) > 0) \
            and self.ship.item.group.ID not in fitsOnGroup \
            and self.ship.item.ID not in fitsOnType:
        return False
    # Citadel modules are now under a new category, so we can check this to ensure only structure modules can fit on a citadel
    if isinstance(self.ship, Citadel) is not item.isStandup:
        return False
    return True
def clear(self, projected=False, command=False):
    """Invalidate all cached calculation results on this fit and its contents.

    :param projected: True when called on a projected fit (guards the
        recursion in the commented-out code below).
    :param command: True when called on a command fit.
    """
    self.__effectiveTank = None
    self.__weaponDpsMap = {}
    self.__weaponVolleyMap = {}
    self.__remoteRepMap = {}
    self.__minerYield = None
    self.__effectiveSustainableTank = None
    self.__sustainableTank = None
    self.__droneDps = None
    self.__droneVolley = None
    self.__droneYield = None
    self.__ehp = None
    self.__calculated = False
    self.__capStable = None
    self.__capState = None
    self.__capUsed = None
    self.__capRecharge = None
    self.__savedCapSimData.clear()
    self.ecmProjectedStr = 1
    # self.commandBonuses = {}
    # empty the lists in place so existing references stay valid
    del self.__calculatedTargets[:]
    del self.__extraDrains[:]
    if self.ship:
        self.ship.clear()
    # clear every contained object that carries modified-attribute caches
    c = chain(
        self.modules,
        self.drones,
        self.fighters,
        self.boosters,
        self.implants,
        self.projectedDrones,
        self.projectedModules,
        self.projectedFighters,
        (self.character, self.extraAttributes),
    )
    for stuff in c:
        if stuff is not None and stuff != self:
            stuff.clear()
    # If this is the active fit that we are clearing, not a projected fit,
    # then this will run and clear the projected ships and flag the next
    # iteration to skip this part to prevent recursion.
    # if not projected:
    #     for stuff in self.projectedFits:
    #         if stuff is not None and stuff != self:
    #             stuff.clear(projected=True)
    #
    # if not command:
    #     for stuff in self.commandFits:
    #         if stuff is not None and stuff != self:
    #             stuff.clear(command=True)
# Methods to register and get the thing currently affecting the fit,
# so we can correctly map "Affected By"
def register(self, currModifier, origin=None):
    """Remember *currModifier* as the source of subsequent attribute changes."""
    self.__modifier = currModifier
    self.__origin = origin
    # point the modifier's attribute maps back at the fit being modified
    if hasattr(currModifier, "itemModifiedAttributes"):
        if hasattr(currModifier.itemModifiedAttributes, "fit"):
            currModifier.itemModifiedAttributes.fit = origin or self
    if hasattr(currModifier, "chargeModifiedAttributes"):
        if hasattr(currModifier.chargeModifiedAttributes, "fit"):
            currModifier.chargeModifiedAttributes.fit = origin or self

def getModifier(self):
    return self.__modifier

def getOrigin(self):
    return self.__origin
def addCommandBonus(self, warfareBuffID, value, module, effect, runTime="normal"):
    """Record a command-burst bonus, keeping only the strongest value per buff ID."""
    # @todo should we pass in min/max to this function, or is abs okay?
    # (abs is old method, ccp now provides the aggregate function in their data)
    existing = self.commandBonuses.get(warfareBuffID)
    if existing is None or abs(existing[1]) < abs(value):
        self.commandBonuses[warfareBuffID] = (runTime, value, module, effect)
    def __runCommandBoosts(self, runTime="normal"):
        """Apply every pending command/environment bonus whose run time matches
        ``runTime`` and remove it from ``self.commandBonuses``.

        Each warfareBuffID maps to a hard-coded application below (command
        bursts, titan effect generators, localized environment beacons and
        abyssal weather), applied via boostItemAttr / filteredItemBoost.
        Bonuses with a different run time are left pending for a later pass.
        """
        pyfalog.debug("Applying gang boosts for {0}", repr(self))
        for warfareBuffID in list(self.commandBonuses.keys()):
            # Unpack all data required to run effect properly
            effect_runTime, value, thing, effect = self.commandBonuses[warfareBuffID]
            if runTime != effect_runTime:
                continue
            # This should always be a gang effect, otherwise it wouldn't be added to commandBonuses
            if effect.isType("gang"):
                self.register(thing)
                # --- Command burst effects ---
                if warfareBuffID == 10:  # Shield Burst: Shield Harmonizing: Shield Resistance
                    for damageType in ("Em", "Explosive", "Thermal", "Kinetic"):
                        self.ship.boostItemAttr("shield%sDamageResonance" % damageType, value, stackingPenalties=True)
                if warfareBuffID == 11:  # Shield Burst: Active Shielding: Repair Duration/Capacitor
                    self.modules.filteredItemBoost(
                        lambda mod: mod.item.requiresSkill("Shield Operation") or mod.item.requiresSkill(
                            "Shield Emission Systems"), "capacitorNeed", value)
                    self.modules.filteredItemBoost(
                        lambda mod: mod.item.requiresSkill("Shield Operation") or mod.item.requiresSkill(
                            "Shield Emission Systems"), "duration", value)
                if warfareBuffID == 12:  # Shield Burst: Shield Extension: Shield HP
                    self.ship.boostItemAttr("shieldCapacity", value, stackingPenalties=True)
                if warfareBuffID == 13:  # Armor Burst: Armor Energizing: Armor Resistance
                    for damageType in ("Em", "Thermal", "Explosive", "Kinetic"):
                        self.ship.boostItemAttr("armor%sDamageResonance" % damageType, value, stackingPenalties=True)
                if warfareBuffID == 14:  # Armor Burst: Rapid Repair: Repair Duration/Capacitor
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems") or
                                                  mod.item.requiresSkill("Repair Systems"),
                                                  "capacitorNeed", value)
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems") or
                                                  mod.item.requiresSkill("Repair Systems"),
                                                  "duration", value)
                if warfareBuffID == 15:  # Armor Burst: Armor Reinforcement: Armor HP
                    self.ship.boostItemAttr("armorHP", value, stackingPenalties=True)
                if warfareBuffID == 16:  # Information Burst: Sensor Optimization: Scan Resolution
                    self.ship.boostItemAttr("scanResolution", value, stackingPenalties=True)
                if warfareBuffID == 17:  # Information Burst: Electronic Superiority: EWAR Range and Strength
                    groups = ("ECM", "Sensor Dampener", "Weapon Disruptor", "Target Painter")
                    self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value,
                                                  stackingPenalties=True)
                    self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups,
                                                  "falloffEffectiveness", value, stackingPenalties=True)
                    for scanType in ("Magnetometric", "Radar", "Ladar", "Gravimetric"):
                        self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "ECM",
                                                      "scan%sStrengthBonus" % scanType, value,
                                                      stackingPenalties=True)
                    for attr in ("missileVelocityBonus", "explosionDelayBonus", "aoeVelocityBonus", "falloffBonus",
                                 "maxRangeBonus", "aoeCloudSizeBonus", "trackingSpeedBonus"):
                        self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Weapon Disruptor",
                                                      attr, value)
                    for attr in ("maxTargetRangeBonus", "scanResolutionBonus"):
                        self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Sensor Dampener",
                                                      attr, value)
                    self.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Target Painter",
                                                  "signatureRadiusBonus", value, stackingPenalties=True)
                if warfareBuffID == 18:  # Information Burst: Electronic Hardening: Scan Strength
                    for scanType in ("Gravimetric", "Radar", "Ladar", "Magnetometric"):
                        self.ship.boostItemAttr("scan%sStrength" % scanType, value, stackingPenalties=True)
                if warfareBuffID == 19:  # Information Burst: Electronic Hardening: RSD/RWD Resistance
                    self.ship.boostItemAttr("sensorDampenerResistance", value)
                    self.ship.boostItemAttr("weaponDisruptionResistance", value)
                if warfareBuffID == 20:  # Skirmish Burst: Evasive Maneuvers: Signature Radius
                    self.ship.boostItemAttr("signatureRadius", value, stackingPenalties=True)
                if warfareBuffID == 21:  # Skirmish Burst: Interdiction Maneuvers: Tackle Range
                    groups = ("Stasis Web", "Warp Scrambler")
                    self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value,
                                                  stackingPenalties=True)
                if warfareBuffID == 22:  # Skirmish Burst: Rapid Deployment: AB/MWD Speed Increase
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Afterburner") or
                                                  mod.item.requiresSkill("High Speed Maneuvering"),
                                                  "speedFactor", value, stackingPenalties=True)
                if warfareBuffID == 23:  # Mining Burst: Mining Laser Field Enhancement: Mining/Survey Range
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining") or
                                                  mod.item.requiresSkill("Ice Harvesting") or
                                                  mod.item.requiresSkill("Gas Cloud Harvesting"),
                                                  "maxRange", value, stackingPenalties=True)
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("CPU Management"),
                                                  "surveyScanRange", value, stackingPenalties=True)
                if warfareBuffID == 24:  # Mining Burst: Mining Laser Optimization: Mining Capacitor/Duration
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining") or
                                                  mod.item.requiresSkill("Ice Harvesting") or
                                                  mod.item.requiresSkill("Gas Cloud Harvesting"),
                                                  "capacitorNeed", value, stackingPenalties=True)
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining") or
                                                  mod.item.requiresSkill("Ice Harvesting") or
                                                  mod.item.requiresSkill("Gas Cloud Harvesting"),
                                                  "duration", value, stackingPenalties=True)
                if warfareBuffID == 25:  # Mining Burst: Mining Equipment Preservation: Crystal Volatility
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Mining"),
                                                  "crystalVolatilityChance", value, stackingPenalties=True)
                if warfareBuffID == 26:  # Information Burst: Sensor Optimization: Targeting Range
                    self.ship.boostItemAttr("maxTargetRange", value, stackingPenalties=True)
                if warfareBuffID == 60:  # Skirmish Burst: Evasive Maneuvers: Agility
                    self.ship.boostItemAttr("agility", value, stackingPenalties=True)
                # Titan effects
                if warfareBuffID == 39:  # Avatar Effect Generator : Capacitor Recharge bonus
                    self.ship.boostItemAttr("rechargeRate", value, stackingPenalties=True)
                if warfareBuffID == 40:  # Avatar Effect Generator : Kinetic resistance bonus
                    for attr in ("armorKineticDamageResonance", "shieldKineticDamageResonance", "kineticDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 41:  # Avatar Effect Generator : EM resistance penalty
                    for attr in ("armorEmDamageResonance", "shieldEmDamageResonance", "emDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 42:  # Erebus Effect Generator : Armor HP bonus
                    self.ship.boostItemAttr("armorHP", value, stackingPenalties=True)
                if warfareBuffID == 43:  # Erebus Effect Generator : Explosive resistance bonus
                    for attr in ("armorExplosiveDamageResonance", "shieldExplosiveDamageResonance", "explosiveDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 44:  # Erebus Effect Generator : Thermal resistance penalty
                    for attr in ("armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 45:  # Ragnarok Effect Generator : Signature Radius bonus
                    self.ship.boostItemAttr("signatureRadius", value, stackingPenalties=True)
                if warfareBuffID == 46:  # Ragnarok Effect Generator : Thermal resistance bonus
                    for attr in ("armorThermalDamageResonance", "shieldThermalDamageResonance", "thermalDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 47:  # Ragnarok Effect Generator : Explosive resistance penaly
                    for attr in ("armorExplosiveDamageResonance", "shieldExplosiveDamageResonance", "explosiveDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 48:  # Leviathan Effect Generator : Shield HP bonus
                    self.ship.boostItemAttr("shieldCapacity", value, stackingPenalties=True)
                if warfareBuffID == 49:  # Leviathan Effect Generator : EM resistance bonus
                    for attr in ("armorEmDamageResonance", "shieldEmDamageResonance", "emDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 50:  # Leviathan Effect Generator : Kinetic resistance penalty
                    for attr in ("armorKineticDamageResonance", "shieldKineticDamageResonance", "kineticDamageResonance"):
                        self.ship.boostItemAttr(attr, value, stackingPenalties=True)
                if warfareBuffID == 51:  # Avatar Effect Generator : Velocity penalty
                    self.ship.boostItemAttr("maxVelocity", value, stackingPenalties=True)
                if warfareBuffID == 52:  # Erebus Effect Generator : Shield RR penalty
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Emission Systems"), "shieldBonus", value, stackingPenalties=True)
                if warfareBuffID == 53:  # Leviathan Effect Generator : Armor RR penalty
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems"),
                                                  "armorDamageAmount", value, stackingPenalties=True)
                if warfareBuffID == 54:  # Ragnarok Effect Generator : Laser and Hybrid Optimal penalty
                    groups = ("Energy Weapon", "Hybrid Weapon")
                    self.modules.filteredItemBoost(lambda mod: mod.item.group.name in groups, "maxRange", value, stackingPenalties=True)
                # Localized environment effects
                if warfareBuffID == 79:  # AOE_Beacon_bioluminescence_cloud
                    self.ship.boostItemAttr("signatureRadius", value, stackingPenalties=True)
                if warfareBuffID == 80:  # AOE_Beacon_caustic_cloud_local_repair
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Repair Systems"),
                                                  "armorDamageAmount", value, stackingPenalties=True)
                if warfareBuffID == 81:  # AOE_Beacon_caustic_cloud_remote_repair
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Remote Armor Repair Systems"),
                                                  "armorDamageAmount", value, stackingPenalties=True)
                if warfareBuffID == 88:  # AOE_Beacon_filament_cloud_shield_booster_shield_bonus
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Operation"),
                                                  "shieldBonus", value, stackingPenalties=True)
                if warfareBuffID == 89:  # AOE_Beacon_filament_cloud_shield_booster_duration
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Shield Operation"),
                                                  "duration", value, stackingPenalties=True)
                # Abyssal Weather Effects
                if warfareBuffID == 90:  # Weather_electric_storm_EM_resistance_penalty
                    for tankType in ("shield", "armor"):
                        self.ship.boostItemAttr("{}EmDamageResonance".format(tankType), value)
                    self.ship.boostItemAttr("emDamageResonance", value)  # for hull
                if warfareBuffID == 92:  # Weather_electric_storm_capacitor_recharge_bonus
                    self.ship.boostItemAttr("rechargeRate", value, stackingPenalties=True)
                if warfareBuffID == 93:  # Weather_xenon_gas_explosive_resistance_penalty
                    for tankType in ("shield", "armor"):
                        self.ship.boostItemAttr("{}ExplosiveDamageResonance".format(tankType), value)
                    self.ship.boostItemAttr("explosiveDamageResonance", value)  # for hull
                if warfareBuffID == 94:  # Weather_xenon_gas_shield_hp_bonus
                    self.ship.boostItemAttr("shieldCapacity", value)  # for hull
                if warfareBuffID == 95:  # Weather_infernal_thermal_resistance_penalty
                    for tankType in ("shield", "armor"):
                        self.ship.boostItemAttr("{}ThermalDamageResonance".format(tankType), value)
                    self.ship.boostItemAttr("thermalDamageResonance", value)  # for hull
                if warfareBuffID == 96:  # Weather_infernal_armor_hp_bonus
                    self.ship.boostItemAttr("armorHP", value)  # for hull
                if warfareBuffID == 97:  # Weather_darkness_turret_range_penalty
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Gunnery"),
                                                  "maxRange", value, stackingPenalties=True)
                    self.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Gunnery"),
                                                  "falloff", value, stackingPenalties=True)
                if warfareBuffID == 98:  # Weather_darkness_velocity_bonus
                    self.ship.boostItemAttr("maxVelocity", value)
                if warfareBuffID == 99:  # Weather_caustic_toxin_kinetic_resistance_penalty
                    for tankType in ("shield", "armor"):
                        self.ship.boostItemAttr("{}KineticDamageResonance".format(tankType), value)
                    self.ship.boostItemAttr("kineticDamageResonance", value)  # for hull
                if warfareBuffID == 100:  # Weather_caustic_toxin_scan_resolution_bonus
                    self.ship.boostItemAttr("scanResolution", value, stackingPenalties=True)
            # Bonus has been applied (or was not a gang effect): drop it from the pending map.
            del self.commandBonuses[warfareBuffID]
def __resetDependentCalcs(self):
self.calculated = False
for value in list(self.projectedOnto.values()):
if value.victim_fit: # removing a self-projected fit causes victim fit to be None. @todo: look into why. :3
value.victim_fit.calculated = False
    def calculateModifiedAttributes(self, targetFit=None, type=CalcType.LOCAL):
        """
        The fit calculation function. It should be noted that this is a recursive function - if the local fit has
        projected fits, this function will be called for those projected fits to be calculated.
        Args:
            targetFit:
                If this is set, signals that we are currently calculating a remote fit (projected or command) that
                should apply it's remote effects to the targetFit. If None, signals that we are currently calcing the
                local fit
            type:
                The type of calculation our current iteration is in. This helps us determine the interactions between
                fits that rely on others for proper calculations
        """
        pyfalog.info("Starting fit calculation on: {0}, calc: {1}", repr(self), CalcType(type).name)
        # If we are projecting this fit onto another one, collect the projection info for later use
        # We also deal with self-projection here by setting self as a copy (to get a new fit object) to apply onto original fit
        # First and foremost, if we're looking at a local calc, reset the calculated state of fits that this fit affects
        # Thankfully, due to the way projection mechanics currently work, we don't have to traverse down a projection
        # tree to (resetting the first degree of projection will suffice)
        if targetFit is None:
            # This resets all fits that local projects onto, allowing them to recalc when loaded
            self.__resetDependentCalcs()
            # For fits that are under local's Command, we do the same thing
            for value in list(self.boostedOnto.values()):
                # apparently this is a thing that happens when removing a command fit from a fit and then switching to
                # that command fit. Same as projected clears, figure out why.
                if value.boosted_fit:
                    value.boosted_fit.__resetDependentCalcs()
        # NOTE(review): projectionInfo is only bound on this branch; the later
        # PROJECTED-type use relies on callers always passing targetFit then.
        if targetFit and type == CalcType.PROJECTED:
            pyfalog.debug("Calculating projections from {0} to target {1}", repr(self), repr(targetFit))
            projectionInfo = self.getProjectionInfo(targetFit.ID)
        # Start applying any command fits that we may have.
        # We run the command calculations first so that they can calculate fully and store the command effects on the
        # target fit to be used later on in the calculation. This does not apply when we're already calculating a
        # command fit.
        if type != CalcType.COMMAND and self.commandFits and not self.__calculated:
            for fit in self.commandFits:
                commandInfo = fit.getCommandInfo(self.ID)
                # Continue loop if we're trying to apply ourselves or if this fit isn't active
                if not commandInfo.active or self == commandInfo.booster_fit:
                    continue
                commandInfo.booster_fit.calculateModifiedAttributes(self, CalcType.COMMAND)
        # If we're not explicitly asked to project fit onto something,
        # set self as target fit
        if targetFit is None:
            targetFit = self
        # If fit is calculated and we have nothing to do here, get out
        # A note on why we only do this for local fits. There may be
        # gains that we can do here after some evaluation, but right
        # now we need the projected and command fits to continue in
        # this function even if they are already calculated, since it
        # is during those calculations that they apply their effect
        # to the target fits. todo: We could probably skip local fit
        # calculations if calculated, and instead to projections and
        # command stuffs. ninja edit: this is probably already being
        # done with the calculated conditional in the calc loop
        if self.__calculated and type == CalcType.LOCAL:
            pyfalog.debug("Fit has already been calculated and is local, returning: {0}", self)
            return
        if not self.__calculated:
            pyfalog.info("Fit is not yet calculated; will be running local calcs for {}".format(repr(self)))
            self.clear()
        # Loop through our run times here. These determine which effects are run in which order.
        for runTime in ("early", "normal", "late"):
            # pyfalog.debug("Run time: {0}", runTime)
            # Items that are unrestricted. These items are run on the local fit
            # first and then projected onto the target fit it one is designated
            u = [
                (self.character, self.ship),
                self.drones,
                self.fighters,
                self.boosters,
                self.appliedImplants,
                self.modules
            ] if not self.isStructure else [
                # Ensure a restricted set for citadels
                (self.character, self.ship),
                self.fighters,
                self.modules
            ]
            # Items that are restricted. These items are only run on the local
            # fit. They are NOT projected onto the target fit. # See issue 354
            r = [(self.mode,), self.projectedDrones, self.projectedFighters, self.projectedModules]
            # chain unrestricted and restricted into one iterable
            c = chain.from_iterable(u + r)
            for item in c:
                # Registering the item about to affect the fit allows us to
                # track "Affected By" relations correctly
                if item is not None:
                    # apply effects locally if this is first time running them on fit
                    if not self.__calculated:
                        self.register(item)
                        item.calculateModifiedAttributes(self, runTime, False)
                    # Run command effects against target fit. We only have to worry about modules
                    if type == CalcType.COMMAND and item in self.modules:
                        # Apply the gang boosts to target fit
                        # targetFit.register(item, origin=self)
                        item.calculateModifiedAttributes(targetFit, runTime, False, True)
            # pyfalog.debug("Command Bonuses: {}".format(self.commandBonuses))
            # If we are calculating our local or projected fit and have command bonuses, apply them
            if type != CalcType.COMMAND and self.commandBonuses:
                self.__runCommandBoosts(runTime)
            # Run projection effects against target fit. Projection effects have been broken out of the main loop,
            # see GH issue #1081
            if type == CalcType.PROJECTED and projectionInfo:
                self.__runProjectionEffects(runTime, targetFit, projectionInfo)
        # Recursive command ships (A <-> B) get marked as calculated, which means that they aren't recalced when changing
        # tabs. See GH issue 1193
        if type == CalcType.COMMAND and targetFit in self.commandFits:
            pyfalog.debug("{} is in the command listing for COMMAND ({}), do not mark self as calculated (recursive)".format(repr(targetFit), repr(self)))
        else:
            self.__calculated = True
        # Only apply projected fits if fit it not projected itself.
        if type == CalcType.LOCAL:
            for fit in self.projectedFits:
                projInfo = fit.getProjectionInfo(self.ID)
                if projInfo.active:
                    if fit == self:
                        # If doing self projection, no need to run through the recursion process. Simply run the
                        # projection effects on ourselves
                        pyfalog.debug("Running self-projection for {0}", repr(self))
                        for runTime in ("early", "normal", "late"):
                            self.__runProjectionEffects(runTime, self, projInfo)
                    else:
                        fit.calculateModifiedAttributes(self, type=CalcType.PROJECTED)
        pyfalog.debug('Done with fit calculation')
def __runProjectionEffects(self, runTime, targetFit, projectionInfo):
"""
To support a simpler way of doing self projections (so that we don't have to make a copy of the fit and
recalculate), this function was developed to be a common source of projected effect application.
"""
for item in chain(self.drones, self.fighters):
if item is not None:
# apply effects onto target fit x amount of times
for _ in range(projectionInfo.amount):
targetFit.register(item, origin=self)
item.calculateModifiedAttributes(
targetFit, runTime, forceProjected=True,
forcedProjRange=0)
for mod in self.modules:
for _ in range(projectionInfo.amount):
targetFit.register(mod, origin=self)
mod.calculateModifiedAttributes(
targetFit, runTime, forceProjected=True,
forcedProjRange=projectionInfo.projectionRange)
    def fill(self):
        """
        Fill this fit's module slots with enough dummy slots so that all slots are used.
        This is mostly for making the life of gui's easier.
        GUI's can call fill() and then stop caring about empty slots completely.
        todo: want to get rid of using this from the gui/commands, and instead make it a more built-in feature within
        recalc. Figure out a way to keep track of any changes to slot layout and call this automatically

        Returns a dict mapping removed module position -> slot type
        (empty when nothing was removed or the fit has no ship).
        """
        if self.ship is None:
            return {}
        # Look for any dummies of that type to remove
        posToRemove = {}
        for slotType in (FittingSlot.LOW.value, FittingSlot.MED.value, FittingSlot.HIGH.value, FittingSlot.RIG.value, FittingSlot.SUBSYSTEM.value, FittingSlot.SERVICE.value):
            amount = self.getSlotsFree(slotType, True)
            if amount > 0:
                # Too few fitted entries: pad with fresh dummy modules.
                for _ in range(int(amount)):
                    self.modules.append(Module.buildEmpty(slotType))
            if amount < 0:
                # Too many entries: mark surplus dummies of this slot type for removal.
                for mod in self.modules:
                    if mod.isEmpty and mod.slot == slotType:
                        pos = self.modules.index(mod)
                        posToRemove[pos] = slotType
                        amount += 1
                        if amount == 0:
                            break
        # Remove back-to-front so recorded positions stay valid.
        for pos in sorted(posToRemove, reverse=True):
            mod = self.modules[pos]
            self.modules.remove(mod)
        return posToRemove
def unfill(self):
for i in range(len(self.modules) - 1, -1, -1):
mod = self.modules[i]
if mod.isEmpty:
del self.modules[i]
@property
def modCount(self):
x = 0
for i in range(len(self.modules) - 1, -1, -1):
mod = self.modules[i]
if not mod.isEmpty:
x += 1
return x
@staticmethod
def getItemAttrSum(dict, attr):
amount = 0
for mod in dict:
add = mod.getModifiedItemAttr(attr)
if add is not None:
amount += add
return amount
@staticmethod
def getItemAttrOnlineSum(dict, attr):
amount = 0
for mod in dict:
add = mod.getModifiedItemAttr(attr) if mod.state >= FittingModuleState.ONLINE else None
if add is not None:
amount += add
return amount
def getHardpointsUsed(self, type):
amount = 0
for mod in self.modules:
if mod.hardpoint is type and not mod.isEmpty:
amount += 1
return amount
def getSlotsUsed(self, type, countDummies=False):
amount = 0
for mod in chain(self.modules, self.fighters):
if mod.slot is type and (not getattr(mod, "isEmpty", False) or countDummies):
if type in (FittingSlot.F_HEAVY, FittingSlot.F_SUPPORT, FittingSlot.F_LIGHT, FittingSlot.FS_HEAVY, FittingSlot.FS_LIGHT, FittingSlot.FS_SUPPORT) and not mod.active:
continue
amount += 1
return amount
slots = {
FittingSlot.LOW : "lowSlots",
FittingSlot.MED : "medSlots",
FittingSlot.HIGH : "hiSlots",
FittingSlot.RIG : "rigSlots",
FittingSlot.SUBSYSTEM: "maxSubSystems",
FittingSlot.SERVICE : "serviceSlots",
FittingSlot.F_LIGHT : "fighterLightSlots",
FittingSlot.F_SUPPORT: "fighterSupportSlots",
FittingSlot.F_HEAVY : "fighterHeavySlots",
FittingSlot.FS_LIGHT: "fighterStandupLightSlots",
FittingSlot.FS_SUPPORT: "fighterStandupSupportSlots",
FittingSlot.FS_HEAVY: "fighterStandupHeavySlots",
}
def getSlotsFree(self, type, countDummies=False):
if type in (FittingSlot.MODE, FittingSlot.SYSTEM):
# These slots don't really exist, return default 0
return 0
slotsUsed = self.getSlotsUsed(type, countDummies)
totalSlots = self.ship.getModifiedItemAttr(self.slots[type]) or 0
return int(totalSlots - slotsUsed)
    def getNumSlots(self, type):
        """Total number of slots of the given type the ship provides (0 when unset)."""
        return self.ship.getModifiedItemAttr(self.slots[type]) or 0
def getHardpointsFree(self, type):
if type == FittingHardpoint.NONE:
return 1
elif type == FittingHardpoint.TURRET:
return self.ship.getModifiedItemAttr('turretSlotsLeft') - self.getHardpointsUsed(FittingHardpoint.TURRET)
elif type == FittingHardpoint.MISSILE:
return self.ship.getModifiedItemAttr('launcherSlotsLeft') - self.getHardpointsUsed(FittingHardpoint.MISSILE)
else:
raise ValueError("%d is not a valid value for Hardpoint Enum", type)
    @property
    def calibrationUsed(self):
        """Sum of 'upgradeCost' over all online modules (calibration consumed)."""
        return self.getItemAttrOnlineSum(self.modules, 'upgradeCost')
    @property
    def pgUsed(self):
        """Powergrid consumed by online modules, rounded to 2 decimals."""
        return round(self.getItemAttrOnlineSum(self.modules, "power"), 2)
    @property
    def cpuUsed(self):
        """CPU consumed by online modules, rounded to 2 decimals."""
        return round(self.getItemAttrOnlineSum(self.modules, "cpu"), 2)
@property
def droneBandwidthUsed(self):
amount = 0
for d in self.drones:
amount += d.getModifiedItemAttr("droneBandwidthUsed") * d.amountActive
return amount
@property
def droneBayUsed(self):
amount = 0
for d in self.drones:
amount += d.item.volume * d.amount
return amount
@property
def fighterBayUsed(self):
amount = 0
for f in self.fighters:
amount += f.item.volume * f.amount
return amount
@property
def fighterTubesUsed(self):
amount = 0
for f in self.fighters:
if f.active:
amount += 1
return amount
    @property
    def fighterTubesTotal(self):
        """Total fighter tubes the ship provides ('fighterTubes' attribute)."""
        return self.ship.getModifiedItemAttr("fighterTubes")
@property
def cargoBayUsed(self):
amount = 0
for c in self.cargo:
amount += c.getModifiedItemAttr("volume") * c.amount
return amount
@property
def activeDrones(self):
amount = 0
for d in self.drones:
amount += d.amountActive
return amount
@property
def probeSize(self):
"""
Expresses how difficult a target is to probe down with scan probes
"""
sigRad = self.ship.getModifiedItemAttr("signatureRadius")
sensorStr = float(self.scanStrength)
probeSize = sigRad / sensorStr if sensorStr != 0 else None
# http://www.eveonline.com/ingameboard.asp?a=topic&threadID=1532170&page=2#42
if probeSize is not None:
# Probe size is capped at 1.08
probeSize = max(probeSize, 1.08)
return probeSize
@property
def warpSpeed(self):
base = self.ship.getModifiedItemAttr("baseWarpSpeed") or 1
multiplier = self.ship.getModifiedItemAttr("warpSpeedMultiplier") or 1
return base * multiplier
@property
def maxWarpDistance(self):
capacity = self.ship.getModifiedItemAttr("capacitorCapacity")
mass = self.ship.getModifiedItemAttr("mass")
warpCapNeed = self.ship.getModifiedItemAttr("warpCapacitorNeed")
if not warpCapNeed:
return 0
return capacity / (mass * warpCapNeed)
    @property
    def capStable(self):
        """Whether the capacitor is stable; lazily runs the cap simulation."""
        if self.__capStable is None:
            self.simulateCap()
        return self.__capStable
    @property
    def capState(self):
        """
        If the cap is stable, the capacitor state is the % at which it is stable.
        If the cap is unstable, this is the amount of time (seconds) before it runs out.
        Lazily runs the cap simulation on first access.
        """
        if self.__capState is None:
            self.simulateCap()
        return self.__capState
    @property
    def capUsed(self):
        """Capacitor usage from drains; lazily runs the cap simulation."""
        if self.__capUsed is None:
            self.simulateCap()
        return self.__capUsed
    @property
    def capRecharge(self):
        """Capacitor gain (passive recharge + injection); lazily runs the cap simulation."""
        if self.__capRecharge is None:
            self.simulateCap()
        return self.__capRecharge
    @property
    def capDelta(self):
        """Net capacitor balance: recharge minus usage (0 when not yet simulated)."""
        return (self.__capRecharge or 0) - (self.__capUsed or 0)
def calculateCapRecharge(self, percent=PEAK_RECHARGE, capacity=None, rechargeRate=None):
if capacity is None:
capacity = self.ship.getModifiedItemAttr("capacitorCapacity")
if rechargeRate is None:
rechargeRate = self.ship.getModifiedItemAttr("rechargeRate") / 1000.0
return 10 / rechargeRate * sqrt(percent) * (1 - sqrt(percent)) * capacity
def calculateShieldRecharge(self, percent=PEAK_RECHARGE):
capacity = self.ship.getModifiedItemAttr("shieldCapacity")
rechargeRate = self.ship.getModifiedItemAttr("shieldRechargeRate") / 1000.0
return 10 / rechargeRate * sqrt(percent) * (1 - sqrt(percent)) * capacity
def addDrain(self, src, cycleTime, capNeed, clipSize=0, reloadTime=0):
""" Used for both cap drains and cap fills (fills have negative capNeed) """
energyNeutralizerSignatureResolution = src.getModifiedItemAttr("energyNeutralizerSignatureResolution")
signatureRadius = self.ship.getModifiedItemAttr("signatureRadius")
# Signature reduction, uses the bomb formula as per CCP Larrikin
if energyNeutralizerSignatureResolution:
capNeed = capNeed * min(1, signatureRadius / energyNeutralizerSignatureResolution)
if capNeed:
self.__extraDrains.append((cycleTime, capNeed, clipSize, reloadTime))
    def removeDrain(self, i):
        """Remove the externally registered drain at index i."""
        del self.__extraDrains[i]
def iterDrains(self):
return self.__extraDrains.__iter__()
    def __generateDrain(self):
        """Collect capacitor drain entries for the simulator.

        Returns (drains, capUsed, capAdded) where drains is a list of
        (fullCycleTime ms, capNeed, clipSize, disableStagger, reloadTime,
        isCapBooster) tuples from cap-using modules plus external drains.
        NOTE(review): module capNeed comes from mod.capUse (summed directly)
        while external drains are divided by cycle time — presumably capUse is
        already per-second; confirm against the Module implementation.
        """
        drains = []
        capUsed = 0
        capAdded = 0
        for mod in self.activeModulesIter():
            # Only modules that actually use (or give) cap are relevant.
            if (mod.getModifiedItemAttr("capacitorNeed") or 0) != 0:
                cycleTime = mod.rawCycleTime or 0
                reactivationTime = mod.getModifiedItemAttr("moduleReactivationDelay") or 0
                fullCycleTime = cycleTime + reactivationTime
                reloadTime = mod.reloadTime
                if fullCycleTime > 0:
                    capNeed = mod.capUse
                    # Negative capNeed means the module injects cap.
                    if capNeed > 0:
                        capUsed += capNeed
                    else:
                        capAdded -= capNeed
                    # If this is a turret, don't stagger activations
                    disableStagger = mod.hardpoint == FittingHardpoint.TURRET
                    drains.append((
                        int(fullCycleTime),
                        mod.getModifiedItemAttr("capacitorNeed") or 0,
                        mod.numShots or 0,
                        disableStagger,
                        reloadTime,
                        mod.item.group.name == 'Capacitor Booster'))
        for fullCycleTime, capNeed, clipSize, reloadTime in self.iterDrains():
            drains.append((
                int(fullCycleTime),
                capNeed,
                clipSize,
                # Stagger incoming effects for cap simulation
                False,
                reloadTime,
                False))
            # External drains express capNeed per cycle: convert to per-second.
            if capNeed > 0:
                capUsed += capNeed / (fullCycleTime / 1000.0)
            else:
                capAdded += -capNeed / (fullCycleTime / 1000.0)
        return drains, capUsed, capAdded
def simulateCap(self):
drains, self.__capUsed, self.__capRecharge = self.__generateDrain()
self.__capRecharge += self.calculateCapRecharge()
sim = self.__runCapSim(drains=drains)
if sim is not None:
capState = (sim.cap_stable_low + sim.cap_stable_high) / (2 * sim.capacitorCapacity)
self.__capStable = capState > 0
self.__capState = min(100, capState * 100) if self.__capStable else sim.t / 1000.0
else:
self.__capStable = True
self.__capState = 100
def getCapSimData(self, startingCap):
if startingCap not in self.__savedCapSimData:
self.__runCapSim(startingCap=startingCap, tMax=3600, optimizeRepeats=False)
return self.__savedCapSimData[startingCap]
    def __runCapSim(self, drains=None, startingCap=None, tMax=None, optimizeRepeats=True):
        """Configure and run the capacitor simulator.

        drains defaults to this fit's generated drains; startingCap defaults
        to the ship's full capacitor; tMax is in seconds (default 6 hours).
        Saves per-run changes into the cap-sim cache and returns the simulator,
        or None when there are no drains at all.
        """
        if drains is None:
            drains, nil, nil = self.__generateDrain()
        if tMax is None:
            tMax = 6 * 60 * 60 * 1000
        else:
            # Caller passes seconds; simulator works in milliseconds.
            tMax *= 1000
        if len(drains) > 0:
            sim = capSim.CapSimulator()
            sim.init(drains)
            sim.capacitorCapacity = self.ship.getModifiedItemAttr("capacitorCapacity")
            sim.capacitorRecharge = self.ship.getModifiedItemAttr("rechargeRate")
            sim.startingCapacity = startingCap = self.ship.getModifiedItemAttr("capacitorCapacity") if startingCap is None else startingCap
            sim.stagger = True
            sim.scale = False
            sim.t_max = tMax
            sim.reload = self.factorReload
            sim.optimize_repeats = optimizeRepeats
            sim.run()
            # We do not want to store partial results
            if not sim.result_optimized_repeats:
                self.__savedCapSimData[startingCap] = sim.saved_changes
            return sim
        else:
            self.__savedCapSimData[startingCap] = []
            return None
def getCapRegenGainFromMod(self, mod):
"""Return how much cap regen do we gain from having this module"""
currentRegen = self.calculateCapRecharge()
nomodRegen = self.calculateCapRecharge(
capacity=self.ship.getModifiedItemAttrExtended("capacitorCapacity", ignoreAfflictors=[mod]),
rechargeRate=self.ship.getModifiedItemAttrExtended("rechargeRate", ignoreAfflictors=[mod]) / 1000.0)
return currentRegen - nomodRegen
def getRemoteReps(self, spoolOptions=None):
if spoolOptions not in self.__remoteRepMap:
remoteReps = RRTypes(0, 0, 0, 0)
for module in self.modules:
remoteReps += module.getRemoteReps(spoolOptions=spoolOptions)
for drone in self.drones:
remoteReps += drone.getRemoteReps()
self.__remoteRepMap[spoolOptions] = remoteReps
return self.__remoteRepMap[spoolOptions]
@property
def hp(self):
hp = {}
for (type, attr) in (('shield', 'shieldCapacity'), ('armor', 'armorHP'), ('hull', 'hp')):
hp[type] = self.ship.getModifiedItemAttr(attr)
return hp
@property
def ehp(self):
if self.__ehp is None:
if self.damagePattern is None:
ehp = self.hp
else:
ehp = self.damagePattern.calculateEhp(self)
self.__ehp = ehp
return self.__ehp
@property
def tank(self):
reps = {
"passiveShield": self.calculateShieldRecharge(),
"shieldRepair": self.extraAttributes["shieldRepair"],
"armorRepair": self.extraAttributes["armorRepair"],
"armorRepairPreSpool": self.extraAttributes["armorRepairPreSpool"],
"armorRepairFullSpool": self.extraAttributes["armorRepairFullSpool"],
"hullRepair": self.extraAttributes["hullRepair"]}
return reps
@property
def effectiveTank(self):
if self.__effectiveTank is None:
if self.damagePattern is None:
ehps = self.tank
else:
ehps = self.damagePattern.calculateEffectiveTank(self, self.tank)
self.__effectiveTank = ehps
return self.__effectiveTank
    @property
    def sustainableTank(self):
        """Cap-sustainable repair amounts; lazily computed by calculateSustainableTank()."""
        if self.__sustainableTank is None:
            self.calculateSustainableTank()
        return self.__sustainableTank
@property
def effectiveSustainableTank(self):
if self.__effectiveSustainableTank is None:
if self.damagePattern is None:
tank = self.sustainableTank
else:
tank = self.damagePattern.calculateEffectiveTank(self, self.sustainableTank)
self.__effectiveSustainableTank = tank
return self.__effectiveSustainableTank
    def calculateSustainableTank(self):
        """Compute and cache the tank that can be sustained on capacitor.

        Starts from the raw repair totals; when the fit is not cap stable
        (or reload is factored in), cap-using local repairers are first
        subtracted, then re-added in order of cap efficiency until peak
        cap regeneration is consumed.
        """
        if self.__sustainableTank is None:
            sustainable = {
                "passiveShield": self.calculateShieldRecharge(),
                "shieldRepair": self.extraAttributes["shieldRepair"],
                "armorRepair": self.extraAttributes["armorRepair"],
                "armorRepairPreSpool": self.extraAttributes["armorRepairPreSpool"],
                "armorRepairFullSpool": self.extraAttributes["armorRepairFullSpool"],
                "hullRepair": self.extraAttributes["hullRepair"]}
            if not self.capStable or self.factorReload:
                # Map a local repairer type to the attribute it uses
                groupAttrMap = {
                    "Shield Booster": "shieldBonus",
                    "Ancillary Shield Booster": "shieldBonus",
                    "Armor Repair Unit": "armorDamageAmount",
                    "Ancillary Armor Repairer": "armorDamageAmount",
                    "Hull Repair Unit": "structureDamageAmount"}
                # Map local repairer type to tank type
                groupStoreMap = {
                    "Shield Booster": "shieldRepair",
                    "Ancillary Shield Booster": "shieldRepair",
                    "Armor Repair Unit": "armorRepair",
                    "Ancillary Armor Repairer": "armorRepair",
                    "Hull Repair Unit": "hullRepair"}
                repairers = []
                localAdjustment = {"shieldRepair": 0, "armorRepair": 0, "hullRepair": 0}
                capUsed = self.capUsed
                for tankType in localAdjustment:
                    # NOTE: 'dict' shadows the builtin; kept as-is here.
                    dict = self.extraAttributes.getAfflictions(tankType)
                    if self in dict:
                        for afflictor, operator, stackingGroup, preResAmount, postResAmount, used in dict[self]:
                            if not used:
                                continue
                            if afflictor.projected:
                                continue
                            if afflictor.item.group.name not in groupAttrMap:
                                continue
                            usesCap = True
                            try:
                                # Deduct this repper's cap draw up front; it is
                                # re-added below if it turns out sustainable.
                                if afflictor.capUse:
                                    capUsed -= afflictor.capUse
                                else:
                                    usesCap = False
                            except AttributeError:
                                # Afflictor has no capUse attribute at all
                                usesCap = False
                            # Normal Repairers
                            if usesCap and not afflictor.charge:
                                cycleTime = afflictor.rawCycleTime
                                amount = afflictor.getModifiedItemAttr(groupAttrMap[afflictor.item.group.name])
                                localAdjustment[tankType] -= amount / (cycleTime / 1000.0)
                                repairers.append(afflictor)
                            # Ancillary Armor reps etc
                            elif usesCap and afflictor.charge:
                                cycleTime = afflictor.rawCycleTime
                                amount = afflictor.getModifiedItemAttr(groupAttrMap[afflictor.item.group.name])
                                if afflictor.charge.name == "Nanite Repair Paste":
                                    multiplier = afflictor.getModifiedItemAttr("chargedArmorDamageMultiplier") or 1
                                else:
                                    multiplier = 1
                                localAdjustment[tankType] -= amount * multiplier / (cycleTime / 1000.0)
                                repairers.append(afflictor)
                            # Ancillary Shield boosters etc
                            elif not usesCap and afflictor.item.group.name in ("Ancillary Shield Booster", "Ancillary Remote Shield Booster"):
                                cycleTime = afflictor.rawCycleTime
                                amount = afflictor.getModifiedItemAttr(groupAttrMap[afflictor.item.group.name])
                                if self.factorReload and afflictor.charge:
                                    reloadtime = afflictor.reloadTime
                                else:
                                    reloadtime = 0.0
                                # Fraction of total cycle spent reloading; only
                                # that share of the cap-free rep is subtracted.
                                offdutycycle = reloadtime / ((max(afflictor.numShots, 1) * cycleTime) + reloadtime)
                                localAdjustment[tankType] -= amount * offdutycycle / (cycleTime / 1000.0)
                # Sort repairers by efficiency. We want to use the most efficient repairers first
                repairers.sort(key=lambda _mod: _mod.getModifiedItemAttr(
                    groupAttrMap[_mod.item.group.name]) * (_mod.getModifiedItemAttr(
                    "chargedArmorDamageMultiplier") or 1) / _mod.getModifiedItemAttr("capacitorNeed"), reverse=True)
                # Loop through every module until we're above peak recharge
                # Most efficient first, as we sorted earlier.
                # calculate how much the repper can rep stability & add to total
                totalPeakRecharge = self.capRecharge
                for afflictor in repairers:
                    if capUsed > totalPeakRecharge:
                        break
                    if self.factorReload and afflictor.charge:
                        reloadtime = afflictor.reloadTime
                    else:
                        reloadtime = 0.0
                    cycleTime = afflictor.rawCycleTime
                    capPerSec = afflictor.capUse
                    if capPerSec is not None and cycleTime is not None:
                        # Check how much this repper can work
                        sustainability = min(1, (totalPeakRecharge - capUsed) / capPerSec)
                        amount = afflictor.getModifiedItemAttr(groupAttrMap[afflictor.item.group.name])
                        # Add the sustainable amount
                        if not afflictor.charge:
                            localAdjustment[groupStoreMap[afflictor.item.group.name]] += sustainability * amount / (
                                cycleTime / 1000.0)
                        else:
                            if afflictor.charge.name == "Nanite Repair Paste":
                                multiplier = afflictor.getModifiedItemAttr("chargedArmorDamageMultiplier") or 1
                            else:
                                multiplier = 1
                            ondutycycle = (max(afflictor.numShots, 1) * cycleTime) / (
                                (max(afflictor.numShots, 1) * cycleTime) + reloadtime)
                            localAdjustment[groupStoreMap[
                                afflictor.item.group.name]] += sustainability * amount * ondutycycle * multiplier / (
                                cycleTime / 1000.0)
                        capUsed += capPerSec
                sustainable["shieldRepair"] += localAdjustment["shieldRepair"]
                sustainable["armorRepair"] += localAdjustment["armorRepair"]
                sustainable["armorRepairPreSpool"] += localAdjustment["armorRepair"]
                sustainable["armorRepairFullSpool"] += localAdjustment["armorRepair"]
                sustainable["hullRepair"] += localAdjustment["hullRepair"]
            self.__sustainableTank = sustainable
        return self.__sustainableTank
def calculateLockTime(self, radius):
scanRes = self.ship.getModifiedItemAttr("scanResolution")
if scanRes is not None and scanRes > 0:
return calculateLockTime(srcScanRes=scanRes, tgtSigRadius=radius)
else:
return self.ship.getModifiedItemAttr("scanSpeed") / 1000.0
def calculateMiningStats(self):
minerYield = 0
droneYield = 0
for mod in self.modules:
minerYield += mod.miningStats
for drone in self.drones:
droneYield += drone.miningStats
self.__minerYield = minerYield
self.__droneYield = droneYield
def calculateWeaponDmgStats(self, spoolOptions):
weaponVolley = DmgTypes(0, 0, 0, 0)
weaponDps = DmgTypes(0, 0, 0, 0)
for mod in self.modules:
weaponVolley += mod.getVolley(spoolOptions=spoolOptions, targetProfile=self.targetProfile)
weaponDps += mod.getDps(spoolOptions=spoolOptions, targetProfile=self.targetProfile)
self.__weaponVolleyMap[spoolOptions] = weaponVolley
self.__weaponDpsMap[spoolOptions] = weaponDps
def calculateDroneDmgStats(self):
droneVolley = DmgTypes(0, 0, 0, 0)
droneDps = DmgTypes(0, 0, 0, 0)
for drone in self.drones:
droneVolley += drone.getVolley(targetProfile=self.targetProfile)
droneDps += drone.getDps(targetProfile=self.targetProfile)
for fighter in self.fighters:
droneVolley += fighter.getVolley(targetProfile=self.targetProfile)
droneDps += fighter.getDps(targetProfile=self.targetProfile)
self.__droneDps = droneDps
self.__droneVolley = droneVolley
@property
def fits(self):
for mod in self.modules:
if not mod.isEmpty and not mod.fits(self):
return False
return True
def getReleaseLimitForDrone(self, item):
if not item.isDrone:
return 0
bw = round(self.ship.getModifiedItemAttr("droneBandwidth"))
volume = round(item.attribsWithOverrides['volume'].value)
return int(bw / volume)
def getStoreLimitForDrone(self, item):
if not item.isDrone:
return 0
bayTotal = round(self.ship.getModifiedItemAttr("droneCapacity"))
bayUsed = round(self.droneBayUsed)
volume = item.attribsWithOverrides['volume'].value
return int((bayTotal - bayUsed) / volume)
def getSystemSecurity(self):
secstatus = self.systemSecurity
# Default to nullsec
if secstatus is None:
secstatus = FitSystemSecurity.NULLSEC
return secstatus
def activeModulesIter(self):
for mod in self.modules:
if mod.state >= FittingModuleState.ACTIVE:
yield mod
def activeDronesIter(self):
for drone in self.drones:
if drone.amountActive > 0:
yield drone
def activeFightersIter(self):
for fighter in self.fighters:
if fighter.active:
yield fighter
def activeFighterAbilityIter(self):
for fighter in self.activeFightersIter():
for ability in fighter.abilities:
if ability.active:
yield fighter, ability
    def getDampMultScanRes(self):
        """Combined scan-resolution multiplier from active sensor dampeners.

        Collects scanResolutionBonus values from active damp modules
        (including doomsday AoE damps) and damp drones, converts each
        percentage bonus into a multiplier and folds the result through
        calculateMultiplier.
        """
        damps = []
        for mod in self.activeModulesIter():
            for effectName in ('remoteSensorDampFalloff', 'structureModuleEffectRemoteSensorDampener'):
                if effectName in mod.item.effects:
                    damps.append((mod.getModifiedItemAttr('scanResolutionBonus'), 'default'))
            if 'doomsdayAOEDamp' in mod.item.effects:
                damps.append((mod.getModifiedItemAttr('scanResolutionBonus'), 'default'))
        for drone in self.activeDronesIter():
            if 'remoteSensorDampEntity' in drone.item.effects:
                # One entry per active drone in the stack
                damps.extend(drone.amountActive * ((drone.getModifiedItemAttr('scanResolutionBonus'), 'default'),))
        mults = {}
        for strength, stackingGroup in damps:
            # Bonuses are percentages (e.g. -50 becomes a 0.5 multiplier)
            mults.setdefault(stackingGroup, []).append((1 + strength / 100, None))
        return calculateMultiplier(mults)
    def __deepcopy__(self, memo=None):
        """Deep-copy this fit.

        Ship, mode, modules and item collections are deep-copied; character,
        owner, damage pattern and target profile are shared by reference.
        Command/projection links are recreated against the copy, mirroring
        their active/amount/range flags.
        """
        fitCopy = Fit()
        # Character and owner are not copied
        fitCopy.character = self.__character
        fitCopy.owner = self.owner
        fitCopy.ship = deepcopy(self.ship)
        fitCopy.mode = deepcopy(self.mode)
        fitCopy.name = "%s copy" % self.name
        fitCopy.damagePattern = self.damagePattern
        fitCopy.targetProfile = self.targetProfile
        fitCopy.implantLocation = self.implantLocation
        fitCopy.systemSecurity = self.systemSecurity
        fitCopy.notes = self.notes
        for i in self.modules:
            fitCopy.modules.appendIgnoreEmpty(deepcopy(i))
        # Collections that are copied element by element
        toCopy = (
            "drones",
            "fighters",
            "cargo",
            "implants",
            "boosters",
            "projectedModules",
            "projectedDrones",
            "projectedFighters")
        for name in toCopy:
            orig = getattr(self, name)
            c = getattr(fitCopy, name)
            for i in orig:
                c.append(deepcopy(i))
        # this bit is required -- see GH issue # 83
        def forceUpdateSavedata(fit):
            # Flush pending changes and reload the fit from the database so
            # its command/projection info rows are current.
            eos.db.saveddata_session.flush()
            eos.db.saveddata_session.refresh(fit)
        for fit in self.commandFits:
            fitCopy.commandFitDict[fit.ID] = fit
            forceUpdateSavedata(fit)
            copyCommandInfo = fit.getCommandInfo(fitCopy.ID)
            originalCommandInfo = fit.getCommandInfo(self.ID)
            copyCommandInfo.active = originalCommandInfo.active
            forceUpdateSavedata(fit)
        for fit in self.projectedFits:
            fitCopy.projectedFitDict[fit.ID] = fit
            forceUpdateSavedata(fit)
            copyProjectionInfo = fit.getProjectionInfo(fitCopy.ID)
            originalProjectionInfo = fit.getProjectionInfo(self.ID)
            copyProjectionInfo.active = originalProjectionInfo.active
            copyProjectionInfo.amount = originalProjectionInfo.amount
            copyProjectionInfo.projectionRange = originalProjectionInfo.projectionRange
            forceUpdateSavedata(fit)
        return fitCopy
def __repr__(self):
return "Fit(ID={}, ship={}, name={}) at {}".format(
self.ID, self.ship.item.name, self.name, hex(id(self))
)
def __str__(self):
return "{} ({})".format(
self.name, self.ship.item.name
)
|
DarkFenX/Pyfa
|
eos/saveddata/fit.py
|
Python
|
gpl-3.0
| 75,739
|
[
"CRYSTAL"
] |
d5e3af61cdfbee8f3cfb5f3c13c0b4f717a9ca51cecf8894bde729f892a1573f
|
"""
Generalized Linear Models with Exponential Dispersion Family
"""
# Author: Christian Lorentzen <lorentzen.ch@googlemail.com>
# some parts and tricks stolen from other sklearn files.
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.optimize
from ...base import BaseEstimator, RegressorMixin
from ...utils.optimize import _check_optimize_result
from ...utils.validation import check_is_fitted, _check_sample_weight
from ..._loss.glm_distribution import (
ExponentialDispersionModel,
TweedieDistribution,
EDM_DISTRIBUTIONS
)
from .link import (
BaseLink,
IdentityLink,
LogLink,
)
def _safe_lin_pred(X, coef):
"""Compute the linear predictor taking care if intercept is present."""
if coef.size == X.shape[1] + 1:
return X @ coef[1:] + coef[0]
else:
return X @ coef
def _y_pred_deviance_derivative(coef, X, y, weights, family, link):
"""Compute y_pred and the derivative of the deviance w.r.t coef."""
lin_pred = _safe_lin_pred(X, coef)
y_pred = link.inverse(lin_pred)
d1 = link.inverse_derivative(lin_pred)
temp = d1 * family.deviance_derivative(y, y_pred, weights)
if coef.size == X.shape[1] + 1:
devp = np.concatenate(([temp.sum()], temp @ X))
else:
devp = temp @ X # same as X.T @ temp
return y_pred, devp
class GeneralizedLinearRegressor(RegressorMixin, BaseEstimator):
    """Regression via a penalized Generalized Linear Model (GLM).

    GLMs based on a reproductive Exponential Dispersion Model (EDM) aim at
    fitting and predicting the mean of the target y as y_pred=h(X*w).
    Therefore, the fit minimizes the following objective function with L2
    priors as regularizer::

        1/(2*sum(s)) * deviance(y, h(X*w); s)
        + 1/2 * alpha * |w|_2

    with inverse link function h and s=sample_weight.
    The parameter ``alpha`` corresponds to the lambda parameter in glmnet.

    Read more in the :ref:`User Guide <Generalized_linear_regression>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        Constant that multiplies the penalty term and thus determines the
        regularization strength. ``alpha = 0`` is equivalent to unpenalized
        GLMs. In this case, the design matrix `X` must have full column rank
        (no collinearities).

    fit_intercept : bool, default=True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the linear predictor (X @ coef + intercept).

    family : {'normal', 'poisson', 'gamma', 'inverse-gaussian'} \
            or an ExponentialDispersionModel instance, default='normal'
        The distributional assumption of the GLM, i.e. which distribution from
        the EDM, specifies the loss function to be minimized.

    link : {'auto', 'identity', 'log'} or an instance of class BaseLink, \
            default='auto'
        The link function of the GLM, i.e. mapping from linear predictor
        `X @ coeff + intercept` to prediction `y_pred`. Option 'auto' sets
        the link depending on the chosen family as follows:

        - 'identity' for Normal distribution
        - 'log' for Poisson, Gamma and Inverse Gaussian distributions

    solver : 'lbfgs', default='lbfgs'
        Algorithm to use in the optimization problem:

        'lbfgs'
            Calls scipy's L-BFGS-B optimizer.

    max_iter : int, default=100
        The maximal number of iterations for the solver.

    tol : float, default=1e-4
        Stopping criterion. For the lbfgs solver,
        the iteration will stop when ``max{|g_j|, j = 1, ..., d} <= tol``
        where ``g_j`` is the j-th component of the gradient (derivative) of
        the objective function.

    warm_start : bool, default=False
        If set to ``True``, reuse the solution of the previous call to ``fit``
        as initialization for ``coef_`` and ``intercept_``.

    verbose : int, default=0
        For the lbfgs solver set verbose to any positive number for verbosity.

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Estimated coefficients for the linear predictor (`X @ coef_ +
        intercept_`) in the GLM.

    intercept_ : float
        Intercept (a.k.a. bias) added to linear predictor.

    n_iter_ : int
        Actual number of iterations used in the solver.
    """

    def __init__(self, *, alpha=1.0,
                 fit_intercept=True, family='normal', link='auto',
                 solver='lbfgs', max_iter=100, tol=1e-4, warm_start=False,
                 verbose=0):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.family = family
        self.link = link
        self.solver = solver
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.verbose = verbose

    def fit(self, X, y, sample_weight=None):
        """Fit a Generalized Linear Model.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training data.

        y : array-like of shape (n_samples,)
            Target values.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        self : returns an instance of self.
        """
        # Resolve the family parameter into an ExponentialDispersionModel
        # instance.
        if isinstance(self.family, ExponentialDispersionModel):
            self._family_instance = self.family
        elif self.family in EDM_DISTRIBUTIONS:
            self._family_instance = EDM_DISTRIBUTIONS[self.family]()
        else:
            raise ValueError(
                "The family must be an instance of class"
                " ExponentialDispersionModel or an element of"
                " ['normal', 'poisson', 'gamma', 'inverse-gaussian']"
                "; got (family={0})".format(self.family))

        # Guarantee that self._link_instance is set to an instance of
        # class BaseLink
        if isinstance(self.link, BaseLink):
            self._link_instance = self.link
        else:
            if self.link == 'auto':
                if isinstance(self._family_instance, TweedieDistribution):
                    if self._family_instance.power <= 0:
                        self._link_instance = IdentityLink()
                    if self._family_instance.power >= 1:
                        self._link_instance = LogLink()
                else:
                    raise ValueError("No default link known for the "
                                     "specified distribution family. Please "
                                     "set link manually, i.e. not to 'auto'; "
                                     "got (link='auto', family={})"
                                     .format(self.family))
            elif self.link == 'identity':
                self._link_instance = IdentityLink()
            elif self.link == 'log':
                self._link_instance = LogLink()
            else:
                raise ValueError(
                    "The link must be an instance of class Link or "
                    "an element of ['auto', 'identity', 'log']; "
                    "got (link={0})".format(self.link))

        # Validate hyperparameters.
        if not isinstance(self.alpha, numbers.Number) or self.alpha < 0:
            raise ValueError("Penalty term must be a non-negative number;"
                             " got (alpha={0})".format(self.alpha))
        if not isinstance(self.fit_intercept, bool):
            raise ValueError("The argument fit_intercept must be bool;"
                             " got {0}".format(self.fit_intercept))
        if self.solver not in ['lbfgs']:
            # BUG FIX: a space was missing between the two string literals,
            # producing "only solvers'lbfgs'" in the error message.
            raise ValueError("GeneralizedLinearRegressor supports only solvers"
                             " 'lbfgs'; got {0}".format(self.solver))
        solver = self.solver
        if (not isinstance(self.max_iter, numbers.Integral)
                or self.max_iter <= 0):
            raise ValueError("Maximum number of iteration must be a positive "
                             "integer;"
                             " got (max_iter={0!r})".format(self.max_iter))
        if not isinstance(self.tol, numbers.Number) or self.tol <= 0:
            raise ValueError("Tolerance for stopping criteria must be "
                             "positive; got (tol={0!r})".format(self.tol))
        if not isinstance(self.warm_start, bool):
            raise ValueError("The argument warm_start must be bool;"
                             " got {0}".format(self.warm_start))

        family = self._family_instance
        link = self._link_instance

        X, y = self._validate_data(X, y, accept_sparse=['csc', 'csr'],
                                   dtype=[np.float64, np.float32],
                                   y_numeric=True, multi_output=False)

        weights = _check_sample_weight(sample_weight, X)

        _, n_features = X.shape

        if not np.all(family.in_y_range(y)):
            raise ValueError("Some value(s) of y are out of the valid "
                             "range for family {0}"
                             .format(family.__class__.__name__))
        # TODO: if alpha=0 check that X is not rank deficient

        # rescaling of sample_weight
        #
        # IMPORTANT NOTE: Since we want to minimize
        # 1/(2*sum(sample_weight)) * deviance + L2,
        # deviance = sum(sample_weight * unit_deviance),
        # we rescale weights such that sum(weights) = 1 and this becomes
        # 1/2*deviance + L2 with deviance=sum(weights * unit_deviance)
        weights = weights / weights.sum()

        if self.warm_start and hasattr(self, 'coef_'):
            if self.fit_intercept:
                coef = np.concatenate((np.array([self.intercept_]),
                                       self.coef_))
            else:
                coef = self.coef_
        else:
            if self.fit_intercept:
                coef = np.zeros(n_features+1)
                # Initialize the intercept at the link-transformed weighted
                # mean of y.
                coef[0] = link(np.average(y, weights=weights))
            else:
                coef = np.zeros(n_features)

        # algorithms for optimization
        if solver == 'lbfgs':
            def func(coef, X, y, weights, alpha, family, link):
                # Objective = 1/2 * deviance + 1/2 * alpha * |w|_2^2 and its
                # gradient; the intercept (coef[0]) is not penalized.
                y_pred, devp = _y_pred_deviance_derivative(
                    coef, X, y, weights, family, link
                )
                dev = family.deviance(y, y_pred, weights)
                # offset if coef[0] is intercept
                offset = 1 if self.fit_intercept else 0
                coef_scaled = alpha * coef[offset:]
                obj = 0.5 * dev + 0.5 * (coef[offset:] @ coef_scaled)
                objp = 0.5 * devp
                objp[offset:] += coef_scaled
                return obj, objp

            args = (X, y, weights, self.alpha, family, link)

            opt_res = scipy.optimize.minimize(
                func, coef, method="L-BFGS-B", jac=True,
                options={
                    "maxiter": self.max_iter,
                    "iprint": (self.verbose > 0) - 1,
                    "gtol": self.tol,
                    "ftol": 1e3*np.finfo(float).eps,
                },
                args=args)
            self.n_iter_ = _check_optimize_result("lbfgs", opt_res)
            coef = opt_res.x

        if self.fit_intercept:
            self.intercept_ = coef[0]
            self.coef_ = coef[1:]
        else:
            # set intercept to zero as the other linear models do
            self.intercept_ = 0.
            self.coef_ = coef

        return self

    def _linear_predictor(self, X):
        """Compute the linear_predictor = `X @ coef_ + intercept_`.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        Returns
        -------
        y_pred : array of shape (n_samples,)
            Returns predicted values of linear predictor.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'],
                                dtype=[np.float64, np.float32], ensure_2d=True,
                                allow_nd=False, reset=False)
        return X @ self.coef_ + self.intercept_

    def predict(self, X):
        """Predict using GLM with feature matrix X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Samples.

        Returns
        -------
        y_pred : array of shape (n_samples,)
            Returns predicted values.
        """
        # check_array is done in _linear_predictor
        eta = self._linear_predictor(X)
        y_pred = self._link_instance.inverse(eta)
        return y_pred

    def score(self, X, y, sample_weight=None):
        """Compute D^2, the percentage of deviance explained.

        D^2 is a generalization of the coefficient of determination R^2.
        R^2 uses squared error and D^2 deviance. Note that those two are equal
        for ``family='normal'``.

        D^2 is defined as
        :math:`D^2 = 1-\\frac{D(y_{true},y_{pred})}{D_{null}}`,
        :math:`D_{null}` is the null deviance, i.e. the deviance of a model
        with intercept alone, which corresponds to :math:`y_{pred} = \\bar{y}`.
        The mean :math:`\\bar{y}` is averaged by sample_weight.
        Best possible score is 1.0 and it can be negative (because the model
        can be arbitrarily worse).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Test samples.

        y : array-like of shape (n_samples,)
            True values of target.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Returns
        -------
        score : float
            D^2 of self.predict(X) w.r.t. y.
        """
        # Note, default score defined in RegressorMixin is R^2 score.
        # TODO: make D^2 a score function in module metrics (and thereby get
        #       input validation and so on)
        weights = _check_sample_weight(sample_weight, X)
        y_pred = self.predict(X)
        dev = self._family_instance.deviance(y, y_pred, weights=weights)
        y_mean = np.average(y, weights=weights)
        dev_null = self._family_instance.deviance(y, y_mean, weights=weights)
        return 1 - dev / dev_null

    def _more_tags(self):
        # create the _family_instance if fit wasn't called yet.
        if hasattr(self, '_family_instance'):
            _family_instance = self._family_instance
        elif isinstance(self.family, ExponentialDispersionModel):
            _family_instance = self.family
        elif self.family in EDM_DISTRIBUTIONS:
            _family_instance = EDM_DISTRIBUTIONS[self.family]()
        else:
            raise ValueError
        return {"requires_positive_y": not _family_instance.in_y_range(-1.0)}
class PoissonRegressor(GeneralizedLinearRegressor):
    """Generalized Linear Model with a Poisson distribution.

    This regressor uses the 'log' link function.

    Read more in the :ref:`User Guide <Generalized_linear_regression>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        L2 regularization strength. ``alpha = 0`` is equivalent to an
        unpenalized GLM; `X` must then have full column rank.

    fit_intercept : bool, default=True
        Whether to add a constant (bias/intercept) to the linear predictor.

    max_iter : int, default=100
        Maximal number of solver iterations.

    tol : float, default=1e-4
        Stopping criterion on the maximum absolute gradient component.

    warm_start : bool, default=False
        Reuse the previous solution of ``fit`` as initialization for
        ``coef_`` and ``intercept_``.

    verbose : int, default=0
        Verbosity for the lbfgs solver (any positive number).

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Coefficients of the linear predictor (`X @ coef_ + intercept_`).

    intercept_ : float
        Intercept added to the linear predictor.

    n_iter_ : int
        Number of iterations used by the solver.
    """

    def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
                 tol=1e-4, warm_start=False, verbose=0):
        super().__init__(alpha=alpha, fit_intercept=fit_intercept,
                         family="poisson", link='log', max_iter=max_iter,
                         tol=tol, warm_start=warm_start, verbose=verbose)

    @property
    def family(self):
        # Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
        return "poisson"

    @family.setter
    def family(self, value):
        if value != "poisson":
            raise ValueError("PoissonRegressor.family must be 'poisson'!")
class GammaRegressor(GeneralizedLinearRegressor):
    """Generalized Linear Model with a Gamma distribution.

    This regressor uses the 'log' link function.

    Read more in the :ref:`User Guide <Generalized_linear_regression>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    alpha : float, default=1
        L2 regularization strength. ``alpha = 0`` is equivalent to an
        unpenalized GLM; `X` must then have full column rank.

    fit_intercept : bool, default=True
        Whether to add a constant (bias/intercept) to the linear predictor.

    max_iter : int, default=100
        Maximal number of solver iterations.

    tol : float, default=1e-4
        Stopping criterion on the maximum absolute gradient component.

    warm_start : bool, default=False
        Reuse the previous solution of ``fit`` as initialization for
        ``coef_`` and ``intercept_``.

    verbose : int, default=0
        Verbosity for the lbfgs solver (any positive number).

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Coefficients of the linear predictor (`X @ coef_ + intercept_`).

    intercept_ : float
        Intercept added to the linear predictor.

    n_iter_ : int
        Number of iterations used by the solver.
    """

    def __init__(self, *, alpha=1.0, fit_intercept=True, max_iter=100,
                 tol=1e-4, warm_start=False, verbose=0):
        super().__init__(alpha=alpha, fit_intercept=fit_intercept,
                         family="gamma", link='log', max_iter=max_iter,
                         tol=tol, warm_start=warm_start, verbose=verbose)

    @property
    def family(self):
        # Make this attribute read-only to avoid mis-uses e.g. in GridSearch.
        return "gamma"

    @family.setter
    def family(self, value):
        if value != "gamma":
            raise ValueError("GammaRegressor.family must be 'gamma'!")
class TweedieRegressor(GeneralizedLinearRegressor):
    """Generalized Linear Model with a Tweedie distribution.

    This estimator can be used to model different GLMs depending on the
    ``power`` parameter, which determines the underlying distribution:
    0 -> Normal, 1 -> Poisson, (1, 2) -> Compound Poisson Gamma,
    2 -> Gamma, 3 -> Inverse Gaussian. For ``0 < power < 1`` no
    distribution exists.

    Read more in the :ref:`User Guide <Generalized_linear_regression>`.

    .. versionadded:: 0.23

    Parameters
    ----------
    power : float, default=0
        Tweedie power parameter selecting the target distribution
        (see above).

    alpha : float, default=1
        L2 regularization strength. ``alpha = 0`` is equivalent to an
        unpenalized GLM; `X` must then have full column rank.

    fit_intercept : bool, default=True
        Whether to add a constant (bias/intercept) to the linear predictor.

    link : {'auto', 'identity', 'log'}, default='auto'
        Link function of the GLM. 'auto' selects 'identity' for the Normal
        distribution and 'log' for Poisson, Gamma and Inverse Gaussian
        distributions.

    max_iter : int, default=100
        Maximal number of solver iterations.

    tol : float, default=1e-4
        Stopping criterion on the maximum absolute gradient component.

    warm_start : bool, default=False
        Reuse the previous solution of ``fit`` as initialization for
        ``coef_`` and ``intercept_``.

    verbose : int, default=0
        Verbosity for the lbfgs solver (any positive number).

    Attributes
    ----------
    coef_ : array of shape (n_features,)
        Coefficients of the linear predictor (`X @ coef_ + intercept_`).

    intercept_ : float
        Intercept added to the linear predictor.

    n_iter_ : int
        Number of iterations used by the solver.
    """

    def __init__(self, *, power=0.0, alpha=1.0, fit_intercept=True,
                 link='auto', max_iter=100, tol=1e-4,
                 warm_start=False, verbose=0):
        super().__init__(alpha=alpha, fit_intercept=fit_intercept,
                         family=TweedieDistribution(power=power), link=link,
                         max_iter=max_iter, tol=tol,
                         warm_start=warm_start, verbose=verbose)

    @property
    def family(self):
        # The family is rebuilt from self.power on each access so that
        # self.power and self.family.power stay identical by construction.
        dist = TweedieDistribution(power=self.power)
        # TODO: make the returned object immutable
        return dist

    @family.setter
    def family(self, value):
        if isinstance(value, TweedieDistribution):
            self.power = value.power
        else:
            raise TypeError("TweedieRegressor.family must be of type "
                            "TweedieDistribution!")
|
glemaitre/scikit-learn
|
sklearn/linear_model/_glm/glm.py
|
Python
|
bsd-3-clause
| 25,041
|
[
"Gaussian"
] |
b9cf81cda51274bd26c6f1469e5f0f402c3bd8b2980673f1233e707f03ae9740
|
#!/usr/bin/env python
import os
import sys
from glob import glob
sys.path.insert(0, os.path.abspath('lib'))
from ansible import __version__, __author__
from distutils.core import setup
# find library modules
from ansible.constants import DEFAULT_MODULE_PATH
# Build the data_files list: every category directory under ./library
# installs its module files below DEFAULT_MODULE_PATH/<category>.
dirs = os.listdir("./library/")
data_files = [
    (os.path.join(DEFAULT_MODULE_PATH, category),
     glob('./library/' + category + '/*'))
    for category in dirs
]

# NOTE(review): install_requires is a setuptools extension; with
# distutils.core.setup it looks like an unknown option -- confirm the
# intended build tool.
setup(name='ansible',
      version=__version__,
      description='Radically simple IT automation',
      author=__author__,
      author_email='michael@ansibleworks.com',
      url='http://ansibleworks.com/',
      license='GPLv3',
      install_requires=['paramiko', 'jinja2', "PyYAML"],
      package_dir={'ansible': 'lib/ansible'},
      packages=[
          'ansible',
          'ansible.utils',
          'ansible.inventory',
          'ansible.inventory.vars_plugins',
          'ansible.playbook',
          'ansible.runner',
          'ansible.runner.action_plugins',
          'ansible.runner.lookup_plugins',
          'ansible.runner.connection_plugins',
          'ansible.runner.filter_plugins',
          'ansible.callback_plugins',
          'ansible.module_utils'
      ],
      scripts=[
          'bin/ansible',
          'bin/ansible-playbook',
          'bin/ansible-pull',
          'bin/ansible-doc',
          'bin/ansible-galaxy'
      ],
      data_files=data_files
      )
|
bezhermoso/home
|
setup.py
|
Python
|
gpl-3.0
| 1,396
|
[
"Galaxy"
] |
a3fcbdfe6713d425761f345e22fdf2546e02f33f47e8b8659d9421e336e7b12d
|
#!/usr/bin/env python2
# coding:utf-8
# Based on GAppProxy 2.0.0 by Du XiaoGang <dugang.2008@gmail.com>
# Based on WallProxy 0.4.0 by Hust Moon <www.ehust@gmail.com>
# Contributor:
# Phus Lu <phus.lu@gmail.com>
# Hewig Xu <hewigovens@gmail.com>
# Ayanamist Yang <ayanamist@gmail.com>
# V.E.O <V.E.O@tom.com>
# Max Lv <max.c.lv@gmail.com>
# AlsoTang <alsotang@gmail.com>
# Christopher Meng <cickumqt@gmail.com>
# Yonsm Guo <YonsmGuo@gmail.com>
# Parkman <cseparkman@gmail.com>
# Ming Bai <mbbill@gmail.com>
# Bin Yu <yubinlove1991@gmail.com>
# lileixuan <lileixuan@gmail.com>
# Cong Ding <cong@cding.org>
# Zhang Youfu <zhangyoufu@gmail.com>
# Lu Wei <luwei@barfoo>
# Harmony Meow <harmony.meow@gmail.com>
# logostream <logostream@gmail.com>
# Rui Wang <isnowfy@gmail.com>
# Wang Wei Qiang <wwqgtxx@gmail.com>
# Felix Yan <felixonmars@gmail.com>
# QXO <qxodream@gmail.com>
# Geek An <geekan@foxmail.com>
# Poly Rabbit <mcx_221@foxmail.com>
# oxnz <yunxinyi@gmail.com>
# Shusen Liu <liushusen.smart@gmail.com>
# Yad Smood <y.s.inside@gmail.com>
# Chen Shuang <cs0x7f@gmail.com>
# cnfuyu <cnfuyu@gmail.com>
# cuixin <steven.cuixin@gmail.com>
import sys
import os
# Resolve the on-disk layout relative to this file so the module works no
# matter where the launcher starts it from:
#   current_path -> the directory holding this proxy.py
#   root_path    -> two levels up (the code root)
#   data_path    -> "data" directory two levels above the code root
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
data_gae_proxy_path = os.path.join(data_path, 'gae_proxy')
# The project ships its own Python 2.7 library tree; put the shared
# (no-arch) libs plus the platform-specific libs on sys.path.
python_path = os.path.abspath( os.path.join(root_path, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
    win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
    sys.path.append(win32_lib)
elif sys.platform.startswith("linux"):
    linux_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
    sys.path.append(linux_lib)
elif sys.platform == "darwin":
    darwin_lib = os.path.abspath( os.path.join(python_path, 'lib', 'darwin'))
    sys.path.append(darwin_lib)
    # macOS system Python keeps some extra stdlib-adjacent packages here.
    extra_lib = "/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python"
    sys.path.append(extra_lib)
import time
import traceback
import platform
import threading
import urllib2
# If this file is a symlink, chase it so work_path is the real location,
# then chdir there so relative paths used by the module resolve correctly.
__file__ = os.path.abspath(__file__)
if os.path.islink(__file__):
    __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
work_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(work_path)
def create_data_path():
    # Make sure the data directories used by gae_proxy exist before any
    # module below tries to write into them (log file, certificates, ...).
    for folder in (data_path, data_gae_proxy_path):
        if not os.path.isdir(folder):
            os.mkdir(folder)
create_data_path()
from config import config
from xlog import getLogger
# Keep a bounded in-memory ring of recent log lines for the web UI,
# and mirror to a file when the config asks for it.
xlog = getLogger("gae_proxy")
xlog.set_buffer(2000)
if config.log_file:
    log_file = os.path.join(data_gae_proxy_path, "local.log")
    xlog.set_file(log_file)
from cert_util import CertUtil
import pac_server
import simple_http_server
import proxy_handler
import connect_control
import env_info
import connect_manager
from gae_handler import spawn_later
# launcher/module_init will check this value for start/stop finished
ready = False
def pre_start():
    # Schedule a request against our own PAC URL ten minutes from now so the
    # PAC file gets generated/refreshed once the server is serving. The
    # ProxyHandler({}) opener bypasses any system proxy for that fetch.
    if not config.PAC_ENABLE:
        return
    pac_url = 'http://%s:%d/%s' % (config.PAC_IP, config.PAC_PORT, config.PAC_FILE)
    direct_opener = urllib2.build_opener(urllib2.ProxyHandler({}))
    spawn_later(600, direct_opener.open, pac_url)
def log_info():
    # Emit a one-shot startup banner summarising the effective configuration.
    xlog.info('------------------------------------------------------')
    xlog.info('Python Version : %s', platform.python_version())
    xlog.info('OS             : %s', env_info.os_detail())
    xlog.info('Listen Address : %s:%d', config.LISTEN_IP, config.LISTEN_PORT)
    if config.PROXY_ENABLE:
        # Upstream proxy is only logged when one is configured.
        xlog.info('%s Proxy    : %s:%s', config.PROXY_TYPE, config.PROXY_HOST, config.PROXY_PORT)
    xlog.info('GAE APPID      : %s', '|'.join(config.GAE_APPIDS))
    if config.PAC_ENABLE:
        xlog.info('Pac Server     : http://%s:%d/%s', config.PAC_IP, config.PAC_PORT, config.PAC_FILE)
        #info += 'Pac File       : file://%s\n' % os.path.join(self.DATA_PATH, self.PAC_FILE)
    xlog.info('------------------------------------------------------')
def main():
    """Start the GAE proxy (and optional PAC server), then block until
    connect_control.keep_running is cleared (see terminate())."""
    global ready
    connect_control.keep_running = True
    config.load()
    connect_manager.https_manager.load_config()
    xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
    # to profile gae_proxy, run proxy.py, visit some web by proxy, then visit http://127.0.0.1:8084/quit to quit and print result.
    do_profile = False
    if do_profile:
        import cProfile, pstats
        pr = cProfile.Profile()
        pr.enable()
    # Re-resolve __file__ and chdir again: main() may be invoked by the
    # launcher after the module-level chdir, from a different cwd.
    global __file__
    __file__ = os.path.abspath(__file__)
    if os.path.islink(__file__):
        __file__ = getattr(os, 'readlink', lambda x: x)(__file__)
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    #xlog.basicConfig(level=xlog.DEBUG if config.LISTEN_DEBUGINFO else xlog.INFO, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
    pre_start()
    log_info()
    CertUtil.init_ca()
    # Both HTTP servers run on daemon threads; this thread only polls the
    # keep_running flag and performs the orderly shutdown.
    proxy_daemon = simple_http_server.HTTPServer((config.LISTEN_IP, config.LISTEN_PORT), proxy_handler.GAEProxyHandler)
    proxy_thread = threading.Thread(target=proxy_daemon.serve_forever)
    proxy_thread.setDaemon(True)
    proxy_thread.start()
    if config.PAC_ENABLE:
        pac_daemon = simple_http_server.HTTPServer((config.PAC_IP, config.PAC_PORT), pac_server.PACServerHandler)
        pac_thread = threading.Thread(target=pac_daemon.serve_forever)
        pac_thread.setDaemon(True)
        pac_thread.start()
    ready = True # checked by launcher.module_init
    while connect_control.keep_running:
        time.sleep(1)
    xlog.info("Exiting gae_proxy module...")
    proxy_daemon.shutdown()
    proxy_daemon.server_close()
    proxy_thread.join()
    if config.PAC_ENABLE:
        pac_daemon.shutdown()
        pac_daemon.server_close()
        pac_thread.join()
    ready = False # checked by launcher.module_init
    xlog.debug("## GAEProxy set keep_running: %s", connect_control.keep_running)
    if do_profile:
        pr.disable()
        pr.print_stats()
# called by launcher/module/stop
def terminate():
    # Only clears the flag; the actual shutdown is performed by main()'s
    # polling loop once it observes keep_running == False.
    xlog.info("start to terminate GAE_Proxy")
    connect_control.keep_running = False
    xlog.debug("## Set keep_running: %s", connect_control.keep_running)
if __name__ == '__main__':
    try:
        main()
    # KeyboardInterrupt derives from BaseException, not Exception, so a
    # Ctrl-C bypasses the first handler and reaches its own clause below.
    except Exception:
        traceback.print_exc(file=sys.stdout)
    except KeyboardInterrupt:
        terminate()
        sys.exit()
|
viger/docker
|
proxy/proxy/code/default/gae_proxy/local/proxy.py
|
Python
|
mit
| 6,968
|
[
"VisIt"
] |
72653eeb5f074bd94cd83b0db26a6b6c8d78cf11e702bcfb6403cde3e04d826a
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import os
import numpy as np
from rmgpy import getPath
from rmgpy.qm.main import QMCalculator
from rmgpy.molecule import Molecule
from rmgpy.qm.gaussian import GaussianMolPM3, GaussianMolPM6
# Locate a Gaussian executable (g09 preferred over g03) from the usual
# environment variables; tests below are skipped when none is found.
gaussEnv = os.getenv('GAUSS_EXEDIR') or os.getenv('g09root') or os.getenv('g03root') or ""
# GAUSS_EXEDIR may be a list like "path1:path2:path3"
for possibleDir in gaussEnv.split(':'):
    if os.path.exists(os.path.join(possibleDir , 'g09')):
        executablePath = os.path.join(possibleDir , 'g09')
        break
    elif os.path.exists(os.path.join(possibleDir , 'g03')):
        executablePath = os.path.join(possibleDir , 'g03')
        break
else:
    # for/else: no executable found in any candidate directory; build a
    # deliberately non-existent path so os.path.exists() stays False.
    executablePath = os.path.join(gaussEnv , '(g03 or g09)')
# Shared QM settings used by every test case in this module.
qm = QMCalculator()
qm.settings.software = 'gaussian'
RMGpy_path = os.path.normpath(os.path.join(getPath(),'..'))
qm.settings.fileStore = os.path.join(RMGpy_path, 'testing', 'qm', 'QMfiles')
qm.settings.scratchDirectory = None
qm.settings.onlyCyclics = False
qm.settings.maxRadicalNumber = 0
# Test molecule: naphthalene (18 atoms, C10H8).
mol1 = Molecule().fromSMILES('C1=CC=C2C=CC=CC2=C1')
class TestGaussianMolPM3(unittest.TestCase):
    """
    Unit tests for PM3 thermochemistry calculations via GaussianMolPM3.

    Requires a working Gaussian installation; otherwise setUp raises
    SkipTest (via the skipIf decorator) and every test is skipped.
    """
    @unittest.skipIf(os.path.exists(executablePath)==False, "Gaussian not found. Try resetting your environment variables if you want to use it.")
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
        self.qmmol1 = GaussianMolPM3(mol1, qm.settings)
    def testGenerateThermoData(self):
        """
        Test that generateThermoData() works correctly.
        """
        # Clear any cached QM result files so the calculation runs fresh.
        try:
            fileList = os.listdir(self.qmmol1.settings.fileStore)
            for fileName in fileList:
                os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
        except OSError:
            pass
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertEqual(result.molecularMass.value, 128.173)
        # Reference PM3 values for naphthalene (J/mol and J/mol/K).
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 1) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
    def testLoadThermoData(self):
        """
        Test that generateThermoData() can load thermo from a previous run.
        Check that it loaded, and the values are the same as above.
        """
        # Note: no file cleanup here, so this reuses the files written by
        # testGenerateThermoData (relies on test execution order).
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM3 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        self.assertAlmostEqual(result.energy.value_si, 169708.01906637018, 1)
        if result.molecularMass.units=='amu':
            self.assertEqual(result.molecularMass.value, 128.173)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 1) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
class TestGaussianMolPM6(unittest.TestCase):
    """
    Unit tests for PM6 thermochemistry calculations via GaussianMolPM6.

    Requires a working Gaussian installation; otherwise setUp raises
    SkipTest (via the skipIf decorator) and every test is skipped.
    """
    @unittest.skipIf(os.path.exists(executablePath)==False, "Gaussian not found. Try resetting your environment variables if you want to use it.")
    def setUp(self):
        """
        A function run before each unit test in this class.
        """
        if not os.path.exists(qm.settings.fileStore):
            os.makedirs(qm.settings.fileStore)
        self.qmmol1 = GaussianMolPM6(mol1, qm.settings)
    def testGenerateThermoData(self):
        """
        Test that generateThermoData() works correctly.
        """
        # Clear any cached QM result files so the calculation runs fresh.
        try:
            fileList = os.listdir(self.qmmol1.settings.fileStore)
            for fileName in fileList:
                os.remove(os.path.join(self.qmmol1.settings.fileStore, fileName))
        except OSError:
            pass
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        if result.molecularMass.units=='amu':
            self.assertEqual(result.molecularMass.value, 128.173)
        # NOTE(review): H298/S298 expectations are identical to the PM3
        # class above — confirm these are genuine PM6 reference values.
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 1) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
    def testLoadThermoData(self):
        """
        Test that generateThermoData() can load thermo from a previous run.
        Check that it loaded, and the values are the same as above.
        """
        # Note: no file cleanup here, so this reuses the files written by
        # testGenerateThermoData (relies on test execution order).
        self.qmmol1.generateThermoData()
        result = self.qmmol1.qmData
        self.assertTrue(self.qmmol1.thermo.comment.startswith('QM GaussianMolPM6 calculation'))
        self.assertEqual(result.numberOfAtoms, 18)
        self.assertIsInstance(result.atomicNumbers, np.ndarray)
        self.assertAlmostEqual(result.energy.value_si, 169708.01906637018, 1)
        if result.molecularMass.units=='amu':
            self.assertEqual(result.molecularMass.value, 128.173)
        self.assertAlmostEqual(self.qmmol1.thermo.H298.value_si, 169708.0608, 1) # to 1 decimal place
        self.assertAlmostEqual(self.qmmol1.thermo.S298.value_si, 334.5007584, 1) # to 1 decimal place
################################################################################
# Allow running this test module directly, with verbose per-test output.
if __name__ == '__main__':
    unittest.main( testRunner = unittest.TextTestRunner(verbosity=2) )
|
faribas/RMG-Py
|
rmgpy/qm/gaussianTest.py
|
Python
|
mit
| 5,473
|
[
"Gaussian"
] |
8bbc3dd84a21449c9deb42ad119dd08ff87eda281a83dae1d366df437287674f
|
#
# Copyright 2014 CIRAD
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import optparse, os, shutil, subprocess, sys, tempfile, fileinput, ConfigParser, operator, time, random
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
def cree_chrom(FILE, OUT):
    # Write one "<record name>\t<sequence length>" line per fasta record in
    # FILE to OUT, sorted by record name (SeqIO.index avoids loading the
    # whole fasta into memory).
    records = SeqIO.index(FILE, "fasta")
    out_handle = open(OUT, 'wb')
    for name in sorted(records):
        out_handle.write('%s\t%d\n' % (name, len(str(records[name].seq))))
    out_handle.close()
def __main__():
    """Parse command-line options, dump chromosome name/length info for the
    reference fasta, and write an INI configuration file consumed by the
    downstream ApMap/scaffremodler pipeline steps."""
    #Parse Command Line
    parser = optparse.OptionParser(usage="python %prog [options]\n\nProgram designed by Guillaume MARTIN : guillaume.martin@cirad.fr\n\n"
    "This script generate a configuration file that will be used in the ApMap pipeline")
    # Wrapper options.
    parser.add_option( '', '--tool', dest='tool', default='bowtie2_single', help='The tool used : bowtie, bowtie2, bowtie2_single, bwa, bwa_mem, [default: %default]')
    parser.add_option( '', '--ref', dest='ref', default='not_filled', help='The multifasta reference file')
    parser.add_option( '', '--q1', dest='q1', default='not_filled', help='The mate1 fastq file')
    parser.add_option( '', '--q2', dest='q2', default='not_filled', help='The mate2 fastq file')
    parser.add_option( '', '--orient', dest='orient', default='rf', help='The expected orientation: rf or fr, [default: %default]')
    parser.add_option( '', '--mini', dest='mini', default='2500', help='The minimum insert size (integer), [default: %default]')
    parser.add_option( '', '--maxi', dest='maxi', default='7500', help='The maximum insert size (integer), [default: %default]')
    parser.add_option( '', '--qual', dest='qual', default='33', help='Fastq quality encoding: 33 or 64, [default: %default]')
    parser.add_option( '', '--index', dest='index', default='y', help='Build reference index : y or n, [default: %default]')
    parser.add_option( '', '--rmindex', dest='rmindex', default='y', help='Remove reference index at the end of calculation: y or n, [default: %default]')
    parser.add_option( '', '--filter_multi', dest='filter_multi', default='y', help='Filter reads with multiple locations : y or n, [default: %default]')
    parser.add_option( '', '--mini_dis', dest='mini_dis', default='10000', help='The minimal insert size to keep the discordant read for structural variation search (integer), [default: %default]')
    parser.add_option( '', '--mult_max_cov', dest='mult_max_cov', default='10', help='multiplicator of median coverage for maximal median coverage to keep a zone (float), [default: %default]')
    parser.add_option( '', '--mult_min_cov', dest='mult_min_cov', default='0.25', help='multiplicator of median coverage for minimal median coverage to keep a zone (float), [default: %default]')
    parser.add_option( '', '--min_zone', dest='min_zone', default='500', help='Minimal number of covered sites in a zone to be considered (integer), [default: %default]')
    parser.add_option( '', '--min_gap', dest='min_gap', default='300', help='Maximal number of contiguous uncovered sites in a zone to be considered as a single zone (integer), [default: %default]')
    parser.add_option( '', '--thread', dest='thread', default='1', help='The thread number used for mapping (integer), [default: %default]')
    parser.add_option( '', '--msd', dest='msd', default='3', help='Multiplicator of standard deviation for discordant zone identification (integer), [default: %default]')
    parser.add_option( '', '--max_dist_merge', dest='max_dist_merge', default=1000, help='Maximal distance between two discordant zone to merge, [default: %default]')
    parser.add_option( '', '--YiS', dest='YiS', default=0, help='The Y-intercept of the linear function for zone size that will give the first component of product giving the score (integer), [default: %default]')
    parser.add_option( '', '--MiS', dest='MiS', default=0.5, help='Multiplicator of median insert size for calculating minimal zone size for which the first component of product giving the score will be maximal (integer), [default: %default]. Exmple: if 0.5, discordant zone of more than 2500 pb will have a maximal score')
    parser.add_option( '', '--YiC', dest='YiC', default=0, help='The Y-intercept of the linear function for coverage that will give the second component of product giving the score (integer), [default: %default]')
    parser.add_option( '', '--MiC', dest='MiC', default=0.25, help='Multiplicator of median coverage for calculating minimal zone coverage for which the second component of product giving the score will be maximal (integer), [default: %default]. For homozygous SV in diploid: expected value = 0.5, if heterozygous: expected value = 0.25')
    parser.add_option( '', '--min_score', dest='min_score', default=70, help='The minimal score for a discordant zone to be identified as passed, [default: %default]')
    parser.add_option( '', '--ploid', dest='ploid', default=0.33, help='Multiplicator for coverage variation detection in SV identification (ex : If homozygous duplication expected in diploid: expected = coverage + coverage*1, if heterozygous duplication expected in diploid => expected = coverage + coverage*0.5). Choose a value lower than the expected one')
    parser.add_option( '', '--restimate', dest='restimate', default='n', help='Wether re-estimating --mini and --maxi parameters: y or n, [default: %default]. If y, these parameters are calculated as followed on well mapped paired read on the basis of previous min and max parameters: min/max = median -/+ (standard_deviation * "--msd" option)')
    parser.add_option( '', '--output', dest='output', default='config.conf', help='The output of the conf file, [default: %default]')
    parser.add_option( '', '--chr', dest='chr', default='chr.tab', help='Output file containing chromosomes informations, [default: %default]')
    parser.add_option( '', '--rm_intermediate', dest='rm_intermediate', default='n', help='remove intermediate bam/sam, [default: %default]')
    parser.add_option( '', '--exclude_chrom', dest='exclude_chrom', default='no_exclude', help='Exclude chromosomes from analysis. "no_exclude" or chromosomes names separated by "=", [default: %default]')
    (options, args) = parser.parse_args()
    # Dump "<name>\t<length>" for each reference sequence to the --chr file.
    cree_chrom(options.ref, options.chr)
    # print options.ref
    # print options.chr
    # print options.q1
    # print options.q2
    # print options.chr
    # print options.output
    # Mirror the parsed options into an INI file, one section per pipeline
    # stage; downstream scripts read their parameters from these sections.
    config = ConfigParser.RawConfigParser()
    config.add_section('General')
    config.set('General','ref', options.ref)
    config.set('General','chr', options.chr)
    config.set('General','mini', options.mini)
    config.set('General','maxi', options.maxi)
    config.set('General','thread', options.thread)
    config.set('General','tool', options.tool)
    config.set('General','q1', options.q1)
    config.set('General','q2', options.q2)
    config.set('General','qual', options.qual)
    config.set('General','orient', options.orient)
    config.set('General','index', options.index)
    config.set('General','rmindex', options.rmindex)
    config.set('General','sd_multiplicator', options.msd)
    config.set('General','restimate', options.restimate)
    config.set('General','mini_dis', options.mini_dis)
    config.set('General','mult_max_cov', options.mult_max_cov)
    config.set('General','mult_min_cov', options.mult_min_cov)
    config.set('General','min_zone', options.min_zone)
    config.set('General','min_gap', options.min_gap)
    config.set('General','max_dist_merge', options.max_dist_merge)
    config.set('General','YiS', options.YiS)
    config.set('General','MiS', options.MiS)
    config.set('General','YiC', options.YiC)
    config.set('General','MiC', options.MiC)
    config.set('General','min_score', options.min_score)
    config.set('General','ploid', options.ploid)
    # The .fai index is assumed to live next to the reference fasta.
    config.set('General','fai_file', options.ref+'.fai')
    config.set('General','exclude_chrom', options.exclude_chrom)
    config.add_section('Mapping')
    config.add_section('Single_filter')
    config.set('Single_filter','rminput', options.rm_intermediate)
    config.set('Single_filter','filter_multi', options.filter_multi)
    config.add_section('Remove_dup')
    config.set('Remove_dup','rminput', options.rm_intermediate)
    config.add_section('Calc_coverage')
    config.add_section('Trie_discord')
    config.set('Trie_discord','rminput', options.rm_intermediate)
    config.add_section('Score_discord')
    config.add_section('Ident_discord')
    with open(options.output, 'wb') as configfile:
        config.write(configfile)
if __name__ == "__main__": __main__()
|
guiguimartin/scaffremodler
|
bin/1_create_conf.py
|
Python
|
gpl-3.0
| 9,324
|
[
"BWA",
"Bowtie"
] |
958cc2a1e5072ea50cf6eb4295c92f75d01cfa4811de8c79ff991b886fde86e9
|
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any
from django.core.management.base import BaseCommand
from django.db import ProgrammingError
from confirmation.models import generate_realm_creation_url
from zerver.models import Realm
class Command(BaseCommand):
    help = """
Outputs a randomly generated, 1-time-use link for Organization creation.
Whoever visits the link can create a new organization on this server, regardless of whether
settings.OPEN_REALM_CREATION is enabled. The link would expire automatically after
settings.REALM_CREATION_LINK_VALIDITY_DAYS.
Usage: ./manage.py generate_realm_creation_link """

    # The usage text above spans multiple lines; swap in a formatter that
    # keeps the line breaks instead of re-wrapping them.
    def create_parser(self, *args: Any, **kwargs: Any) -> ArgumentParser:
        created = super().create_parser(*args, **kwargs)
        created.formatter_class = RawTextHelpFormatter
        return created

    def handle(self, *args: Any, **options: Any) -> None:
        # A trivial query doubles as a check that the schema exists; a
        # ProgrammingError means initialize-database was never run, so give
        # a friendly hint rather than a traceback.
        try:
            Realm.objects.first()
        except ProgrammingError:
            print("The Zulip database does not appear to exist. Have you run initialize-database?")
            sys.exit(1)
        url = generate_realm_creation_url(by_admin=True)
        ok = self.style.SUCCESS
        emit = self.stdout.write
        emit(ok("Please visit the following "
                "secure single-use link to register your "))
        emit(ok("new Zulip organization:\033[0m"))
        emit("")
        emit(ok(" \033[1;92m%s\033[0m" % (url,)))
        emit("")
|
jackrzhang/zulip
|
zerver/management/commands/generate_realm_creation_link.py
|
Python
|
apache-2.0
| 1,697
|
[
"VisIt"
] |
42ebedad890cf82ea7023439f28223f802171733ca4924ec4801556e89855073
|
'''
Inspired in part by http://code.alcidesfonseca.com/docs/rdflib/graph_merging.html
'''
import sys
from rdflib.graph import Graph
def main(argv):
# inputFileName1 = '/home/mikel/UPV-EHU/Eclipse_Workspace/MergeRDFGraphs-Galaxy/data/vc-db-3.rdf'
# inputFileName2 = '/home/mikel/UPV-EHU/Eclipse_Workspace/MergeRDFGraphs-Galaxy/data/vc-db-4.rdf'
store = Graph()
for inputFileName in argv:
store.parse(inputFileName)
print store.serialize()
if __name__ == "__main__":
main(sys.argv[1:])
|
mikel-egana-aranguren/MergeRDFGraphs-Galaxy
|
src/mergerdfgraphs/MergeRDFGraphs.py
|
Python
|
gpl-3.0
| 526
|
[
"Galaxy"
] |
eb6f9c081af11dcb3cbe92fc339f410d14714600cc25bc80bdb3fff9dc710f36
|
import os
import sys
from tarfile import is_tarfile
from zipfile import is_zipfile
from ase.atoms import Atoms
from ase.units import Bohr, Hartree
from ase.io.trajectory import PickleTrajectory
from ase.io.bundletrajectory import BundleTrajectory
from ase.io.netcdftrajectory import NetCDFTrajectory
from ase.calculators.singlepoint import SinglePointDFTCalculator
from ase.calculators.singlepoint import SinglePointKPoint
__all__ = ['read', 'write', 'PickleTrajectory', 'BundleTrajectory',
'NetCDFTrajectory']
def read(filename, index=None, format=None):
    """Read Atoms object(s) from file.
    filename: str
        Name of the file to read from.
    index: int or slice
        If the file contains several configurations, the last configuration
        will be returned by default.  Use index=n to get configuration
        number n (counting from zero).
    format: str
        Used to specify the file-format.  If not given, the
        file-format will be guessed by the *filetype* function.
        Known formats:
        =========================  =============
        format                     short name
        =========================  =============
        GPAW restart-file          gpw
        Dacapo netCDF output file  dacapo
        Old ASE netCDF trajectory  nc
        Virtual Nano Lab file      vnl
        ASE pickle trajectory      traj
        ASE bundle trajectory      bundle
        GPAW text output           gpaw-text
        CUBE file                  cube
        XCrySDen Structure File    xsf
        Dacapo text output         dacapo-text
        XYZ-file                   xyz
        VASP POSCAR/CONTCAR file   vasp
        VASP OUTCAR file           vasp_out
        SIESTA STRUCT file         struct_out
        ABINIT input file          abinit
        V_Sim ascii file           v_sim
        Protein Data Bank          pdb
        CIF-file                   cif
        FHI-aims geometry file     aims
        FHI-aims output file       aims_out
        VTK XML Image Data         vti
        VTK XML Structured Grid    vts
        VTK XML Unstructured Grid  vtu
        TURBOMOLE coord file       tmol
        TURBOMOLE gradient file    tmol-gradient
        exciting input             exi
        AtomEye configuration      cfg
        WIEN2k structure file      struct
        DftbPlus input file        dftb
        CASTEP geom file           cell
        CASTEP output file         castep
        CASTEP trajectory file     geom
        ETSF format                etsf.nc
        DFTBPlus GEN format        gen
        CMR db/cmr-file            db
        CMR db/cmr-file            cmr
        LAMMPS dump file           lammps
        EON reactant.con file      eon
        Gromacs coordinates        gro
        Gaussian com (input) file  gaussian
        Gaussian output file       gaussian_out
        Quantum espresso in file   esp_in
        Quantum espresso out file  esp_out
        Extended XYZ file          extxyz
        NWChem input file          nw
        =========================  =============
    """
    # A "file@index" suffix selects a configuration.  Database-style names
    # (.json/.db/pg://) split on the LAST '@' and allow non-numeric keys;
    # for ordinary files the suffix must parse as an int/slice or it is
    # treated as part of the filename.
    if isinstance(filename, str) and (
            '.json@' in filename or
            '.db@' in filename or
            filename.startswith('pg://') and '@' in filename):
        filename, index = filename.rsplit('@', 1)
        if index.isdigit():
            index = int(index)
    else:
        if isinstance(filename, str):
            p = filename.rfind('@')
            if p != -1:
                try:
                    index = string2index(filename[p + 1:])
                except ValueError:
                    pass
                else:
                    filename = filename[:p]
        if isinstance(index, str):
            index = string2index(index)
    if format is None:
        format = filetype(filename)
    # GPAW restart files are handled inline: rebuild the Atoms object and
    # attach a single-point calculator carrying energy/forces/k-points.
    if format.startswith('gpw'):
        import gpaw
        r = gpaw.io.open(filename, 'r')
        positions = r.get('CartesianPositions') * Bohr
        numbers = r.get('AtomicNumbers')
        cell = r.get('UnitCell') * Bohr
        pbc = r.get('BoundaryConditions')
        tags = r.get('Tags')
        magmoms = r.get('MagneticMoments')
        energy = r.get('PotentialEnergy') * Hartree
        if r.has_array('CartesianForces'):
            forces = r.get('CartesianForces') * Hartree / Bohr
        else:
            forces = None
        atoms = Atoms(positions=positions,
                      numbers=numbers,
                      cell=cell,
                      pbc=pbc)
        if tags.any():
            atoms.set_tags(tags)
        if magmoms.any():
            atoms.set_initial_magnetic_moments(magmoms)
        else:
            magmoms = None
        atoms.calc = SinglePointDFTCalculator(atoms, energy=energy,
                                              forces=forces, magmoms=magmoms)
        kpts = []
        if r.has_array('IBZKPoints'):
            for w, kpt, eps_n, f_n in zip(r.get('IBZKPointWeights'),
                                          r.get('IBZKPoints'),
                                          r.get('Eigenvalues'),
                                          r.get('OccupationNumbers')):
                kpts.append(SinglePointKPoint(w, kpt[0], kpt[1],
                                              eps_n[0], f_n[0]))
        atoms.calc.kpts = kpts
        return atoms
    # Database back-ends: a select() may yield several rows; a single match
    # is unwrapped to a bare Atoms object for convenience.
    if format in ['json', 'db', 'postgresql']:
        from ase.db.core import connect, dict2atoms
        if index == slice(None, None):
            index = None
        images = [dict2atoms(d)
                  for d in connect(filename, format).select(index)]
        if len(images) == 1:
            return images[0]
        else:
            return images
    # Default for all remaining formats: last configuration in the file.
    if index is None:
        index = -1
    # Per-format dispatch; readers are imported lazily so that optional
    # dependencies are only required for the formats actually used.
    if format == 'castep':
        from ase.io.castep import read_castep
        return read_castep(filename, index)
    if format == 'castep_cell':
        import ase.io.castep
        return ase.io.castep.read_cell(filename, index)
    if format == 'castep_geom':
        import ase.io.castep
        return ase.io.castep.read_geom(filename, index)
    if format == 'exi':
        from ase.io.exciting import read_exciting
        return read_exciting(filename, index)
    if format == 'qxyz':
        from ase.io.xyz import read_xyz_quicker
        return read_xyz_quicker(filename, index)
    if format in ['xyz', 'extxyz']:
        from ase.io.extxyz import read_xyz
        return read_xyz(filename, index)
    if format == 'traj':
        from ase.io.trajectory import read_trajectory
        return read_trajectory(filename, index)
    if format == 'bundle':
        from ase.io.bundletrajectory import read_bundletrajectory
        return read_bundletrajectory(filename, index)
    if format == 'cube':
        from ase.io.cube import read_cube
        return read_cube(filename, index)
    if format == 'nc':
        from ase.io.netcdf import read_netcdf
        return read_netcdf(filename, index)
    if format == 'gpaw-text':
        from ase.io.gpawtext import read_gpaw_text
        return read_gpaw_text(filename, index)
    if format == 'dacapo-text':
        from ase.io.dacapo import read_dacapo_text
        return read_dacapo_text(filename)
    if format == 'dacapo':
        from ase.io.dacapo import read_dacapo
        return read_dacapo(filename)
    if format == 'xsf':
        from ase.io.xsf import read_xsf
        return read_xsf(filename, index)
    if format == 'vasp':
        from ase.io.vasp import read_vasp
        return read_vasp(filename)
    if format == 'vasp_out':
        from ase.io.vasp import read_vasp_out
        return read_vasp_out(filename, index)
    if format == 'abinit':
        from ase.io.abinit import read_abinit
        return read_abinit(filename)
    if format == 'v_sim':
        from ase.io.v_sim import read_v_sim
        return read_v_sim(filename)
    if format == 'mol':
        from ase.io.mol import read_mol
        return read_mol(filename)
    if format == 'pdb':
        from ase.io.pdb import read_pdb
        return read_pdb(filename, index)
    if format == 'cif':
        from ase.io.cif import read_cif
        return read_cif(filename, index)
    if format == 'struct':
        from ase.io.wien2k import read_struct
        return read_struct(filename)
    if format == 'struct_out':
        from ase.io.siesta import read_struct
        return read_struct(filename)
    if format == 'vti':
        from ase.io.vtkxml import read_vti
        return read_vti(filename)
    if format == 'vts':
        from ase.io.vtkxml import read_vts
        return read_vts(filename)
    if format == 'vtu':
        from ase.io.vtkxml import read_vtu
        return read_vtu(filename)
    if format == 'aims':
        from ase.io.aims import read_aims
        return read_aims(filename)
    if format == 'aims_out':
        from ase.io.aims import read_aims_output
        return read_aims_output(filename, index)
    if format == 'iwm':
        from ase.io.iwm import read_iwm
        return read_iwm(filename)
    if format == 'Cmdft':
        from ase.io.cmdft import read_I_info
        return read_I_info(filename)
    if format == 'tmol':
        from ase.io.turbomole import read_turbomole
        return read_turbomole(filename)
    if format == 'tmol-gradient':
        from ase.io.turbomole import read_turbomole_gradient
        return read_turbomole_gradient(filename)
    if format == 'cfg':
        from ase.io.cfg import read_cfg
        return read_cfg(filename)
    if format == 'dftb':
        from ase.io.dftb import read_dftb
        return read_dftb(filename)
    if format == 'sdf':
        from ase.io.sdf import read_sdf
        return read_sdf(filename)
    if format == 'etsf':
        from ase.io.etsf import ETSFReader
        return ETSFReader(filename).read_atoms()
    if format == 'gen':
        from ase.io.gen import read_gen
        return read_gen(filename)
    if format == 'cmr':
        from ase.io.cmr_io import read_db
        return read_db(filename, index)
    if format == 'lammps':
        from ase.io.lammpsrun import read_lammps_dump
        return read_lammps_dump(filename, index)
    if format == 'eon':
        from ase.io.eon import read_reactant_con
        return read_reactant_con(filename)
    if format == 'gromacs':
        from ase.io.gromacs import read_gromacs
        return read_gromacs(filename)
    if format == 'gaussian':
        from ase.io.gaussian import read_gaussian
        return read_gaussian(filename)
    if format == 'gaussian_out':
        from ase.io.gaussian import read_gaussian_out
        return read_gaussian_out(filename, index)
    if format == 'esp_in':
        from ase.io.espresso import read_espresso_in
        return read_espresso_in(filename)
    if format == 'esp_out':
        from ase.io.espresso import read_espresso_out
        return read_espresso_out(filename, index)
    if format == 'nw':
        from ase.io.nwchem import read_nwchem_input
        return read_nwchem_input(filename)
    raise RuntimeError('File format descriptor ' + format + ' not recognized!')
def write(filename, images, format=None, **kwargs):
    """Write Atoms object(s) to file.
    filename: str
        Name of the file to write to.
    images: Atoms object or list of Atoms objects
        A single Atoms object or a list of Atoms objects.
    format: str
        Used to specify the file-format. If not given, the
        file-format will be taken from suffix of the filename.
    The accepted output formats:
    ========================= ===========
    format short name
    ========================= ===========
    ASE pickle trajectory traj
    ASE bundle trajectory bundle
    CUBE file cube
    XYZ-file xyz
    VASP POSCAR/CONTCAR file vasp
    ABINIT input file abinit
    Protein Data Bank pdb
    CIF-file cif
    XCrySDen Structure File xsf
    FHI-aims geometry file aims
    gOpenMol .plt file plt
    Python script py
    Encapsulated Postscript eps
    Portable Network Graphics png
    Persistance of Vision pov
    VTK XML Image Data vti
    VTK XML Structured Grid vts
    VTK XML Unstructured Grid vtu
    TURBOMOLE coord file tmol
    exciting exi
    AtomEye configuration cfg
    WIEN2k structure file struct
    CASTEP cell file cell
    DftbPlus input file dftb
    ETSF etsf.nc
    DFTBPlus GEN format gen
    CMR db/cmr-file db
    CMR db/cmr-file cmr
    EON reactant.con file eon
    Gromacs coordinates gro
    GROMOS96 (only positions) g96
    X3D x3d
    X3DOM HTML html
    Extended XYZ file extxyz
    ========================= ===========
    The use of additional keywords is format specific.
    The ``cube`` and ``plt`` formats accept (plt requires it) a ``data``
    keyword, which can be used to write a 3D array to the file along
    with the nuclei coordinates.
    The ``vti``, ``vts`` and ``vtu`` formats are all specifically directed
    for use with MayaVi, and the latter is designated for visualization of
    the atoms whereas the two others are intended for volume data. Further,
    it should be noted that the ``vti`` format is intended for orthogonal
    unit cells as only the grid-spacing is stored, whereas the ``vts`` format
    additionally stores the coordinates of each grid point, thus making it
    useful for volume date in more general unit cells.
    The ``eps``, ``png``, and ``pov`` formats are all graphics formats,
    and accept the additional keywords:
    rotation: str (default '')
        The rotation angles, e.g. '45x,70y,90z'.
    show_unit_cell: int (default 0)
        Can be 0, 1, 2 to either not show, show, or show all of the unit cell.
    radii: array or float (default 1.0)
        An array of same length as the list of atoms indicating the sphere radii.
        A single float specifies a uniform scaling of the default covalent radii.
    bbox: 4 floats (default None)
        Set the bounding box to (xll, yll, xur, yur) (lower left, upper right).
    colors: array (default None)
        An array of same length as the list of atoms, indicating the rgb color
        code for each atom. Default is the jmol_colors of ase/data/colors.
    scale: int (default 20)
        Number of pixels per Angstrom.
    For the ``pov`` graphics format, ``scale`` should not be specified.
    The elements of the color array can additionally be strings, or 4
    and 5 vectors for named colors, rgb + filter, and rgb + filter + transmit
    specification. This format accepts the additional keywords:
    ``run_povray``, ``display``, ``pause``, ``transparent``,
    ``canvas_width``, ``canvas_height``, ``camera_dist``,
    ``image_plane``, ``camera_type``, ``point_lights``,
    ``area_light``, ``background``, ``textures``, ``celllinewidth``,
    ``bondlinewidth``, ``bondatoms``
    The ``xyz`` format accepts a comment string using the ``comment`` keyword:
    comment: str (default '')
        Optional comment written on the second line of the file.
    """
    # Guess the format from the filename when not given explicitly.
    if format is None:
        if filename == '-':
            format = 'xyz'
            filename = sys.stdout
        elif 'POSCAR' in filename or 'CONTCAR' in filename:
            format = 'vasp'
        elif 'OUTCAR' in filename:
            format = 'vasp_out'
        elif filename.endswith('etsf.nc'):
            format = 'etsf'
        elif filename.lower().endswith('.con'):
            format = 'eon'
        elif os.path.basename(filename) == 'coord':
            format = 'tmol'
        else:
            suffix = filename.split('.')[-1]
            format = {'cell': 'castep_cell',
                      }.get(suffix, suffix)  # XXX this does not make sense
            # Maybe like this:
            ## format = {'traj': 'trajectory',
            ##           'nc': 'netcdf',
            ##           'exi': 'exciting',
            ##           'in': 'aims',
            ##           'tmol': 'turbomole',
            ##           }.get(suffix, suffix)
    # Formats with dedicated writers; each branch returns when done so the
    # generic dispatch at the bottom is not reached a second time.
    if format in ['json', 'db']:
        from ase.db import connect
        connect(filename, format).write(images)
        return
    if format == 'castep_cell':
        from ase.io.castep import write_cell
        write_cell(filename, images, **kwargs)
        return
    if format == 'exi':
        from ase.io.exciting import write_exciting
        write_exciting(filename, images)
        return
    if format == 'cif':
        from ase.io.cif import write_cif
        write_cif(filename, images)
        # BUG FIX: this return was missing, so after writing the CIF the
        # generic getattr-dispatch below imported write_cif again and wrote
        # the file a second time (with **kwargs it never expected).
        return
    if format == 'xyz':
        from ase.io.extxyz import write_xyz
        write_xyz(filename, images, columns=['symbols', 'positions'],
                  write_info=False, **kwargs)
        return
    if format == 'extxyz':
        from ase.io.extxyz import write_xyz
        write_xyz(filename, images, **kwargs)
        return
    if format == 'gen':
        from ase.io.gen import write_gen
        write_gen(filename, images)
        return
    elif format == 'in':
        # FHI-aims geometry files use the generic dispatch below.
        format = 'aims'
    elif format == 'tmol':
        from ase.io.turbomole import write_turbomole
        write_turbomole(filename, images)
        return
    elif format == 'dftb':
        from ase.io.dftb import write_dftb
        write_dftb(filename, images)
        return
    elif format == 'struct':
        from ase.io.wien2k import write_struct
        write_struct(filename, images, **kwargs)
        return
    elif format == 'findsym':
        from ase.io.findsym import write_findsym
        write_findsym(filename, images)
        return
    elif format == 'etsf':
        from ase.io.etsf import ETSFWriter
        writer = ETSFWriter(filename)
        if not isinstance(images, (list, tuple)):
            images = [images]
        # ETSF can only hold a single configuration.
        writer.write_atoms(images[0])
        writer.close()
        return
    elif format == 'cmr':
        from ase.io.cmr_io import write_db
        return write_db(filename, images, **kwargs)
    elif format == 'eon':
        from ase.io.eon import write_reactant_con
        write_reactant_con(filename, images)
        return
    elif format == 'gro':
        from ase.io.gromacs import write_gromacs
        write_gromacs(filename, images)
        return
    elif format == 'g96':
        from ase.io.gromos import write_gromos
        write_gromos(filename, images)
        return
    elif format == 'html':
        from ase.io.x3d import write_html
        write_html(filename, images)
        return
    # Generic dispatch: import ase.io.<format> and call its write_<name>.
    format = {'traj': 'trajectory',
              'nc': 'netcdf',
              'bundle': 'bundletrajectory'
              }.get(format, format)
    name = 'write_' + format
    if format in ['vti', 'vts', 'vtu']:
        format = 'vtkxml'
    if format is None:
        # Defensive: format is always set by the suffix logic above.
        format = filetype(filename)
    try:
        write = getattr(__import__('ase.io.%s' % format, {}, {}, [name]), name)
    except ImportError:
        raise TypeError('Unknown format: "%s".' % format)
    write(filename, images, **kwargs)
def string2index(string):
    """Parse an index string into an int or a slice.

    'n' gives the integer n; anything containing ':' is interpreted as
    a slice, with empty fields meaning None (e.g. '::2' -> slice(None, None, 2)).
    """
    if ':' not in string:
        return int(string)
    parts = [int(field) if field else None
             for field in string.split(':')]
    # Pad with None up to the three slice components (start, stop, step).
    while len(parts) < 3:
        parts.append(None)
    return slice(*parts)
def filetype(filename):
    """Try to guess the type of the file.

    Returns a short format name (e.g. 'traj', 'cube', 'vasp', 'xyz').
    The checks below are order-sensitive: content sniffing (magic bytes,
    characteristic first lines) is tried before filename-suffix heuristics,
    and 'xyz' is the final fallback.

    NOTE(review): ``fileobj`` is opened but never closed, and the 'rU'
    mode is deprecated (removed in Python 3.11) — TODO confirm target
    Python version and add a close/with.
    """
    if os.path.isdir(filename):
        # Potentially a BundleTrajectory
        if BundleTrajectory.is_bundle(filename):
            return 'bundle'
        elif os.path.normpath(filename) == 'states':
            return 'eon'
        else:
            raise IOError('Directory: ' + filename)
    if filename.startswith('pg://'):
        return 'postgresql'
    fileobj = open(filename, 'rU')
    # First three bytes are enough for several magic-number checks below.
    s3 = fileobj.read(3)
    if len(s3) == 0:
        raise IOError('Empty file: ' + filename)
    if s3.startswith('{"'):
        return 'json'
    if filename.endswith('.db'):
        return 'db'
    if filename.lower().endswith('.cmr'):
        return 'cmr'
    if is_tarfile(filename):
        return 'gpw'
    if s3 == 'CDF':
        # NetCDF container: distinguish by history/dimension attributes.
        from ase.io.pupynere import NetCDFFile
        nc = NetCDFFile(filename)
        if 'number_of_dynamic_atoms' in nc.dimensions:
            return 'dacapo'
        history = nc.history
        if history == 'GPAW restart file':
            return 'gpw-nc'
        if history == 'ASE trajectory':
            return 'nc'
        if history == 'Dacapo':
            return 'dacapo'
        if hasattr(nc, 'file_format') and nc.file_format.startswith('ETSF'):
            return 'etsf'
        raise IOError('Unknown netCDF file!')
    if is_zipfile(filename):
        return 'vnl'
    # Rewind and sniff the first ~1000 bytes worth of lines.
    fileobj.seek(0)
    lines = fileobj.readlines(1000)
    if lines[0].startswith('PickleTrajectory'):
        return 'traj'
    if (lines[1].startswith('OUTER LOOP:') or
        filename.lower().endswith('.cube')):
        return 'cube'
    if '  ___ ___ ___ _ _ _  \n' in lines:
        return 'gpaw-text'
    if (' &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n'
        in lines[:90]):
        return 'dacapo-text'
    for line in lines:
        if line[0] != '#':
            word = line.strip()
            if word in ['ANIMSTEPS', 'CRYSTAL', 'SLAB', 'POLYMER', 'MOLECULE']:
                return 'xsf'
    # Filename-based heuristics from here on.
    filename_v = os.path.basename(filename)
    if 'POSCAR' in filename_v or 'CONTCAR' in filename_v:
        return 'vasp'
    if 'OUTCAR' in filename_v:
        return 'vasp_out'
    if filename.lower().endswith('.exi'):
        return 'exi'
    if filename.lower().endswith('.mol'):
        return 'mol'
    if filename.lower().endswith('.pdb'):
        return 'pdb'
    if filename.lower().endswith('.cif'):
        return 'cif'
    if filename.lower().endswith('.struct'):
        return 'struct'
    if filename.lower().endswith('.struct_out'):
        return 'struct_out'
    # FHI-aims detection: look for its banner or a parseable 'atom' line.
    fileobj.seek(0)
    while True:
        line = fileobj.readline()
        if not line:
            break
        if 'Invoking FHI-aims ...' in line:
            return 'aims_out'
        if 'atom' in line:
            data = line.split()
            try:
                # If the line parses as "atom x y z symbol" it is aims input.
                Atoms(symbols=[data[4]],
                      positions=[[float(data[1]),
                                  float(data[2]),
                                  float(data[3])]])
                return 'aims'
            except:
                pass
    if filename.lower().endswith('.in'):
        # '.in' is ambiguous: Quantum ESPRESSO input has a &system namelist,
        # otherwise assume FHI-aims.
        fileobj.seek(0)
        while True:
            line = fileobj.readline()
            if not line:
                break
            if ('&system' in line) or ('&SYSTEM' in line):
                return 'esp_in'
        return 'aims'
    if filename.lower().endswith('.cfg'):
        return 'cfg'
    if os.path.split(filename)[1] == 'atoms.dat':
        return 'iwm'
    if filename.endswith('I_info'):
        return 'Cmdft'
    if lines[0].startswith('$coord') or os.path.basename(filename) == 'coord':
        return 'tmol'
    if (lines[0].startswith('$grad') or
        os.path.basename(filename) == 'gradient'):
        return 'tmol-gradient'
    if lines[0].startswith('Geometry'):
        return 'dftb'
    if filename.lower().endswith('.geom'):
        return 'castep_geom'
    if filename.lower().endswith('.castep'):
        return 'castep'
    if filename.lower().endswith('.cell'):
        return 'castep_cell'
    if s3 == '<?x':
        # XML header: probe for one of the VTK XML grid flavours.
        from ase.io.vtkxml import probe_vtkxml
        xmltype = probe_vtkxml(filename)
        if xmltype == 'ImageData':
            return 'vti'
        elif xmltype == 'StructuredGrid':
            return 'vts'
        elif xmltype == 'UnstructuredGrid':
            return 'vtu'
        elif xmltype is not None:
            raise IOError('Unknown VTK XML file!')
    if filename.lower().endswith('.sdf'):
        return 'sdf'
    if filename.lower().endswith('.gen'):
        return 'gen'
    if filename.lower().endswith('.con'):
        return 'eon'
    if 'ITEM: TIMESTEP\n' in lines:
        return 'lammps'
    if filename.lower().endswith('.gro'):
        return 'gromacs'
    if filename.lower().endswith('.log'):
        return 'gaussian_out'
    if filename.lower().endswith('.com'):
        return 'gaussian'
    if filename.lower().endswith('.g96'):
        return 'gromos'
    if filename.lower().endswith('.out'):
        return 'esp_out'
    if filename.endswith('.nw'):
        return 'nw'
    # Default fallback.
    return 'xyz'
|
PHOTOX/fuase
|
ase/ase/io/__init__.py
|
Python
|
gpl-2.0
| 24,476
|
[
"ABINIT",
"ASE",
"CASTEP",
"CRYSTAL",
"ESPResSo",
"FHI-aims",
"GPAW",
"GROMOS",
"Gaussian",
"Gromacs",
"LAMMPS",
"Mayavi",
"NWChem",
"NetCDF",
"Quantum ESPRESSO",
"SIESTA",
"TURBOMOLE",
"VASP",
"VTK",
"WIEN2k",
"exciting"
] |
147bc14f1d9f22da4186b5cac4c3b89debc158bdfd8996be7a3af7235d1c33c5
|
#
# ast_input_line.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.meta_model.ast_data_type import ASTDataType
from pynestml.meta_model.ast_input_type import ASTInputType
from pynestml.meta_model.ast_node import ASTNode
from pynestml.meta_model.ast_signal_type import ASTSignalType
class ASTInputLine(ASTNode):
    """
    This class is used to store a declaration of an input line.
    ASTInputLine represents a single line form the input, e.g.:
    spikeBuffer <- inhibitory excitatory spike
    @attribute sizeParameter Optional parameter representing multisynapse neuron.
    @attribute inputTypes Type of the input channel: e.g. inhibitory or excitatory (or both).
    @attribute spike true iff the neuron is a spike.
    @attribute current true iff. the neuron is a current.
    Grammar:
    inputLine :
    name=NAME
    ('[' sizeParameter=NAME ']')?
    (datatype)?
    '<-' inputType*
    (is_current = 'current' | is_spike = 'spike');
    Attributes:
    name = None
    size_parameter = None
    data_type = None
    input_types = None
    signal_type = None
    """

    def __init__(self, name=None, size_parameter=None, data_type=None, input_types=None, signal_type=None,
                 source_position=None):
        """
        Standard constructor.
        :param name: the name of the buffer
        :type name: str
        :param size_parameter: a parameter indicating the index in an array.
        :type size_parameter: str
        :param data_type: the data type of this buffer
        :type data_type: ASTDataType
        :param input_types: a list of input types specifying the buffer.
        :type input_types: list(ASTInputType)
        :param signal_type: type of signal received, i.e., spikes or currents
        :type signal_type: SignalType
        :param source_position: the position of this element in the source file.
        :type source_position: ASTSourceLocation.
        """
        # BUG FIX: the default used to be the mutable ``input_types=list()``,
        # which is shared across all calls; use None and create a fresh list.
        if input_types is None:
            input_types = []
        assert (name is not None and isinstance(name, str)), \
            '(PyNestML.AST.InputLine) No or wrong type of name provided (%s)!' % type(name)
        assert (signal_type is not None and isinstance(signal_type, ASTSignalType)), \
            '(PyNestML.AST.InputLine) No or wrong type of input signal type provided (%s)!' % type(signal_type)
        assert (input_types is not None and isinstance(input_types, list)), \
            '(PyNestML.AST.InputLine) No or wrong type of input types provided (%s)!' % type(input_types)
        for typ in input_types:
            assert (typ is not None and isinstance(typ, ASTInputType)), \
                '(PyNestML.AST.InputLine) No or wrong type of input type provided (%s)!' % type(typ)
        assert (size_parameter is None or isinstance(size_parameter, str)), \
            '(PyNestML.AST.InputLine) Wrong type of index parameter provided (%s)!' % type(size_parameter)
        assert (data_type is None or isinstance(data_type, ASTDataType)), \
            '(PyNestML.AST.InputLine) Wrong type of data-type provided (%s)!' % type(data_type)
        super(ASTInputLine, self).__init__(source_position)
        self.signal_type = signal_type
        self.input_types = input_types
        self.size_parameter = size_parameter
        self.name = name
        self.data_type = data_type

    def get_name(self):
        """
        Returns the name of the declared buffer.
        :return: the name.
        :rtype: str
        """
        return self.name

    def has_index_parameter(self):
        """
        Returns whether a index parameter has been defined.
        :return: True if index has been used, otherwise False.
        :rtype: bool
        """
        return self.size_parameter is not None

    def get_index_parameter(self):
        """
        Returns the index parameter.
        :return: the index parameter.
        :rtype: str
        """
        return self.size_parameter

    def has_input_types(self):
        """
        Returns whether input types have been defined.
        :return: True, if at least one input type has been defined.
        :rtype: bool
        """
        return len(self.input_types) > 0

    def get_input_types(self):
        """
        Returns the list of input types.
        :return: a list of input types.
        :rtype: list(ASTInputType)
        """
        return self.input_types

    def is_spike(self):
        """
        Returns whether this is a spike buffer or not.
        :return: True if spike buffer, False else.
        :rtype: bool
        """
        return self.signal_type is ASTSignalType.SPIKE

    def is_current(self):
        """
        Returns whether this is a current buffer or not.
        :return: True if current buffer, False else.
        :rtype: bool
        """
        return self.signal_type is ASTSignalType.CURRENT

    def is_excitatory(self):
        """
        Returns whether this buffer is excitatory or not. For this, it has to be marked explicitly by the
        excitatory keyword or no keywords at all shall occur (implicitly all types).
        :return: True if excitatory, False otherwise.
        :rtype: bool
        """
        # No explicit markers means the buffer implicitly accepts all types.
        if self.get_input_types() is not None and len(self.get_input_types()) == 0:
            return True
        for typE in self.get_input_types():
            if typE.is_excitatory:
                return True
        return False

    def is_inhibitory(self):
        """
        Returns whether this buffer is inhibitory or not. For this, it has to be marked explicitly by the
        inhibitory keyword or no keywords at all shall occur (implicitly all types).
        :return: True if inhibitory, False otherwise.
        :rtype: bool
        """
        if self.get_input_types() is not None and len(self.get_input_types()) == 0:
            return True
        for typE in self.get_input_types():
            if typE.is_inhibitory:
                return True
        return False

    def has_datatype(self):
        """
        Returns whether this buffer has a defined data type or not.
        :return: True if it has a datatype, otherwise False.
        :rtype: bool
        """
        return self.data_type is not None and isinstance(self.data_type, ASTDataType)

    def get_datatype(self):
        """
        Returns the currently used data type of this buffer.
        :return: a single data type object.
        :rtype: ASTDataType
        """
        return self.data_type

    def equals(self, other):
        """
        The equals method.
        :param other: a different object.
        :type other: object
        :return: True if equal,otherwise False.
        :rtype: bool
        """
        if not isinstance(other, ASTInputLine):
            return False
        if self.get_name() != other.get_name():
            return False
        # bool + bool == 1 is an xor: exactly one side has an index parameter.
        if self.has_index_parameter() + other.has_index_parameter() == 1:
            return False
        # BUG FIX: this used to compare self.get_input_types() against
        # other.get_index_parameter(), which could never match.
        if (self.has_index_parameter() and other.has_index_parameter() and
                self.get_index_parameter() != other.get_index_parameter()):
            return False
        if self.has_datatype() + other.has_datatype() == 1:
            return False
        if self.has_datatype() and other.has_datatype() and not self.get_datatype().equals(other.get_datatype()):
            return False
        if len(self.get_input_types()) != len(other.get_input_types()):
            return False
        my_input_types = self.get_input_types()
        your_input_types = other.get_input_types()
        for i in range(0, len(my_input_types)):
            if not my_input_types[i].equals(your_input_types[i]):
                return False
        return self.is_spike() == other.is_spike() and self.is_current() == other.is_current()
|
kperun/nestml
|
pynestml/meta_model/ast_input_line.py
|
Python
|
gpl-2.0
| 8,453
|
[
"NEURON"
] |
66c1996925c9687c94b9cdad875ce08ddcb45bc8d13959352bac83b56d0abce0
|
#!/usr/bin/python
#
# Open SoundControl for Python
# Copyright (C) 2002 Daniel Holth, Clinton McChesney
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Daniel Holth <dholth@stetson.edu> or visit
# http://www.stetson.edu/~ProctoLogic/
#
# Changelog:
# 15 Nov. 2001:
# Removed dependency on Python 2.0 features.
# - dwh
# 13 Feb. 2002:
# Added a generic callback handler.
# - dwh
#
# Updated June 2007 by Hans Huebner (hans.huebner@gmail.com)
# Improved bundle support, API cleanup
import sys
import struct
import math
import string
import time
from Logger import log
def hexDump(bytes):
"""Useful utility; prints the string in hexadecimal"""
for i in range(len(bytes)):
sys.stdout.write("%2x " % (ord(bytes[i])))
if (i+1) % 8 == 0:
print repr(bytes[i-7:i+1])
if(len(bytes) % 8 != 0):
print string.rjust("", 11), repr(bytes[i-7:i+1])
class OSCMessage:
    """Builds typetagged OSC messages.

    The message accumulates a typetag string (starting with ',') and the
    packed binary arguments; getBinary() prepends the padded address and
    typetags to produce the wire format.
    """
    def __init__(self, address='', msg=()):
        # msg may be a single scalar, or a list/tuple of scalars; anything
        # else is logged and ignored (the message stays empty).
        self.address = address
        self.typetags = ","
        self.message = ""
        if type(msg) in (str, int, float):
            self.append(msg)
        elif type(msg) in (list, tuple):
            for m in msg:
                if type(m) not in (str, int, float):
                    log("don't know how to encode message element " + str(m) + " " + str(type(m)))
                    return
                self.append(m)
        else:
            # BUG FIX: this branch referenced the undefined loop variable 'm'
            # (NameError at runtime); it must report the whole 'msg' argument.
            log("don't know how to encode message " + str(msg) + " " + str(type(msg)))
            return

    def append(self, argument, typehint=None):
        """Appends data to the message,
        updating the typetags based on
        the argument's type.
        If the argument is a blob (counted string)
        pass in 'b' as typehint."""
        if typehint == 'b':
            binary = OSCBlob(argument)
        else:
            binary = OSCArgument(argument)
        # Each helper returns a (typetag, packed-bytes) pair.
        self.typetags = self.typetags + binary[0]
        self.message = self.message + binary[1]

    def getBinary(self):
        """Returns the binary message (so far) with typetags."""
        address = OSCArgument(self.address)[1]
        typetags = OSCArgument(self.typetags)[1]
        return address + typetags + self.message

    def __repr__(self):
        return self.getBinary()
JAN_1970 = 2208988800L
SECS_TO_PICOS = 4294967296L
def abs_to_timestamp(abs):
""" since 1970 => since 1900 64b OSC """
sec_1970 = long(abs)
sec_1900 = sec_1970 + JAN_1970
sec_frac = float(abs - sec_1970)
picos = long(sec_frac * SECS_TO_PICOS)
total_picos = (abs + JAN_1970) * SECS_TO_PICOS
return struct.pack('!LL', sec_1900, picos)
class OSCBundle:
    """Builds OSC bundles"""
    def __init__(self, when=None):
        # when: bundle timetag in seconds since the Unix epoch;
        # defaults to the current time.
        self.items = []
        if when == None:
            when = time.time()
        self.when = when
    def append(self, address, msg = None):
        # Accepts either (address-string, msg) to build a new OSCMessage,
        # or a ready-made OSCMessage as the first argument.
        if isinstance(address, str):
            self.items.append(OSCMessage(address, msg))
        elif isinstance(address, OSCMessage):
            # address really is an OSCMessage
            self.items.append(address)
        else:
            raise Exception('invalid type of first argument to OSCBundle.append(), need address string or OSCMessage, not ', str(type(address)))
    def getBinary(self):
        # Bundle wire format: the '#bundle' string, the 64-bit timetag,
        # then a (size, message-bytes) pair for every contained item.
        retval = OSCArgument('#bundle')[1] + abs_to_timestamp(self.when)
        for item in self.items:
            binary = item.getBinary()
            retval = retval + OSCArgument(len(binary))[1] + binary
        return retval
def readString(data):
    """Read a null-terminated, 4-byte-aligned OSC string from *data*.

    Returns (string, rest): the text before the first NUL, and the data
    remaining after the string's zero-padded 4-byte boundary.
    """
    # FIX: use the str method instead of string.find(), which is deprecated
    # and removed from the stdlib 'string' module in Python 3.
    length = data.find("\0")
    nextData = int(math.ceil((length + 1) / 4.0) * 4)
    return (data[0:length], data[nextData:])
def readBlob(data):
    """Read an OSC blob: a big-endian 32-bit size followed by that many
    payload bytes, zero-padded to a 4-byte boundary.

    Returns (payload, rest).
    """
    (size,) = struct.unpack(">i", data[0:4])
    # Skip the 4-byte header plus the payload rounded up to a multiple of 4.
    after = 4 + int(math.ceil(size / 4.0) * 4)
    return (data[4:4 + size], data[after:])
def readInt(data):
    # Read a big-endian 32-bit signed integer from the head of *data*.
    # Returns (value, rest); on short input it prints an error and returns
    # 0 with the data unconsumed instead of raising.
    if(len(data)<4):
        print "Error: too few bytes for int", data, len(data)
        rest = data
        integer = 0
    else:
        integer = struct.unpack(">i", data[0:4])[0]
        rest = data[4:]
    return (integer, rest)
def readLong(data):
    """Tries to interpret the next 8 bytes of the data
    as a 64-bit signed integer.

    Returns (value, rest).  Python 2 only: uses the 'long' builtin.
    """
    high, low = struct.unpack(">ll", data[0:8])
    # Combine the two signed 32-bit halves into one 64-bit value.
    big = (long(high) << 32) + low
    rest = data[8:]
    return (big, rest)
def readFloat(data):
    # Read a big-endian 32-bit float from the head of *data*.
    # Returns (value, rest); on short input it prints an error and returns
    # 0 with the data unconsumed instead of raising.
    # NOTE(review): the local name 'float' shadows the builtin within this
    # function body.
    if(len(data)<4):
        print "Error: too few bytes for float", data, len(data)
        rest = data
        float = 0
    else:
        float = struct.unpack(">f", data[0:4])[0]
        rest = data[4:]
    return (float, rest)
def OSCBlob(next):
    """Convert a string into an OSC Blob,
    returning a (typetag, data) tuple.

    Non-string input yields ('', '') — i.e. nothing is appended.
    NOTE(review): the parameter name 'next' shadows the builtin.
    """
    if type(next) == type(""):
        length = len(next)
        # Payload is zero-padded to a 4-byte boundary after the size word.
        padded = math.ceil((len(next)) / 4.0) * 4
        binary = struct.pack(">i%ds" % (padded), length, next)
        tag = 'b'
    else:
        tag = ''
        binary = ''
    return (tag, binary)
def OSCArgument(next):
    """Convert some Python types to their
    OSC binary representations, returning a
    (typetag, data) tuple.

    Strings -> 's' (NUL-terminated, 4-byte padded), floats -> 'f'
    (big-endian 32-bit), ints -> 'i' (big-endian 32-bit); anything else
    raises.  NOTE(review): parameter 'next' shadows the builtin.
    """
    if type(next) == type(""):
        # +1 leaves room for the terminating NUL before rounding up to 4.
        OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
        binary = struct.pack(">%ds" % (OSCstringLength), next)
        tag = "s"
    elif type(next) == type(42.5):
        binary = struct.pack(">f", next)
        tag = "f"
    elif type(next) == type(13):
        binary = struct.pack(">i", next)
        tag = "i"
    else:
        raise Exception("don't know how to encode " + str(next) + " as OSC argument, type=" + str(type(next)))
    return (tag, binary)
def parseArgs(args):
    """Given a list of strings, produces a list
    where those strings have been parsed (where
    possible) as floats or integers.

    A numeric string without a '.' becomes an int, one with a '.' (or in
    exponent form) a float; anything unparseable is kept as the stripped
    string.
    """
    parsed = []
    for arg in args:
        # FIX: removed the leftover debug 'print arg' statement.
        arg = arg.strip()
        try:
            interpretation = float(arg)
            if "." not in arg:
                interpretation = int(interpretation)
        except ValueError:
            # Oh - it was a string.
            interpretation = arg
        parsed.append(interpretation)
    return parsed
def decodeOSC(data):
    """Converts a typetagged OSC message to a Python list.

    For a plain message the result is [address, typetags, arg1, ...].
    For a bundle it is ['#bundle', timetag, decoded_msg1, ...] with each
    contained message decoded recursively.
    """
    # Dispatch table from OSC typetag character to its reader function.
    table = {"i":readInt, "f":readFloat, "s":readString, "b":readBlob}
    decoded = []
    address, rest = readString(data)
    typetags = ""
    if address == "#bundle":
        # Bundle: 64-bit timetag, then length-prefixed embedded messages.
        time, rest = readLong(rest)
        decoded.append(address)
        decoded.append(time)
        while len(rest)>0:
            length, rest = readInt(rest)
            decoded.append(decodeOSC(rest[:length]))
            rest = rest[length:]
    elif len(rest)>0:
        typetags, rest = readString(rest)
        decoded.append(address)
        decoded.append(typetags)
        if(typetags[0] == ","):
            for tag in typetags[1:]:
                value, rest = table[tag](rest)
                decoded.append(value)
        else:
            print "Oops, typetag lacks the magic ,"
    else:
        # Address-only message: synthesize an empty typetag string.
        decoded.append(address)
        decoded.append(',')
    # return only the data
    return decoded
class CallbackManager:
    """Maps OSC addresses to callables.

    Registered callbacks receive the decoded OSC message (a list whose
    first two elements are the address and the typetag string) and the
    message source.  A '#bundle' handler is installed automatically so
    bundles are unpacked and their contents re-dispatched.
    """
    def __init__(self):
        self.callbacks = {}
        self.add("#bundle", self.unbundler)

    def handle(self, data, source):
        """Decode raw OSC bytes and dispatch to the matching callback."""
        self.dispatch(decodeOSC(data), source)

    def dispatch(self, message, source):
        """Route an already-decoded message by its address element."""
        self.callbacks[message[0]](message, source)

    def add(self, address, callback):
        """Register *callback* for *address*; a None callback unregisters."""
        if callback is None:
            del self.callbacks[address]
        else:
            self.callbacks[address] = callback

    def unbundler(self, messages, source):
        """Dispatch each message contained in a decoded bundle."""
        # Elements 0 and 1 are '#bundle' and the timetag; the rest are
        # the embedded, already-decoded messages.
        for embedded in messages[2:]:
            self.dispatch(embedded, source)
if __name__ == "__main__":
    # Self-test / demo: exercises message building, decoding, blobs,
    # bundles and the callback manager, printing hex dumps along the way.
    hexDump("Welcome to the OSC testing program.")
    print
    message = OSCMessage("/foo/play")
    message.append(44)
    message.append(11)
    message.append(4.5)
    message.append("the white cliffs of dover")
    hexDump(message.getBinary())
    print "Making and unmaking a message.."
    strings = OSCMessage()
    strings.append("Mary had a little lamb")
    strings.append("its fleece was white as snow")
    strings.append("and everywhere that Mary went,")
    strings.append("the lamb was sure to go.")
    strings.append(14.5)
    strings.append(14.5)
    strings.append(-400)
    raw = strings.getBinary()
    hexDump(raw)
    print "Retrieving arguments..."
    # Manually pull the packed fields back out in the order they went in:
    # address + typetags + 4 strings, then two floats and an int.
    data = raw
    for i in range(6):
        text, data = readString(data)
        print text
    number, data = readFloat(data)
    print number
    number, data = readFloat(data)
    print number
    number, data = readInt(data)
    print number
    hexDump(raw)
    print decodeOSC(raw)
    print decodeOSC(message.getBinary())
    print "Testing Blob types."
    blob = OSCMessage()
    blob.append("","b")
    blob.append("b","b")
    blob.append("bl","b")
    blob.append("blo","b")
    blob.append("blob","b")
    blob.append("blobs","b")
    blob.append(42)
    hexDump(blob.getBinary())
    print decodeOSC(blob.getBinary())
    def printingCallback(stuff, source):
        # Simple callback used below: prints the decoded message elements.
        sys.stdout.write("Got: ")
        for i in stuff:
            sys.stdout.write(str(i) + " ")
        sys.stdout.write("\n")
    print "Testing bundles"
    print1 = OSCMessage("/print")
    print1.append("Hey man, that's cool.")
    print1.append(42)
    print1.append(3.1415926)
    bundle = OSCBundle()
    bundle.append(print1)
    bundle.append('/foo', (123, 456))
    bundlebinary = bundle.getBinary()
    hexDump(bundlebinary)
    print decodeOSC(bundlebinary)
    print "Testing the callback manager."
    c = CallbackManager()
    c.add("/print", printingCallback)
    c.handle(message.getBinary(), None)
    c.handle(print1.getBinary(), None)
    print "sending a bundle to the callback manager"
    c.handle(bundlebinary, None)
|
avroshk/VRDAW
|
VRDAW_working/OSC.py
|
Python
|
gpl-3.0
| 11,400
|
[
"VisIt"
] |
1c5fca6b363f6322f9c4e815485888db3b1d72f6f151a5d739a22f01d4eeda1e
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyEspressopp(CMakePackage):
    """ESPResSo++ is an extensible, flexible, fast and parallel simulation
    software for soft matter research. It is a highly versatile software
    package for the scientific simulation and analysis of coarse-grained
    atomistic or bead-spring models as they are used in soft matter research
    """
    homepage = "https://espressopp.github.io"
    url = "https://github.com/espressopp/espressopp/tarball/v1.9.4.1"
    version('develop', git='https://github.com/espressopp/espressopp.git', branch='master')
    version('1.9.5', '13a93c30b07132b5e5fa0d828aa17d79')
    version('1.9.4.1', '0da74a6d4e1bfa6a2a24fca354245a4f')
    version('1.9.4', 'f2a27993a83547ad014335006eea74ea')
    # Optional documentation targets; each pulls in its own build-time deps.
    variant('ug', default=False, description='Build user guide')
    variant('pdf', default=False, description='Build user guide in pdf format')
    variant('dg', default=False, description='Build developer guide')
    depends_on("cmake@2.8:", type='build')
    depends_on("mpi")
    depends_on("boost+serialization+filesystem+system+python+mpi", when='@1.9.4:')
    extends("python")
    depends_on("python@2:2.8")
    # NOTE(review): the mpi4py constraints look swapped between 1.9.4 and
    # 1.9.4.1: (the newer release gets the *older* minimum) — TODO confirm
    # against upstream requirements.
    depends_on("py-mpi4py@2.0.0:", when='@1.9.4', type=('build', 'run'))
    depends_on("py-mpi4py@1.3.1:", when='@1.9.4.1:', type=('build', 'run'))
    depends_on("fftw")
    depends_on("py-sphinx", when="+ug", type='build')
    depends_on("py-sphinx", when="+pdf", type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-matplotlib', when="+ug", type='build')
    depends_on('py-matplotlib', when="+pdf", type='build')
    depends_on("texlive", when="+pdf", type='build')
    depends_on("doxygen", when="+dg", type='build')
    def cmake_args(self):
        # Use the Spack-provided mpi4py/boost rather than bundled copies,
        # and skip installing rc files.
        return [
            '-DEXTERNAL_MPI4PY=ON',
            '-DEXTERNAL_BOOST=ON',
            '-DWITH_RC_FILES=OFF'
        ]
    def build(self, spec, prefix):
        # Standard build, plus the optional documentation targets; the doc
        # targets are not parallel-safe, hence parallel=False.
        with working_dir(self.build_directory):
            make()
            if '+ug' in spec:
                make("ug", parallel=False)
            if '+pdf' in spec:
                make("ug-pdf", parallel=False)
            if '+dg' in spec:
                make("doc", parallel=False)
|
skosukhin/spack
|
var/spack/repos/builtin/packages/py-espressopp/package.py
|
Python
|
lgpl-2.1
| 3,452
|
[
"ESPResSo"
] |
01ea6339a0b46d0aa88e469ddac707d9d530a2e3eaa56001e500844c5d052e0a
|
""" Test case for DIRAC.Core.Utilities.File module
"""
##
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2011/01/17 14:01:18
# @brief Definition of FileTestCase class.
# imports
import os
from os.path import abspath
import re
import sys
from hypothesis import given
from hypothesis.strategies import floats
from pytest import mark
# sut
from DIRAC.Core.Utilities.File import (
checkGuid,
makeGuid,
getSize,
getMD5ForFiles,
convertSizeUnits,
SIZE_UNIT_CONVERSION,
)
parametrize = mark.parametrize
def testCheckGuid():
    """checkGuid tests"""
    # GUIDs with a field length off by one in each of the five fields,
    # plus the empty string -- all must be rejected.
    rejected = [
        ("", "empty guid"),
        ("012345678-0123-0123-0123-0123456789AB", "wrong length in 1st field"),
        ("0123456-0123-0123-0123-0123456789AB", "wrong length in 1st field"),
        ("01234567-01234-0123-0123-0123456789AB", "wrong length in 2nd field"),
        ("01234567-012-0123-0123-0123456789AB", "wrong length in 2nd field"),
        ("01234567-0123-01234-0123-0123456789AB", "wrong length in 3rd field"),
        ("01234567-0123-012-0123-0123456789AB", "wrong length in 3rd field"),
        ("01234567-0123-0123-01234-0123456789AB", "wrong length in 4th field"),
        ("01234567-0123-0123-012-0123456789AB", "wrong length in 4th field"),
        ("01234567-0123-0123-0123-0123-0123456789ABC", "wrong length in 5th field"),
        ("01234567-0123-0123-0123-0123-0123456789A", "wrong length in 5th field"),
    ]
    for guid, reason in rejected:
        assert checkGuid(guid) is False, reason
    # checkGuid only validates the shape: lower case and characters outside
    # [0-9A-F] are (perhaps surprisingly) accepted.
    accepted = [
        ("01234567-9ABC-0DEF-0123-456789ABCDEF".lower(), "small caps in guid, zut!"),
        ("NEEDMORE-SPAM-SPAM-SPAM-SPAMWITHEGGS", "wrong set of characters, zut!"),
        ("01234567-9ABC-0DEF-0123-456789ABCDEF", "proper GUID"),
    ]
    for guid, reason in accepted:
        assert checkGuid(guid) is True, reason
def testMakeGuid():
    """makeGuid tests"""
    # Without a file name a random (but well-formed) guid is produced;
    # hashing this very module must also yield a valid guid.
    fake = makeGuid()
    assert checkGuid(fake) is True, "fake guid for inexisting file"
    real = makeGuid(abspath(__file__))
    assert checkGuid(real) is True, "guid for FileTestCase.py file"
def testGetSize():
    """getSize tests"""
    # getSize signals any failure (missing or unreadable file) with -1.
    failing_paths = (
        ("/spam/eggs/eggs", "inexisting file"),
        ("/root/.login", "unreadable file"),
    )
    for path, reason in failing_paths:
        assert getSize(path) == -1, reason
def testGetMD5ForFiles():
    """getMD5ForFiles tests"""
    # Hash everything in the working directory and check the digest is hex.
    here = abspath(".")
    entries = [here + os.sep + name for name in os.listdir(".")]
    digest = getMD5ForFiles(entries)
    assert re.match("^[0-9a-fA-F]+$", digest) is not None
@given(nb=floats(allow_nan=False, allow_infinity=False, min_value=1))
def test_convert_to_bigger_unit_floats(nb):
    """Converting to a bigger unit must shrink the number, and one two-unit
    jump must equal two consecutive one-unit jumps."""
    as_kb = convertSizeUnits(nb, "B", "kB")
    as_mb = convertSizeUnits(nb, "B", "MB")
    two_step_mb = convertSizeUnits(as_kb, "kB", "MB")
    assert as_kb < nb
    assert as_mb < as_kb
    assert as_mb == two_step_mb
def test_convert_error_to_maxint():
    """Make sure that on error we receive -sys.maxsize"""
    # Any non-numeric size or unknown src/dst unit maps to the sentinel
    # -sys.maxsize (the docstring previously said "maxint", a Python 2 relic).
    assert convertSizeUnits("size", "B", "kB") == -sys.maxsize
    assert convertSizeUnits(0, "srcUnit", "kB") == -sys.maxsize
    assert convertSizeUnits(0, "B", "dstUnit") == -sys.maxsize
@given(nb=floats(allow_nan=False, allow_infinity=False, min_value=1))
@parametrize("srcUnit", SIZE_UNIT_CONVERSION)
@parametrize("dstUnit", SIZE_UNIT_CONVERSION)
def test_convert_loop(nb, srcUnit, dstUnit):
    """A round-trip conversion must give back the starting value."""
    there = convertSizeUnits(nb, srcUnit, dstUnit)
    back_again = convertSizeUnits(there, dstUnit, srcUnit)
    # A conversion that overflowed to infinity cannot round-trip; skip it.
    if back_again != float("Inf"):
        assert back_again == nb
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/Utilities/test/Test_File.py
|
Python
|
gpl-3.0
| 4,498
|
[
"DIRAC"
] |
6ee0eb61402f177e37231a9781644e6cebfee22240b7d5e3a854834d7776cd6c
|
#! /usr/bin/env python
"""For every trajectory frame, compute the angles between the matching
principal axes of two atom selections (segid A and segid B) and write them,
one line per frame, to <trajectory-basename>_angle.dat.

Usage: angle_glob_mhcii.py <trajectory.pdb> <structure>
"""
from MDAnalysis import *
import numpy
import math
import sys

my_traj = sys.argv[1]
my_struc = sys.argv[2]
u = Universe(my_struc, my_traj)

# Derive the output name from the trajectory file name.
end = my_traj.find('.pdb')
if end == -1:
    # BUG FIX: find() returns -1 when '.pdb' is absent, which used to
    # silently chop the last character off the name; keep the whole name.
    end = len(my_traj)
fout_angle = my_traj[0:end] + '_angle.dat'

a = u.selectAtoms("segid A and resid 84:182")
b = u.selectAtoms("segid B and resid 95:190")

# 'with' guarantees the output file is closed even if a frame raises.
with open(fout_angle, 'w') as g:
    for ts in u.trajectory:
        a_1, a_2, a_3 = a.principalAxes()
        b_1, b_2, b_3 = b.principalAxes()
        angle1 = math.degrees(math.acos(numpy.dot(a_1, b_1)))
        angle2 = math.degrees(math.acos(numpy.dot(a_2, b_2)))
        angle3 = math.degrees(math.acos(numpy.dot(a_3, b_3)))
        # Axis directions are sign-ambiguous: fold angles into [0, 90].
        if angle1 > 90:
            angle1 = 180 - angle1
        if angle2 > 90:
            angle2 = 180 - angle2
        if angle3 > 90:
            angle3 = 180 - angle3
        g.write('%7.3f %7.3f %7.3f\n' % (angle1, angle2, angle3))
|
demharters/git_scripts
|
angle_glob_mhcii.py
|
Python
|
apache-2.0
| 942
|
[
"MDAnalysis"
] |
be57022aad82f8c473159e0bf9edce21e7d26bc6014fe719a65be46f3c763139
|
#!/usr/bin/env python3
import sys
import time
import random
import os
import subprocess
import gzip
import io
import pickle
import argparse
import itertools
from distutils.version import LooseVersion
from distutils.spawn import find_executable
sys.path.insert(1,sys.path[0]+'/..')
try:
from .version import SeqSero2_version
except Exception: #ImportError
from version import SeqSero2_version
### SeqSero Kmer
def parse_args():
    "Parse the input arguments, use '-h' for help."
    # The usage string doubles as the tool's main documentation banner.
    parser = argparse.ArgumentParser(usage='SeqSero2_package.py -t <data_type> -m <mode> -i <input_data> [-d <output_directory>] [-p <number of threads>] [-b <BWA_algorithm>]\n\nDevelopper: Shaokang Zhang (zskzsk@uga.edu), Hendrik C Den-Bakker (Hendrik.DenBakker@uga.edu) and Xiangyu Deng (xdeng@uga.edu)\n\nContact email:seqsero@gmail.com\n\nVersion: v1.2.1')#add "-m <data_type>" in future
    parser.add_argument("-i",nargs="+",help="<string>: path/to/input_data",type=os.path.abspath) ### add 'type=os.path.abspath' to generate absolute path of input data.
    parser.add_argument("-t",choices=['1','2','3','4','5'],help="<int>: '1' for interleaved paired-end reads, '2' for separated paired-end reads, '3' for single reads, '4' for genome assembly, '5' for nanopore reads (fasta/fastq)")
    parser.add_argument("-b",choices=['sam','mem'],default="mem",help="<string>: algorithms for bwa mapping for allele mode; 'mem' for mem, 'sam' for samse/sampe; default=mem; optional; for now we only optimized for default 'mem' mode")
    parser.add_argument("-p",default="1",help="<int>: number of threads for allele mode, if p >4, only 4 threads will be used for assembly since the amount of extracted reads is small, default=1")
    parser.add_argument("-m",choices=['k','a'],default="a",help="<string>: which workflow to apply, 'a'(raw reads allele micro-assembly), 'k'(raw reads and genome assembly k-mer), default=a")
    parser.add_argument("-n",help="<string>: optional, to specify a sample name in the report output")
    parser.add_argument("-d",help="<string>: optional, to specify an output directory name, if not set, the output directory would be 'SeqSero_result_'+time stamp+one random number")
    parser.add_argument("-c",action="store_true",help="<flag>: if '-c' was flagged, SeqSero2 will only output serotype prediction without the directory containing log files")
    parser.add_argument("-s",action="store_true",help="<flag>: if '-s' was flagged, SeqSero2 will not output header in SeqSero_result.tsv")
    parser.add_argument("--phred_offset",choices=['33','64','auto'],default='auto',help="<33|64|auto>: offset for FASTQ file quality scores, default=auto")
    parser.add_argument("--check",action="store_true",help="<flag>: use '--check' flag to check the required dependencies")
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + SeqSero2_version)
    return parser.parse_args()
### check paths of dependencies
# NOTE(review): parse_args() runs at import time here, so importing this
# module with unrelated sys.argv still triggers argument parsing.
check_dependencies = parse_args().check
# External tools required by the allele and k-mer workflows.
dependencies = ['bwa','samtools','blastn','fastq-dump','spades.py','bedtools','SalmID.py']
# With --check: report the resolved path of each tool, then exit.
if check_dependencies:
    for item in dependencies:
        ext_path = find_executable(item)
        if ext_path is not None:
            print ("Using "+item+" - "+ext_path)
        else:
            print ("ERROR: can not find "+item+" in PATH")
    sys.exit()
### end of --check
def reverse_complement(sequence):
    """Return the reverse complement of a DNA sequence (IUPAC ambiguity codes
    included); raises KeyError on any unknown base."""
    pairs = {
        'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N',
        'M': 'K', 'K': 'M', 'R': 'Y', 'Y': 'R', 'W': 'W',
        'S': 'S', 'V': 'B', 'B': 'V', 'H': 'D', 'D': 'H',
    }
    complemented = [pairs[base] for base in sequence]
    return "".join(reversed(complemented))
def createKmerDict_reads(list_of_strings, kmer):
    """Count every k-mer and its reverse complement across the given sequences.

    A palindromic k-mer (equal to its own reverse complement) is counted twice
    per occurrence, matching the two separate increments.
    """
    counts = {}
    for raw in list_of_strings:
        sequence = raw.strip('\n')
        for pos in range(len(sequence) - kmer + 1):
            mer = sequence[pos:pos + kmer].upper()
            rc_mer = reverse_complement(mer)
            counts[mer] = counts.get(mer, 0) + 1
            counts[rc_mer] = counts.get(rc_mer, 0) + 1
    return counts
def multifasta_dict(multifasta):
    """Parse a multifasta file into {header (without '>'): concatenated sequence}.

    Mirrors the original lookup-by-first-index behaviour: a duplicated header
    resolves to the first record carrying that header.
    """
    lines = [entry.strip() for entry in open(multifasta, 'r') if len(entry.strip()) > 0]
    header_lines = [entry for entry in lines if entry[0] == '>']
    sequences = {}
    for header in header_lines:
        begin = lines.index(header)
        for chunk in lines[begin + 1:]:
            if chunk[0] == '>':
                break
            key = header[1:]
            if key in sequences:
                sequences[key] += chunk
            else:
                sequences[key] = chunk
    return sequences
def multifasta_single_string(multifasta):
    """Concatenate every non-header, non-blank line of *multifasta* into one string."""
    pieces = []
    for raw in open(multifasta, 'r'):
        cleaned = raw.strip()
        if cleaned and cleaned[0] != '>':
            pieces.append(cleaned)
    return ''.join(pieces)
def chunk_a_long_sequence(long_sequence, chunk_size=60):
    """Split a sequence into consecutive chunks of length *chunk_size*.

    The final remainder slice is always appended (it is empty when the length
    is an exact multiple), matching the historical behaviour at the default
    chunk size.
    """
    chunk_list = []
    # BUG FIX: the step count previously used a hard-coded 60 instead of
    # chunk_size, producing wrong chunking for any non-default chunk_size.
    steps = len(long_sequence) // chunk_size
    for i in range(steps):
        chunk_list.append(long_sequence[i * chunk_size:(i + 1) * chunk_size])
    chunk_list.append(long_sequence[steps * chunk_size:len(long_sequence)])
    return chunk_list
def target_multifasta_kmerizer(multifasta, k, kmerDict):
    """Collect k-mers from assembly regions that look like serotype targets.

    The assembly is flattened to one string and re-chunked into chunk_size-base
    lines. When a line's central k-mer is found in kmerDict, the window
    forward_length bases before and reverse_length bases after that line is
    k-merized. Returns the set of harvested k-mers.
    """
    forward_length = 300  # if find the target, put forward 300 bases
    reverse_length = 2200  # if find the target, put backward 2200 bases
    chunk_size = 60  # size of the smaller sequences the long assembly is chunked into
    target_mers = []
    long_single_string = multifasta_single_string(multifasta)
    multifasta_list = chunk_a_long_sequence(long_single_string, chunk_size)
    unit_length = len(multifasta_list[0])
    forward_lines = int(forward_length / unit_length) + 1
    # BUG FIX: this previously reused forward_length, so the declared
    # 2200-base backward window was silently truncated to ~300 bases.
    reverse_lines = int(reverse_length / unit_length) + 1
    start_num = 0
    end_num = 0
    for i in range(len(multifasta_list)):
        if i not in range(start_num, end_num):  # avoid computational repetition
            line = multifasta_list[i]
            start = int((len(line) - k) // 2)
            s1 = line[start:k + start]
            if s1 in kmerDict:  # detect it is a potential read or not (use the middle part)
                if i - forward_lines >= 0:
                    start_num = i - forward_lines
                else:
                    start_num = 0
                if i + reverse_lines <= len(multifasta_list) - 1:
                    end_num = i + reverse_lines
                else:
                    end_num = len(multifasta_list) - 1
                target_list = [
                    x.strip() for x in multifasta_list[start_num:end_num]
                ]
                target_line = "".join(target_list)
                target_mers += [
                    k1 for k1 in createKmerDict_reads([str(target_line)], k)
                ]  # k1 avoids shadowing the k-mer length argument k
            else:
                pass
    return set(target_mers)
def target_read_kmerizer(file, k, kmerDict):
    """Scan a fastq file (optionally gzipped) and collect the k-mers of every
    read whose central k-mer is present in kmerDict; stops once ~4 Mb of
    matched read bases have been accumulated. Returns a set of k-mers."""
    i = 1  # running line counter; fastq sequence lines satisfy i % 4 == 2
    n_reads = 0
    total_coverage = 0
    target_mers = []
    if file.endswith(".gz"):
        # gz path iterates raw byte lines; they are decoded lazily below
        file_content = io.BufferedReader(gzip.open(file))
    else:
        file_content = open(file, "r").readlines()
    for line in file_content:
        start = int((len(line) - k) // 2)
        if i % 4 == 2:
            if file.endswith(".gz"):
                s1 = line[start:k + start].decode()
                line = line.decode()
            else:
                s1 = line[start:k + start]
            if s1 in kmerDict:  # the central k-mer seeds the read as a potential target
                n_reads += 1
                total_coverage += len(line)
                target_mers += [
                    k1 for k1 in createKmerDict_reads([str(line)], k)
                ]  # changed k to k1, just want to avoid the mixes of this "k" (kmer) to the "k" above (kmer length)
        i += 1
        if total_coverage >= 4000000:
            break
    return set(target_mers)
def minion_fasta_kmerizer(file, k, kmerDict):
    """Collect k-mers from two-line fasta records (sequence on every 2nd line)
    whose forward or reverse-complement form hits kmerDict; returns the set."""
    hits = {}
    for line_no, line in enumerate(open(file), start=1):
        if line_no % 2 != 0:  # skip header lines
            continue
        for fwd, rev in kmers(line.strip().upper(), k):
            if fwd in kmerDict or rev in kmerDict:
                hits[fwd] = hits.get(fwd, 0) + 1
                hits[rev] = hits.get(rev, 0) + 1
    return set(hits)
def minion_fastq_kmerizer(file, k, kmerDict):
    """Collect k-mers from fastq sequence lines (line number % 4 == 2) whose
    forward or reverse-complement form hits kmerDict; returns the set."""
    hits = {}
    for line_no, line in enumerate(open(file), start=1):
        if line_no % 4 != 2:  # only the sequence line of each 4-line record
            continue
        for fwd, rev in kmers(line.strip().upper(), k):
            if fwd in kmerDict or rev in kmerDict:
                hits[fwd] = hits.get(fwd, 0) + 1
                hits[rev] = hits.get(rev, 0) + 1
    return set(hits)
def multifasta_single_string2(multifasta):
    """Concatenate all sequence lines of a FASTA file into one string.

    BUG FIX: blank lines used to raise IndexError (indexing [0] on an empty
    stripped line); they are now skipped like header lines. The file is also
    closed deterministically via 'with'.
    """
    single_string = ''
    with open(multifasta, 'r') as f:
        for line in f:
            stripped = line.strip()
            if stripped and not stripped.startswith('>'):
                single_string += stripped
    return single_string
def kmers(seq, k):
    # Yield (kmer, reverse_complement_of_kmer) pairs over *seq*.
    # NOTE(review): the range starts at 1, so the k-mer at position 0 is never
    # yielded -- presumably deliberate, since the matching slice
    # rev_comp[-(start + k):-start] would be empty for start == 0; confirm
    # before changing, as both minion kmerizers depend on this enumeration.
    rev_comp = reverse_complement(seq)
    for start in range(1, len(seq) - k + 1):
        yield seq[start:start + k], rev_comp[-(start + k):-start]
def multifasta_to_kmers_dict(multifasta, k_size):  # used to create database kmer set
    """Map each fasta header to the set of k-mers (plus reverse complements)
    occurring in its sequence."""
    per_header_sequence = multifasta_dict(multifasta)
    kmer_sets = {}
    for header, sequence in per_header_sequence.items():
        kmer_sets[header] = set(createKmerDict_reads([sequence], k_size))
    return kmer_sets
def Combine(b, c):
    """Enumerate antigen formulas: the fixed factors *c* alone, then *c* plus
    every non-empty subset of the optional factors *b*, each result sorted and
    comma-joined."""
    results = [",".join(c)]
    optional_subsets = []
    for size in range(1, len(b) + 1):
        for subset in itertools.combinations(b, size):
            optional_subsets.append(",".join(subset))
    for subset in optional_subsets:
        parts = list(c)
        parts.append(subset)
        flattened = ",".join(parts).split(",")
        flattened.sort()
        results.append(",".join(flattened))
    return results
def seqsero_from_formula_to_serotypes(Otype, fliC, fljB, special_gene_list,subspecies):
    """Map a predicted antigenic formula (O : fliC : fljB) to serotype name(s).

    Looks the formula up in the White-Kauffmann-Le Minor tables imported from
    Initial_Conditions, then applies special-case refinements (Enteritidis /
    Gallinarum Sdf I marker, Typhimurium O5- variant, Paratyphi B d-tartrate
    SNP, O13 O:22 / O:23 differentiation).

    Returns a 5-tuple (predict_form, predict_sero, star, star_line, claim).
    """
    #like test_output_06012017.txt
    #can add more varialbles like sdf-type, sub-species-type in future (we can conclude it into a special-gene-list)
    from Initial_Conditions import phase1,phase2,phaseO,sero,subs,remove_list,rename_dict
    rename_dict_not_anymore=[rename_dict[x] for x in rename_dict]
    rename_dict_all=rename_dict_not_anymore+list(rename_dict) #used for decide whether to
    seronames = []
    seronames_none_subspecies=[]
    # Scan every row of the KW table whose O group matches the prediction.
    for i in range(len(phase1)):
        fliC_combine = []
        fljB_combine = []
        if phaseO[i] == Otype: # no VII in KW, but it's there
            ### for fliC, detect every possible combinations to avoid the effect of "["
            if phase1[i].count("[") == 0:
                fliC_combine.append(phase1[i])
            elif phase1[i].count("[") >= 1:
                c = []
                b = []
                if phase1[i][0] == "[" and phase1[i][-1] == "]" and phase1[i].count(
                        "[") == 1:
                    content = phase1[i].replace("[", "").replace("]", "")
                    fliC_combine.append(content)
                    fliC_combine.append("-")
                else:
                    for x in phase1[i].split(","):
                        if "[" in x:
                            b.append(x.replace("[", "").replace("]", ""))
                        else:
                            c.append(x)
                    fliC_combine = Combine(
                        b, c
                    ) #Combine will offer every possible combinations of the formula, like f,[g],t: f,t f,g,t
            ### end of fliC "[" detect
            ### for fljB, detect every possible combinations to avoid the effect of "["
            if phase2[i].count("[") == 0:
                fljB_combine.append(phase2[i])
            elif phase2[i].count("[") >= 1:
                d = []
                e = []
                if phase2[i][0] == "[" and phase2[i][-1] == "]" and phase2[i].count(
                        "[") == 1:
                    content = phase2[i].replace("[", "").replace("]", "")
                    fljB_combine.append(content)
                    fljB_combine.append("-")
                else:
                    for x in phase2[i].split(","):
                        if "[" in x:
                            d.append(x.replace("[", "").replace("]", ""))
                        else:
                            e.append(x)
                    fljB_combine = Combine(d, e)
            ### end of fljB "[" detect
            new_fliC = fliC.split(
                ","
            ) #because some antigen like r,[i] not follow alphabetical order, so use this one to judge and can avoid missings
            new_fliC.sort()
            new_fliC = ",".join(new_fliC)
            new_fljB = fljB.split(",")
            new_fljB.sort()
            new_fljB = ",".join(new_fljB)
            if (new_fliC in fliC_combine
                    or fliC in fliC_combine) and (new_fljB in fljB_combine
                                                  or fljB in fljB_combine):
                ######start, remove_list,rename_dict, added on 11/11/2018
                if sero[i] not in remove_list:
                    temp_sero=sero[i]
                    if temp_sero in rename_dict:
                        temp_sero=rename_dict[temp_sero] #rename if in the rename list
                    if temp_sero not in seronames:#the new sero may already included, if yes, then not consider
                        if subs[i] == subspecies:
                            seronames.append(temp_sero)
                        seronames_none_subspecies.append(temp_sero)
                    else:
                        pass
                else:
                    pass
                ######end, added on 11/11/2018
    #analyze seronames
    subspecies_pointer=""
    if len(seronames) == 0 and len(seronames_none_subspecies)!=0:
        ## ed_SL_06062020: for the subspecies mismatch between KW and SalmID
        seronames=seronames_none_subspecies
        #seronames=["N/A"]
        subspecies_pointer="1"
        #subspecies_pointer="0"
        ##
    if len(seronames) == 0:
        seronames = [
            "N/A (The predicted antigenic profile does not exist in the White-Kauffmann-Le Minor scheme)"
        ]
    star = ""
    star_line = ""
    if len(seronames) > 1: #there are two possible predictions for serotypes
        star = "*"
        #changed 04072019
        #star_line = "The predicted serotypes share the same general formula:\t" + Otype + ":" + fliC + ":" + fljB + "\n"
    if subspecies_pointer=="1" and len(seronames_none_subspecies)!=0:
        star="*"
        star_line = "This antigenic profile has been associated with serotype '"+(" or ").join(seronames)+"' in the Kauffman-White scheme. The existence of the same antigenic formula in multiple species or subspecies is well documented in the Kauffman-White Scheme. " + star_line ## ed_SL_03202021: changed for new output format
        #star_line="The predicted O and H antigens correspond to serotype '"+(" or ").join(seronames)+"' in the Kauffmann-White scheme. The predicted subspecies by SalmID (github.com/hcdenbakker/SalmID) may not be consistent with subspecies designation in the Kauffmann-White scheme. " + star_line
        #star_line="The formula with this subspieces prediction can't get a serotype in KW manual, and the serotyping prediction was made without considering it."+star_line
        seronames=["N/A"] ## ed_SL_06062020
    if Otype=="":
        Otype="-"
    predict_form = Otype + ":" + fliC + ":" + fljB
    predict_sero = (" or ").join(seronames)
    ###special test for Enteritidis
    if predict_form == "9:g,m:-":
        sdf = "-"
        for x in special_gene_list:
            if x.startswith("sdf"):
                sdf = "+"
                #star_line="Detected sdf gene, a marker to differentiate Gallinarum and Enteritidis"
                #star_line="sdf gene detected. "
                star_line = "Detected Sdf I that is characteristic of commonly circulating strains of serotype Enteritidis. "
        #predict_form = predict_form + " Sdf prediction:" + sdf
        predict_form = predict_form #changed 04072019
        if sdf == "-":
            star = "*"
            #star_line="Didn't detected sdf gene, a marker to differentiate Gallinarum and Enteritidis"
            #star_line="sdf gene not detected. "
            star_line = "Sdf I that is characteristic of commonly circulating strains of serotype Enteritidis was not detected. "
            #changed in 04072019, for new output
            #star_line = "Additional characterization is necessary to assign a serotype to this strain. Commonly circulating strains of serotype Enteritidis are sdf+, although sdf- strains of serotype Enteritidis are known to exist. Serotype Gallinarum is typically sdf- but should be quite rare. Sdf- strains of serotype Enteritidis and serotype Gallinarum can be differentiated by phenotypic profile or genetic criteria.\n"
            #predict_sero = "Gallinarum/Enteritidis" #04132019, for new output requirement
            predict_sero = "Gallinarum or Enteritidis"
    ###end of special test for Enteritidis
    elif predict_form == "4:i:-":
        predict_sero = "I 4,[5],12:i:-" # change serotype name
    elif predict_form == "4:r:-":
        predict_sero = "N/A (4:r:-)"
    elif predict_form == "4:b:-":
        predict_sero = "N/A (4:b:-)"
    #elif predict_form == "8:e,h:1,2": #removed after official merge of newport and bardo
        #predict_sero = "Newport"
        #star = "*"
        #star_line = "Serotype Bardo shares the same antigenic profile with Newport, but Bardo is exceedingly rare."
    claim = "The serotype(s) is/are the only serotype(s) with the indicated antigenic profile currently recognized in the Kauffmann White Scheme. New serotypes can emerge and the possibility exists that this antigenic profile may emerge in a different subspecies. Identification of strains to the subspecies level should accompany serotype determination; the same antigenic profile in different subspecies is considered different serotypes.\n"
    if "N/A" in predict_sero:
        claim = ""
    #special test for Typhimurium
    if "Typhimurium" in predict_sero or predict_form == "4:i:-":
        normal = 0
        mutation = 0
        for x in special_gene_list:
            if "oafA-O-4_full" in x:
                normal = float(special_gene_list[x])
            elif "oafA-O-4_5-" in x:
                mutation = float(special_gene_list[x])
        if normal > mutation:
            pass
        elif normal < mutation:
            #predict_sero = predict_sero.strip() + "(O5-)"
            predict_sero = predict_sero.strip() #diable special sero for new output requirement, 04132019
            star = "*"
            #star_line = "Detected the deletion of O5-."
            star_line = "Detected a deletion in gene oafA that causes O5- variant of Typhimurium. "
        else:
            pass
    #special test for Paratyphi B
    if "Paratyphi B" in predict_sero or predict_form == "4:b:-":
        normal = 0
        mutation = 0
        for x in special_gene_list:
            if "gntR-family-regulatory-protein_dt-positive" in x:
                normal = float(special_gene_list[x])
            elif "gntR-family-regulatory-protein_dt-negative" in x:
                mutation = float(special_gene_list[x])
        #print(normal,mutation)
        if normal > mutation:
            #predict_sero = predict_sero.strip() + "(dt+)" #diable special sero for new output requirement, 04132019
            predict_sero = predict_sero.strip()+' var. L(+) tartrate+' if "Paratyphi B" in predict_sero else predict_sero.strip()
            star = "*"
            #star_line = "Didn't detect the SNP for dt- which means this isolate is a Paratyphi B variant L(+) tartrate(+)."
            star_line = "The SNP in gene STM3356 that is associated with the d-Tartrate nonfermenting phenotype characteristic of the typhoidal pathotype was not detected. "
        elif normal < mutation:
            #predict_sero = predict_sero.strip() + "(dt-)" #diable special sero for new output requirement, 04132019
            predict_sero = predict_sero.strip()
            star = "*"
            #star_line = "Detected the SNP for d-Tartrate nonfermenting phenotype of Paratyphi B. "
            star_line = "Detected the SNP in gene STM3356 that is associated with the d-Tartrate nonfermenting phenotype characteristic of the typhoidal pathotype. "
        else:
            star = "*"
            #star_line = " Failed to detect the SNP for dt-, can't decide it's a Paratyphi B variant L(+) tartrate(+) or not."
            star_line = " " ## ed_SL_05152019: do not report this situation.
    #special test for O13,22 and O13,23
    if Otype=="13":
        #ex_dir = os.path.dirname(os.path.realpath(__file__))
        ex_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)),'seqsero2_db')) # ed_SL_09152019
        # NOTE(review): this pickle file handle is never closed explicitly.
        f = open(ex_dir + '/special.pickle', 'rb')
        special = pickle.load(f)
        O22_O23=special['O22_O23']
        if predict_sero.split(" or ")[0] in O22_O23[-1] and predict_sero.split(" or ")[0] not in rename_dict_all:#if in rename_dict_all, then it means already merged, no need to analyze
            O22_score=0
            O23_score=0
            for x in special_gene_list:
                if "O:22" in x:
                    O22_score = O22_score+float(special_gene_list[x])
                elif "O:23" in x:
                    O23_score = O23_score+float(special_gene_list[x])
            #print(O22_score,O23_score)
            for z in O22_O23[0]:
                if predict_sero.split(" or ")[0] in z:
                    if O22_score > O23_score:
                        star = "*"
                        #star_line = "Detected O22 specific genes to further differenciate '"+predict_sero+"'." #diabled for new output requirement, 04132019
                        predict_sero = z[0]
                    elif O22_score < O23_score:
                        star = "*"
                        #star_line = "Detected O23 specific genes to further differenciate '"+predict_sero+"'." #diabled for new output requirement, 04132019
                        predict_sero = z[1]
                    else:
                        star = "*"
                        #star_line = "Fail to detect O22 and O23 differences." #diabled for new output requirement, 04132019
    if " or " in predict_sero:
        star_line = star_line + "The predicted serotypes share the same general formula: " + Otype + ":" + fliC + ":" + fljB + " and can be differentiated by additional analysis. "
    #special test for O6,8
    #merge_O68_list=["Blockley","Bovismorbificans","Hadar","Litchfield","Manhattan","Muenchen"] #remove 11/11/2018, because already in merge list
    #for x in merge_O68_list:
    #  if x in predict_sero:
    #    predict_sero=x
    #    star=""
    #    star_line=""
    #special test for Montevideo; most of them are monophasic
    #if "Montevideo" in predict_sero and "1,2,7" in predict_form: #remove 11/11/2018, because already in merge list
    #  star="*"
    #  star_line="Montevideo is almost always monophasic, having an antigen called for the fljB position may be a result of Salmonella-Salmonella contamination."
    return predict_form, predict_sero, star, star_line, claim
### End of SeqSero Kmer part
### Begin of SeqSero2 allele prediction and output
def xml_parse_score_comparision_seqsero(xmlfile):
    """Parse a BLAST XML report and rank query/hit pairs.

    Returns a list of (name, score, identity_fraction, covered_query_region)
    tuples sorted by accumulated bit score, descending. Overlapping HSP
    regions are down-weighted so repeated coverage is not double-counted.
    """
    from Bio.Blast import NCBIXML
    records = list(NCBIXML.parse(open(xmlfile)))
    names = []
    scores = []
    identity_fracs = []
    covered_regions = []
    for record in records:
        for alignment in record.alignments:
            score = 0
            ids = 0
            covered = set()  # fixed problem that repeated calculation leading percentage > 1
            names.append(record.query.strip() + "___" + alignment.hit_def)
            for hsp in alignment.hsps:
                span = set(range(hsp.query_start, hsp.query_end))
                if covered:
                    fraction = 1 - len(covered & span) / float(len(span))
                else:
                    fraction = 1
                covered = covered | span
                # NOTE: the original branched on "last"/"first" in the query
                # name, but both branches were identical, so the branch was
                # dropped without changing behaviour.
                score += hsp.bits * fraction
                ids += float(hsp.identities) / record.query_length * fraction
            scores.append(score)
            identity_fracs.append(ids)
            covered_regions.append(covered)
    ranked = sorted(zip(names, scores, identity_fracs, covered_regions),
                    key=lambda row: row[1], reverse=True)
    return ranked
def Uniq(L, sort_on_fre="none"):  # return the uniq list and the count number
    """Return (unique_items, occurrence_counts) for list *L*.

    NOTE: *L* is sorted in place and uniqueness is taken on the sorted order.
    When sort_on_fre != "none", the pair is reordered by ascending frequency.
    """
    Old = L
    L.sort()
    L = [L[i] for i in range(len(L)) if L[i] not in L[:i]]
    count = []
    for j in range(len(L)):
        y = 0
        for x in Old:
            if L[j] == x:
                y += 1
        count.append(y)
    if sort_on_fre != "none":
        # BUG FIX: zip() returns an iterator in Python 3, so the old
        # d[1]/d[0] indexing raised TypeError; materialize it first.
        d = list(zip(*sorted(zip(count, L))))
        L = list(d[1])
        count = list(d[0])
    return (L, count)
def judge_fliC_or_fljB_from_head_tail_for_one_contig(nodes_vs_score_list):
    """Sum head/tail blast scores per gene and vote fliC vs fljB for one contig.

    Returns (role, score_gap); ties go to fliC. A small gap means the call is
    unreliable and callers fall back to whole-contig blast ranking.
    """
    fliC_total = 0
    fljB_total = 0
    for entry in nodes_vs_score_list:
        label, value = entry[0], entry[1]
        if "fliC" in label:
            fliC_total += value
        elif "fljB" in label:
            fljB_total += value
    role = "fliC" if fliC_total >= fljB_total else "fljB"
    return (role, abs(fliC_total - fljB_total))
def judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(node_name, Final_list, Final_list_passed):
    """Fallback fliC/fljB call for a contig: take the gene prefix of the first
    (i.e. best-ranked) passed hit mentioning the contig; '' when absent."""
    for entry in Final_list_passed:
        if node_name not in entry[0]:
            continue
        return entry[0].split("_")[0]
    return ""
def fliC_or_fljB_judge_from_head_tail_sequence(nodes_list,tail_head_list,Final_list,Final_list_passed):
    """Assign a fliC/fljB role to each contig in *nodes_list*.

    For every contig, the head/tail marker hits are gathered; the vote from
    judge_fliC_or_fljB_from_head_tail_for_one_contig is trusted only when its
    score gap is >= 20, otherwise the whole-contig blast ranking is used as a
    fallback. If exactly two contigs end up with the same role, both calls are
    redone purely from the whole-contig ranking. Returns [(role, contig), ...].
    """
    #nodes_list is the c created by c,d=Uniq(nodes) in below function
    first_target=""
    role_list=[]
    for x in nodes_list:
        a=[]  # head/tail hits that mention this contig
        role=""
        for y in tail_head_list:
            if x in y[0]:
                a.append(y)
        if len(a)==4:
            role,diff=judge_fliC_or_fljB_from_head_tail_for_one_contig(a)
            if diff<20:
                role=judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(x,Final_list,Final_list_passed)
        elif len(a)==3:
            ###however, if the one with highest score is the fewer one, compare their accumulation score
            role,diff=judge_fliC_or_fljB_from_head_tail_for_one_contig(a)
            if diff<20:
                role=judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(x,Final_list,Final_list_passed)
            ###end of above score comparison
        elif len(a)==2:
            #must on same node, if not, then decide with unit blast score, blast-score/length_of_special_sequence(30 or 37)
            temp=[]
            for z in a:
                temp.append(z[0].split("_")[0])
            m,n=Uniq(temp)#should only have one choice, but weird situation might occur too
            if len(m)==1:
                pass
            else:
                pass
            role,diff=judge_fliC_or_fljB_from_head_tail_for_one_contig(a)
            if diff<20:
                role=judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(x,Final_list,Final_list_passed)
            ###need to desgin a algorithm to guess most possible situation for nodes_list, See the situations of test evaluation
        elif len(a)==1:
            #that one
            role,diff=judge_fliC_or_fljB_from_head_tail_for_one_contig(a)
            if diff<20:
                role=judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(x,Final_list,Final_list_passed)
            #need to evaluate, in future, may set up a cut-off, if not met, then just find Final_list_passed best match,like when "a==0"
        else:#a==0
            #use Final_list_passed best match
            for z in Final_list_passed:
                if x in z[0]:
                    role=z[0].split("_")[0]
                    break
        #print x,role,len(a)
        role_list.append((role,x))
    if len(role_list)==2:
        if role_list[0][0]==role_list[1][0]:#this is the most cocmmon error, two antigen were assigned to same phase
            #just use score to do a final test
            role_list=[]
            for x in nodes_list:
                role=judge_fliC_or_fljB_from_whole_contig_blast_score_ranking(x,Final_list,Final_list_passed)
                role_list.append((role,x))
    return role_list
def decide_contig_roles_for_H_antigen(Final_list, Final_list_passed):
    """Decide which assembly contig carries fliC and which carries fljB.

    Returns the [(role, contig), ...] list produced by
    fliC_or_fljB_judge_from_head_tail_sequence.
    """
    nodes = []
    for hit in Final_list_passed:
        name = hit[0]
        # flagellin hits only, excluding the head/tail marker sequences
        if name.startswith("fl") and "last" not in name and "first" not in name:
            nodes.append(name.split("___")[1].strip())
    node_names, _counts = Uniq(nodes)
    head_tail_hits = [hit for hit in Final_list if ("last" in hit[0] or "first" in hit[0])]
    return fliC_or_fljB_judge_from_head_tail_sequence(node_names, head_tail_hits, Final_list, Final_list_passed)
def decide_O_type_and_get_special_genes(Final_list,Final_list_passed):
    """Call the O-antigen type from parsed blast hits (allele mode).

    Parameters (both produced by xml_parse_score_comparision_seqsero):
        Final_list: all parsed blast hits, each shaped like
            (allele_name___contig_name, blast_score, coverage*identity, locations).
        Final_list_passed: the coverage-filtered subset of Final_list.

    Returns a 6-tuple:
        O_choice: predicted O group string, '-' when no call can be made.
        O_nodes_list: contig names supporting the O call.
        special_genes: {allele_name: score} for markers that are neither
            O- nor fliC/fljB alleles (e.g. tyr-O-9 / tyr-O-2 typing genes).
        final_O: the hit tuples used for the O decision (best hit per contig).
        contamination_O: warning string when extra O signals are seen, else ''.
        Otypes_uniq: unique O groups observed among the supporting hits.
    """
    #decide O based on Final_list
    O_choice="?"
    O_list=[]
    special_genes={}
    nodes=[]
    # Split the passed hits: O- alleles contribute candidate contigs, any
    # non-O / non-flagellar allele is kept as a "special" marker gene.
    for x in Final_list_passed:
        if x[0].startswith("O-"):
            nodes.append(x[0].split("___")[1].strip())
        elif not x[0].startswith("fl"):
            special_genes[x[0]]=x[2]#08172018, x[2] changed from x[-1]
    #print "special_genes:",special_genes
    c,d=Uniq(nodes)
    #print "potential O antigen contig",c
    final_O=[]
    O_nodes_list=[]
    # Keep only the first (best-ranked) O hit per contig.
    for x in c:#c is the list for contigs
        temp=0
        for y in Final_list_passed:
            if x in y[0] and y[0].startswith("O-"):
                final_O.append(y)
                break
        ### O contig has the problem of two genes on same contig, so do additional test
    potenial_new_gene=""
    for x in final_O:
        pointer=0 #for genes merged or not
        #not consider O-1,3,19_not_in_3,10, too short compared with others
        if "O-1,3,19_not_in_3,10" not in x[0] and int(x[0].split("__")[1].split("___")[0])*x[2]+850 <= int(x[0].split("length_")[1].split("_")[0]):#gene length << contig length; for now give 300*2 (for secureity can use 400*2) as flank region
            pointer=x[0].split("___")[1].strip()#store the contig name
            print(pointer)
        if pointer!=0:#it has potential merge event
            for y in Final_list:
                if pointer in y[0] and y not in final_O and (y[1]>=int(y[0].split("__")[1].split("___")[0])*1.5 or (y[1]>=int(y[0].split("__")[1].split("___")[0])*y[2] and y[1]>=400)):#that's a realtively strict filter now; if passed, it has merge event and add one more to final_O
                    potenial_new_gene=y
                    #print(potenial_new_gene)
                    break
    if potenial_new_gene!="":
        print("two differnt genes in same contig, fix it for O antigen")
        print(potenial_new_gene[:3])
        pointer=0
        for y in final_O:
            if y[0].split("___")[-1]==potenial_new_gene[0].split("___")[-1]:
                pointer=1
        if pointer!=0: #changed to consider two genes in same contig
            final_O.append(potenial_new_gene)
    ### end of the two genes on same contig test
    final_O=sorted(final_O,key=lambda x: x[2], reverse=True)#sorted
    if len(final_O)==0 or (len(final_O)==1 and "O-1,3,19_not_in_3,10" in final_O[0][0]):
        #print "$$$No Otype, due to no hit"#may need to be changed
        O_choice="-"
    else:
        # Coverage of the strongest real O hit; hits below 15% of it are
        # treated as possible contamination and excluded from the "clean" list.
        highest_O_coverage=max([float(x[0].split("_cov_")[-1].split("_")[0]) for x in final_O if "O-1,3,19_not_in_3,10" not in x[0]])
        O_list=[]
        O_list_less_contamination=[]
        for x in final_O:
            if not "O-1,3,19_not_in_3,10__130" in x[0]:#O-1,3,19_not_in_3,10 is too small, which may affect further analysis; to avoid contamination affect, use 0.15 of highest coverage as cut-off
                O_list.append(x[0].split("__")[0])
                O_nodes_list.append(x[0].split("___")[1])
                if float(x[0].split("_cov_")[-1].split("_")[0])>highest_O_coverage*0.15:
                    O_list_less_contamination.append(x[0].split("__")[0])
        ### special test for O9,46 and O3,10 family
        if ("O-9,46_wbaV" in O_list or "O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254" in O_list) and O_list_less_contamination[0].startswith("O-9,"):#not sure should use and float(O9_wbaV)/float(num_1) > 0.1
            if "O-9,46_wzy" in O_list or "O-9,46_wzy_partial" in O_list:#and float(O946_wzy)/float(num_1) > 0.1
                O_choice="O-9,46"
                #print "$$$Most possilble Otype: O-9,46"
            elif "O-9,46,27_partial_wzy" in O_list:#and float(O94627)/float(num_1) > 0.1
                O_choice="O-9,46,27"
                #print "$$$Most possilble Otype: O-9,46,27"
            else:
                O_choice="O-9"#next, detect O9 vs O2?
                O2=0
                O9=0
                # tyr-O-9 / tyr-O-2 marker scores discriminate O-9 from O-2.
                for z in special_genes:
                    if "tyr-O-9" in z:
                        O9=special_genes[z]
                    elif "tyr-O-2" in z:
                        O2=special_genes[z]
                if O2>O9:
                    O_choice="O-2"
                elif O2<O9:
                    pass
                else:
                    pass
                    #print "$$$No suitable one, because can't distinct it's O-9 or O-2, but O-9 has a more possibility."
        elif ("O-3,10_wzx" in O_list) and ("O-9,46_wzy" in O_list) and (O_list[0].startswith("O-3,10") or O_list_less_contamination[0].startswith("O-9,46_wzy")):#and float(O310_wzx)/float(num_1) > 0.1 and float(O946_wzy)/float(num_1) > 0.1
            if "O-3,10_not_in_1,3,19" in O_list:#and float(O310_no_1319)/float(num_1) > 0.1
                O_choice="O-3,10"
                #print "$$$Most possilble Otype: O-3,10 (contain O-3,10_not_in_1,3,19)"
            else:
                O_choice="O-1,3,19"
                #print "$$$Most possilble Otype: O-1,3,19 (not contain O-3,10_not_in_1,3,19)"
        ### end of special test for O9,46 and O3,10 family
        else:
            # Generic case: take the highest-scoring hit that also clears the
            # contamination coverage threshold.
            try:
                max_score=0
                for x in final_O:
                    if x[2]>=max_score and float(x[0].split("_cov_")[-1].split("_")[0])>highest_O_coverage*0.15:#use x[2],08172018, the "coverage identity = cover_length * identity"; also meet coverage threshold
                        max_score=x[2]#change from x[-1] to x[2],08172018
                        O_choice=x[0].split("_")[0]
                if O_choice=="O-1,3,19":
                    O_choice=final_O[1][0].split("_")[0]
                #print "$$$Most possilble Otype: ",O_choice
            except:
                pass
                #print "$$$No suitable Otype, or failure of mapping (please check the quality of raw reads)"
    if O_choice=="O-9,46,27" and len(O_list)==2 and "O-4_wzx" in O_list: #special for very low chance sitatuion between O4 and O9,27,46, this is for serotypes like Bredeney and Schwarzengrund (normallly O-4 will have higher score, but sometimes sequencing quality may affect the prediction)
        O_choice="O-4"
    #print "O:",O_choice,O_nodes_list
    Otypes=[]
    for x in O_list:
        if x!="O-1,3,19_not_in_3,10":
            if "O-9,46_" not in x:
                Otypes.append(x.split("_")[0])
            else:
                Otypes.append(x.split("-from")[0])#O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254
    #Otypes=[x.split("_")[0] for x in O_list if x!="O-1,3,19_not_in_3,10"]
    Otypes_uniq,Otypes_fre=Uniq(Otypes)
    # More than the expected number of distinct O groups suggests contamination.
    contamination_O=""
    if O_choice=="O-9,46,27" or O_choice=="O-3,10" or O_choice=="O-1,3,19":
        if len(Otypes_uniq)>2:
            contamination_O="potential contamination from O antigen signals"
    else:
        if len(Otypes_uniq)>1:
            if O_choice=="O-4" and len(Otypes_uniq)==2 and "O-9,46,27" in Otypes_uniq: #for special 4,12,27 case such as Bredeney and Schwarzengrund
                contamination_O=""
            elif O_choice=="O-9,46" and len(Otypes_uniq)==2 and "O-9,46_wbaV" in Otypes_uniq and "O-9,46_wzy" in Otypes_uniq: #for special 4,12,27 case such as Bredeney and Schwarzengrund
                contamination_O=""
            else:
                contamination_O="potential contamination from O antigen signals"
    return O_choice,O_nodes_list,special_genes,final_O,contamination_O,Otypes_uniq
### End of SeqSero2 allele prediction and output
def get_input_files(make_dir,input_file,data_type,dirpath):
    """Resolve the forward/reverse fastq names for each input data type.

    Runs inside make_dir (chdir in, chdir out at the end). Side effects:
    may call fastq-dump on .sra inputs and deinterleave_fastq.sh on
    interleaved inputs, creating fastq files in make_dir.

    Parameters:
        make_dir: working directory containing the (linked) input files.
        input_file: list of input file paths from the CLI.
        data_type: "1" interleaved PE, "2" separated PE, "3" SE,
            "4" assembly, "5" nanopore fasta, "6" nanopore fastq.
        dirpath: directory of this script (for deinterleave_fastq.sh).

    Returns:
        (for_fq, rev_fq): forward/reverse file names; rev_fq is "" when
        there is no reverse read (data types 3-6).
    """
    #tell input files from datatype
    #"<int>: '1'(pair-end reads, interleaved),'2'(pair-end reads, seperated),'3'(single-end reads), '4'(assembly),'5'(nanopore fasta),'6'(nanopore fastq)"
    for_fq=""
    rev_fq=""
    os.chdir(make_dir)
    if data_type=="1":
        input_file=input_file[0].split("/")[-1]
        if input_file.endswith(".sra"):
            # .sra archives are split into _1/_2 fastqs by fastq-dump.
            subprocess.check_call("fastq-dump --split-files "+input_file,shell=True)
            for_fq=input_file.replace(".sra","_1.fastq")
            rev_fq=input_file.replace(".sra","_2.fastq")
        else:
            # Interleaved fastq(.gz): split into _1/_2 with the helper script.
            core_id=input_file.split(".fastq")[0].split(".fq")[0]
            for_fq=core_id+"_1.fastq"
            rev_fq=core_id+"_2.fastq"
            if input_file.endswith(".gz"):
                subprocess.check_call("gzip -dc "+input_file+" | "+dirpath+"/deinterleave_fastq.sh "+for_fq+" "+rev_fq,shell=True)
            else:
                subprocess.check_call("cat "+input_file+" | "+dirpath+"/deinterleave_fastq.sh "+for_fq+" "+rev_fq,shell=True)
    elif data_type=="2":
        for_fq=input_file[0].split("/")[-1]
        rev_fq=input_file[1].split("/")[-1]
    elif data_type=="3":
        input_file=input_file[0].split("/")[-1]
        if input_file.endswith(".sra"):
            subprocess.check_call("fastq-dump --split-files "+input_file,shell=True)
            for_fq=input_file.replace(".sra","_1.fastq")
        else:
            for_fq=input_file
    elif data_type in ["4","5","6"]:
        for_fq=input_file[0].split("/")[-1]
    os.chdir("..")
    return for_fq,rev_fq
def predict_O_and_H_types(Final_list,Final_list_passed,new_fasta):
    """Predict O, fliC and fljB antigen types from blast hits (allele mode).

    Delegates the O call to decide_O_type_and_get_special_genes, then picks
    fliC/fljB contigs, removes contigs that only cover flanking regions, and
    records everything in SeqSero_log.txt / Extracted_antigen_alleles.fasta
    (both opened in append mode in the current working directory).

    Parameters:
        Final_list / Final_list_passed: parsed blast hits as produced by
            xml_parse_score_comparision_seqsero (passed = coverage-filtered).
        new_fasta: path of the micro-assembly fasta to extract sequences from.

    Returns:
        (O_choice, fliC_choice, fljB_choice, special_gene_list,
         contamination_O, contamination_H, Otypes_uniq,
         H1_cont_stat_list, H2_cont_stat_list)
    """
    #get O and H types from Final_list from blast parsing; allele mode
    from Bio import SeqIO
    fliC_choice="-"
    fljB_choice="-"
    fliC_contig="NA"
    fljB_contig="NA"
    fliC_region=set([0])
    fljB_region=set([0,])
    fliC_length=0 #can be changed to coverage in future; in 03292019, changed to ailgned length
    fljB_length=0 #can be changed to coverage in future; in 03292019, changed to ailgned length
    O_choice="-"#no need to decide O contig for now, should be only one
    O_choice,O_nodes,special_gene_list,O_nodes_roles,contamination_O,Otypes_uniq=decide_O_type_and_get_special_genes(Final_list,Final_list_passed)#decide the O antigen type and also return special-gene-list for further identification
    O_choice=O_choice.split("-")[-1].strip()
    if (O_choice=="1,3,19" and len(O_nodes_roles)==1 and "1,3,19" in O_nodes_roles[0][0]) or O_choice=="":
        O_choice="-"
    H_contig_roles=decide_contig_roles_for_H_antigen(Final_list,Final_list_passed)#decide the H antigen contig is fliC or fljB
    #add alignment locations, used for further selection, 03312019
    # NOTE(review): the += below appends the tuple to a copy of itself, so each
    # entry becomes (role, contig, role, contig, locations); downstream code
    # only reads [0], [1] and [-1], so the duplication is benign.
    for i in range(len(H_contig_roles)):
        x=H_contig_roles[i]
        for y in Final_list_passed:
            if x[1] in y[0] and y[0].startswith(x[0]):
                H_contig_roles[i]+=H_contig_roles[i]+(y[-1],)
                break
    log_file=open("SeqSero_log.txt","a")
    extract_file=open("Extracted_antigen_alleles.fasta","a")
    handle_fasta=list(SeqIO.parse(new_fasta,"fasta"))
    #print("O_contigs:")
    log_file.write("O_contigs:\n")
    extract_file.write("#Sequences with antigen signals (if the micro-assembled contig only covers the flanking region, it will not be used for contamination analysis)\n")
    extract_file.write("#O_contigs:\n")
    # Log every supporting O contig and dump its assembled sequence.
    for x in O_nodes_roles:
        if "O-1,3,19_not_in_3,10" not in x[0]:#O-1,3,19_not_in_3,10 is just a small size marker
            #print(x[0].split("___")[-1],x[0].split("__")[0],"blast score:",x[1],"identity%:",str(round(x[2]*100,2))+"%",str(min(x[-1]))+" to "+str(max(x[-1])))
            log_file.write(x[0].split("___")[-1]+" "+x[0].split("__")[0]+"; "+"blast score: "+str(x[1])+" identity%: "+str(round(x[2]*100,2))+"%; alignment from "+str(min(x[-1]))+" to "+str(max(x[-1]))+" of antigen\n")
            title=">"+x[0].split("___")[-1]+" "+x[0].split("__")[0]+"; "+"blast score: "+str(x[1])+" identity%: "+str(round(x[2]*100,2))+"%; alignment from "+str(min(x[-1]))+" to "+str(max(x[-1]))+" of antigen\n"
            seqs=""
            for z in handle_fasta:
                if x[0].split("___")[-1]==z.description:
                    seqs=str(z.seq)
            extract_file.write(title+seqs+"\n")
    if len(H_contig_roles)!=0:
        highest_H_coverage=max([float(x[1].split("_cov_")[-1].split("_")[0]) for x in H_contig_roles]) #less than highest*0.1 would be regarded as contamination and noises, they will still be considered in contamination detection and logs, but not used as final serotype output
    else:
        highest_H_coverage=0
    # Pick the longest-aligned contig for each H phase, skipping contigs that
    # also carry O signals and low-coverage (likely contaminant) contigs.
    for x in H_contig_roles:
        #if multiple choices, temporately select the one with longest length for now, will revise in further change
        if "fliC" == x[0] and len(x[-1])>=fliC_length and x[1] not in O_nodes and float(x[1].split("_cov_")[-1].split("_")[0])>highest_H_coverage*0.13:#remember to avoid the effect of O-type contig, so should not in O_node list
            fliC_contig=x[1]
            fliC_length=len(x[-1])
        elif "fljB" == x[0] and len(x[-1])>=fljB_length and x[1] not in O_nodes and float(x[1].split("_cov_")[-1].split("_")[0])>highest_H_coverage*0.13:
            fljB_contig=x[1]
            fljB_length=len(x[-1])
    for x in Final_list_passed:
        if fliC_choice=="-" and "fliC_" in x[0] and fliC_contig in x[0]:
            fliC_choice=x[0].split("_")[1]
        elif fljB_choice=="-" and "fljB_" in x[0] and fljB_contig in x[0]:
            fljB_choice=x[0].split("_")[1]
        elif fliC_choice!="-" and fljB_choice!="-":
            break
    #now remove contigs not in middle core part
    first_allele="NA"
    first_allele_percentage=0
    for x in Final_list:
        if x[0].startswith("fliC") or x[0].startswith("fljB"):
            first_allele=x[0].split("__")[0] #used to filter those un-middle contigs
            first_allele_percentage=x[2]
            break
    additional_contigs=[]
    for x in Final_list:
        if first_allele in x[0]:
            if (fliC_contig == x[0].split("___")[-1]):
                fliC_region=x[3]
            elif fljB_contig!="NA" and (fljB_contig == x[0].split("___")[-1]):
                fljB_region=x[3]
            else:
                if x[1]*1.1>int(x[0].split("___")[1].split("_")[3]):#loose threshold by multiplying 1.1
                    additional_contigs.append(x)
                #else:
                    #print x[:3]
    #we can just use the fljB region (or fliC depends on size), no matter set() or contain a large locations (without middle part); however, if none of them is fully assembled, use 500 and 1200 as conservative cut-off
    if first_allele_percentage>0.9:
        if len(fliC_region)>len(fljB_region) and (max(fljB_region)-min(fljB_region))>1000:
            target_region=fljB_region|(fliC_region-set(range(min(fljB_region),max(fljB_region)))) #fljB_region|(fliC_region-set(range(min(fljB_region),max(fljB_region))))
        elif len(fliC_region)<len(fljB_region) and (max(fliC_region)-min(fliC_region))>1000:
            target_region=fliC_region|(fljB_region-set(range(min(fliC_region),max(fliC_region)))) #fljB_region|(fliC_region-set(range(min(fljB_region),max(fljB_region))))
        else:
            target_region=set()#doesn't do anything
    else:
        target_region=set()#doesn't do anything
    #print(target_region)
    #print(additional_contigs)
    target_region2=set(list(range(0,525))+list(range(1200,1700)))#I found to use 500 to 1200 as special region would be best
    target_region=target_region2|target_region
    # Drop contigs that only align to the flanking (non-middle) part of the
    # allele, or that nearly duplicate the first allele's middle part.
    for x in additional_contigs:
        removal=0
        contig_length=int(x[0].split("___")[1].split("length_")[-1].split("_")[0])
        if fljB_contig not in x[0] and fliC_contig not in x[0] and len(target_region&x[3])/float(len(x[3]))>0.65 and contig_length*0.5<len(x[3])<contig_length*1.5: #consider length and alignment length for now, but very loose,0.5 and 1.5 as cut-off
            removal=1
        else:
            if first_allele_percentage > 0.9 and float(x[0].split("__")[1].split("___")[0])*x[2]/len(x[-1])>0.96:#if high similiarity with middle part of first allele (first allele >0.9, already cover middle part)
                removal=1
            else:
                pass
        if removal==1:
            for y in H_contig_roles:
                if y[1] in x[0]:
                    H_contig_roles.remove(y)
        else:
            pass
            #print(x[:3],contig_length,len(target_region&x[3])/float(len(x[3])),contig_length*0.5,len(x[3]),contig_length*1.5)
    #end of removing none-middle contigs
    #print("H_contigs:")
    log_file.write("H_contigs:\n")
    extract_file.write("#H_contigs:\n")
    H_contig_stat=[]
    H1_cont_stat={}
    H2_cont_stat={}
    # Log H contigs, dump their sequences, and accumulate per-allele scores
    # (H1_cont_stat for fliC, H2_cont_stat for fljB) for contamination checks.
    for i in range(len(H_contig_roles)):
        x=H_contig_roles[i]
        a=0
        for y in Final_list_passed:
            if x[1] in y[0] and y[0].startswith(x[0]):
                if "first" in y[0] or "last" in y[0]: #this is the final filter to decide it's fliC or fljB, if can't pass, then can't decide
                    for y in Final_list_passed: #it's impossible to has the "first" and "last" allele as prediction, so re-do it
                        if x[1] in y[0]:#it's very possible to be third phase allele, so no need to make it must be fliC or fljB
                            #print(x[1],"can't_decide_fliC_or_fljB",y[0].split("_")[1],"blast_score:",y[1],"identity%:",str(round(y[2]*100,2))+"%",str(min(y[-1]))+" to "+str(max(y[-1])))
                            log_file.write(x[1]+" "+x[0]+" "+y[0].split("_")[1]+"; "+"blast score: "+str(y[1])+" identity%: "+str(round(y[2]*100,2))+"%; alignment from "+str(min(y[-1]))+" to "+str(max(y[-1]))+" of antigen\n")
                            H_contig_roles[i]="can't decide fliC or fljB, may be third phase"
                            title=">"+x[1]+" "+x[0]+" "+y[0].split("_")[1]+"; "+"blast score: "+str(y[1])+" identity%: "+str(round(y[2]*100,2))+"%; alignment from "+str(min(y[-1]))+" to "+str(max(y[-1]))+" of antiten\n"
                            seqs=""
                            for z in handle_fasta:
                                if x[1]==z.description:
                                    seqs=str(z.seq)
                            extract_file.write(title+seqs+"\n")
                    break
                else:
                    #print(x[1],x[0],y[0].split("_")[1],"blast_score:",y[1],"identity%:",str(round(y[2]*100,2))+"%",str(min(y[-1]))+" to "+str(max(y[-1])))
                    log_file.write(x[1]+" "+x[0]+" "+y[0].split("_")[1]+"; "+"blast score: "+str(y[1])+" identity%: "+str(round(y[2]*100,2))+"%; alignment from "+str(min(y[-1]))+" to "+str(max(y[-1]))+" of antigen\n")
                    title=">"+x[1]+" "+x[0]+" "+y[0].split("_")[1]+"; "+"blast score: "+str(y[1])+" identity%: "+str(round(y[2]*100,2))+"%; alignment from "+str(min(y[-1]))+" to "+str(max(y[-1]))+" of antigen\n"
                    seqs=""
                    for z in handle_fasta:
                        if x[1]==z.description:
                            seqs=str(z.seq)
                    extract_file.write(title+seqs+"\n")
                    if x[0]=="fliC":
                        if y[0].split("_")[1] not in H1_cont_stat:
                            H1_cont_stat[y[0].split("_")[1]]=y[2]
                        else:
                            H1_cont_stat[y[0].split("_")[1]]+=y[2]
                    if x[0]=="fljB":
                        if y[0].split("_")[1] not in H2_cont_stat:
                            H2_cont_stat[y[0].split("_")[1]]=y[2]
                        else:
                            H2_cont_stat[y[0].split("_")[1]]+=y[2]
                    break
    #detect contaminations
    #print(H1_cont_stat)
    #print(H2_cont_stat)
    H1_cont_stat_list=[x for x in H1_cont_stat if H1_cont_stat[x]>0.2]
    H2_cont_stat_list=[x for x in H2_cont_stat if H2_cont_stat[x]>0.2]
    contamination_H=""
    if len(H1_cont_stat_list)>1 or len(H2_cont_stat_list)>1:
        contamination_H="potential contamination from H antigen signals"
    elif len(H2_cont_stat_list)==1 and fljB_contig=="NA":
        contamination_H="potential contamination from H antigen signals, uncommon weak fljB signals detected"
    #get additional antigens
    """
    if ("O-9,46_wbaV" in O_list or "O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254" in O_list) and O_list_less_contamination[0].startswith("O-9,"):#not sure should use and float(O9_wbaV)/float(num_1) > 0.1
      if "O-9,46_wzy" in O_list:#and float(O946_wzy)/float(num_1) > 0.1
        O_choice="O-9,46"
        #print "$$$Most possilble Otype: O-9,46"
      elif "O-9,46,27_partial_wzy" in O_list:#and float(O94627)/float(num_1) > 0.1
        O_choice="O-9,46,27"
        #print "$$$Most possilble Otype: O-9,46,27"
    elif ("O-3,10_wzx" in O_list) and ("O-9,46_wzy" in O_list) and (O_list[0].startswith("O-3,10") or O_list_less_contamination[0].startswith("O-9,46_wzy")):#and float(O310_wzx)/float(num_1) > 0.1 and float(O946_wzy)/float(num_1) > 0.1
      if "O-3,10_not_in_1,3,19" in O_list:#and float(O310_no_1319)/float(num_1) > 0.1
        O_choice="O-3,10"
        #print "$$$Most possilble Otype: O-3,10 (contain O-3,10_not_in_1,3,19)"
      else:
        O_choice="O-1,3,19"
        #print "$$$Most possilble Otype: O-1,3,19 (not contain O-3,10_not_in_1,3,19)"
    ### end of special test for O9,46 and O3,10 family
    if O_choice=="O-9,46,27" or O_choice=="O-3,10" or O_choice=="O-1,3,19":
      if len(Otypes_uniq)>2:
        contamination_O="potential contamination from O antigen signals"
    else:
      if len(Otypes_uniq)>1:
        if O_choice=="O-4" and len(Otypes_uniq)==2 and "O-9,46,27" in Otypes_uniq: #for special 4,12,27 case such as Bredeney and Schwarzengrund
          contamination_O=""
        elif O_choice=="O-9,46" and len(Otypes_uniq)==2 and "O-9,46_wbaV" in Otypes_uniq and "O-9,46_wzy" in Otypes_uniq: #for special 4,12,27 case such as Bredeney and Schwarzengrund
          contamination_O=""
    """
    additonal_antigents=[]
    #print(contamination_O)
    #print(contamination_H)
    log_file.write(contamination_O+"\n")
    log_file.write(contamination_H+"\n")
    log_file.close()
    return O_choice,fliC_choice,fljB_choice,special_gene_list,contamination_O,contamination_H,Otypes_uniq,H1_cont_stat_list,H2_cont_stat_list
def get_input_K(input_file,lib_dict,data_type,k_size):
    """Kmer mode: collect the library k-mers that occur in the input.

    Parameters:
        input_file: path to the reads / assembly file to k-merize.
        lib_dict: dict mapping allele name -> iterable of library k-mers.
        data_type: SeqSero2 input-type code as a string ('1'-'6').
        k_size: k-mer length used by the kmerizer helpers.

    Returns:
        input_Ks: set of library k-mers observed in the input. An empty set
        is returned for unrecognised data_type codes (the original raised
        UnboundLocalError in that case).
    """
    #kmer mode; get input_Ks from dict and data_type
    kmers = []
    for h in lib_dict:
        kmers += lib_dict[h]
    target_kmers = set(kmers)  # build once instead of per branch
    input_Ks = set()  # BUGFIX: safe default so an unknown data_type cannot crash the return
    if data_type == '4':  # assembly
        input_Ks = target_multifasta_kmerizer(input_file, k_size, target_kmers)
    elif data_type == '1' or data_type == '2' or data_type == '3':#set it for now, will change later
        input_Ks = target_read_kmerizer(input_file, k_size, target_kmers)
    elif data_type == '5':#minion_2d_fasta
        #input_Ks = minion_fasta_kmerizer(input_file, k_size, target_kmers)
        input_Ks = target_multifasta_kmerizer(input_file, k_size, target_kmers) #ed_SL_08172020: change for nanopore workflow
    elif data_type == '6':#minion_2d_fastq
        input_Ks = minion_fastq_kmerizer(input_file, k_size, target_kmers)
    return input_Ks
def get_kmer_dict(lib_dict,input_Ks):
    """Score every library allele against the observed input k-mers.

    The score of an allele is the percentage of its library k-mers present
    in input_Ks. Alleles are routed into three buckets with bucket-specific
    thresholds: O antigens (>25), flagellar H antigens (>40), and all other
    "special" markers (>1, a deliberately low floor needed to pick up weak
    O-3,10 signals in some samples).

    Returns (O_dict, H_dict, Special_dict), each {allele_name: score}.
    """
    #kmer mode; get predicted types
    O_dict = {}
    H_dict = {}
    Special_dict = {}
    for allele in lib_dict:
        library_kmers = lib_dict[allele]
        score = (len(library_kmers & input_Ks) / len(library_kmers)) * 100
        if score <= 1:
            continue  # below the arbitrary similarity floor
        if allele.startswith('O-'):
            if score > 25:
                O_dict[allele] = score
        elif allele.startswith('fl'):
            if score > 40:
                H_dict[allele] = score
        else:
            Special_dict[allele] = score
    return O_dict,H_dict,Special_dict
def call_O_and_H_type(O_dict,H_dict,Special_dict,make_dir):
    """Kmer mode: pick the final O, fliC and fljB calls from the score dicts.

    Writes every score to SeqSero_log.txt (append mode, current directory);
    make_dir is accepted for interface compatibility but not used here.

    Parameters:
        O_dict / H_dict / Special_dict: {allele_name: percent score}
            as produced by get_kmer_dict.

    Returns:
        (highest_O, highest_fliC, highest_fljB) — each '-' when no call.
    """
    log_file=open("SeqSero_log.txt","a")
    log_file.write("O_scores:\n")
    #call O:
    highest_O = '-'
    if len(O_dict) == 0:
        pass
    else:
        for x in O_dict:
            log_file.write(x+"\t"+str(O_dict[x])+"\n")
        # Special handling for the O-9,46 family: wbaV alone is ambiguous,
        # wzy presence/absence decides between O-9,46 / O-9,46,27 / O-9 vs O-2.
        if ('O-9,46_wbaV__1002' in O_dict and O_dict['O-9,46_wbaV__1002']>70) or ("O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254__1002" in O_dict and O_dict['O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254__1002']>70): # not sure should use and float(O9_wbaV)/float(num_1) > 0.1
            #if 'O-9,46_wzy__1191' in O_dict or "O-9,46_wzy_partial__216" in O_dict: # and float(O946_wzy)/float(num_1) > 0.1
            #modified to fix miscall of O-9,46
            if ('O-9,46_wzy__1191' in O_dict and O_dict['O-9,46_wzy__1191']>40) or ("O-9,46_wzy_partial__216" in O_dict and O_dict["O-9,46_wzy_partial__216"]>40): # and float(O946_wzy)/float(num_1) > 0.1
                highest_O = "O-9,46"
            elif "O-9,46,27_partial_wzy__1019" in O_dict: # and float(O94627)/float(num_1) > 0.1
                highest_O = "O-9,46,27"
            else:
                highest_O = "O-9" # next, detect O9 vs O2?
                O2 = 0
                O9 = 0
                # tyr-O-9 / tyr-O-2 marker scores discriminate O-9 from O-2.
                for z in Special_dict:
                    if "tyr-O-9" in z:
                        O9 = float(Special_dict[z])
                    if "tyr-O-2" in z:
                        O2 = float(Special_dict[z])
                if O2 > O9:
                    highest_O = "O-2"
        elif ("O-3,10_wzx__1539" in O_dict) and (
                "O-9,46_wzy__1191" in O_dict
        ): # and float(O310_wzx)/float(num_1) > 0.1 and float(O946_wzy)/float(num_1) > 0.1
            if "O-3,10_not_in_1,3,19__1519" in O_dict: # and float(O310_no_1319)/float(num_1) > 0.1
                highest_O = "O-3,10"
            else:
                highest_O = "O-1,3,19"
        ### end of special test for O9,46 and O3,10 family
        else:
            # Generic case: pick the highest-scoring allele, with the
            # O-9,46-without-wzy and O-1,3,19 corrections applied.
            try:
                max_score = 0
                for x in O_dict:
                    if float(O_dict[x]) >= max_score:
                        max_score = float(O_dict[x])
                        #highest_O = x.split("_")[0]
                        # ed_SL_12182019: modified to fix the O-9,46 error example1
                        if (x == 'O-9,46_wbaV__1002' or x == 'O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254__1002') and ('O-9,46_wzy__1191' not in O_dict and 'O-9,46_wzy_partial__216' not in O_dict):
                            highest_O = "O-9"
                        else:
                            highest_O = x.split("_")[0]
                if highest_O == "O-1,3,19":
                    # O-1,3,19 is a small marker; redo the scan without it.
                    highest_O = '-'
                    max_score = 0
                    for x in O_dict:
                        if x == 'O-1,3,19_not_in_3,10__130':
                            pass
                        else:
                            if float(O_dict[x]) >= max_score:
                                max_score = float(O_dict[x])
                                #highest_O = x.split("_")[0]
                                # ed_SL_12182019: modified to fix the O-9,46 error example1
                                if (x == 'O-9,46_wbaV__1002' or x == 'O-9,46_wbaV-from-II-9,12:z29:1,5-SRR1346254__1002') and ('O-9,46_wzy__1191' not in O_dict and 'O-9,46_wzy_partial__216' not in O_dict):
                                    highest_O = "O-9"
                                else:
                                    highest_O = x.split("_")[0]
            except:
                pass
    #call_fliC:
    if len(H_dict)!=0:
        highest_H_score_both_BC=H_dict[max(H_dict.keys(), key=(lambda k: H_dict[k]))] #used to detect whether fljB existed or not
    else:
        highest_H_score_both_BC=0
    highest_fliC = '-'
    highest_fliC_raw = '-'
    highest_Score = 0
    log_file.write("\nH_scores:\n")
    for s in H_dict:
        log_file.write(s+"\t"+str(H_dict[s])+"\n")
        if s.startswith('fliC'):
            if float(H_dict[s]) > highest_Score:
                highest_fliC = s.split('_')[1]
                highest_fliC_raw = s
                highest_Score = float(H_dict[s])
    #call_fljB
    highest_fljB = '-'
    highest_fljB_raw = '-'
    highest_Score = 0
    for s in H_dict:
        if s.startswith('fljB'):
            if float(H_dict[s]) > highest_Score and float(H_dict[s]) > highest_H_score_both_BC * 0.65: #fljB is special, so use highest_H_score_both_BC to give a general estimate of coverage, currently 0.65 seems pretty good; the reason use a high (0.65) is some fliC and fljB shared with each other
                #highest_fljB = s.split('_')[1]
                #highest_fljB_raw = s
                #highest_Score = float(H_dict[s])
                if s.split('_')[1]!=highest_fliC:
                    highest_fljB = s.split('_')[1]
                    highest_fljB_raw = s
                    highest_Score = float(H_dict[s])
    log_file.write("\nSpecial_scores:\n")
    for s in Special_dict:
        log_file.write(s+"\t"+str(Special_dict[s])+"\n")
    log_file.close()
    return highest_O,highest_fliC,highest_fljB
def get_temp_file_names(for_fq,rev_fq):
    """Derive the intermediate file names used by the allele (-a) workflow.

    All names are built from the forward (for_fq) / reverse (rev_fq) fastq
    names so that concurrent runs on different inputs never collide.

    Returns (sam, bam, sorted_bam, mapped_fq1, mapped_fq2, combined_fq,
    for_sai, rev_sai).
    """
    #seqsero2 -a; get temp file names
    sam, bam, sorted_bam = for_fq + ".sam", for_fq + ".bam", for_fq + "_sorted.bam"
    mapped_fq1, mapped_fq2 = for_fq + "_mapped.fq", rev_fq + "_mapped.fq"
    combined_fq = for_fq + "_combined.fq"
    for_sai, rev_sai = for_fq + ".sai", rev_fq + ".sai"
    return sam, bam, sorted_bam, mapped_fq1, mapped_fq2, combined_fq, for_sai, rev_sai
def map_and_sort(threads,database,fnameA,fnameB,sam,bam,for_sai,rev_sai,sorted_bam,mapping_mode):
    """seqsero2 -a: map reads to the antigen database with bwa and name-sort hits.

    Parameters:
        threads: thread count as a string (interpolated into shell commands).
        database: fasta database to index and map against.
        fnameA / fnameB: forward / reverse read files (fnameB "" for single-end).
        sam, bam, for_sai, rev_sai, sorted_bam: output file names
            (from get_temp_file_names).
        mapping_mode: "mem" for bwa mem, "sam" for bwa aln + sampe/samse.

    Side effects: creates the bwa index, sam/bam/sorted bam files, and
    appends tool stderr to data_log.txt in the current directory.
    """
    #seqsero2 -a; do mapping and sort
    print("building database...")
    subprocess.check_call("bwa index "+database+ " 2>> data_log.txt",shell=True)
    print("mapping...")
    if mapping_mode=="mem":
        subprocess.check_call("bwa mem -k 17 -t "+threads+" "+database+" "+fnameA+" "+fnameB+" > "+sam+ " 2>> data_log.txt",shell=True)
    elif mapping_mode=="sam":
        if fnameB!="":
            subprocess.check_call("bwa aln -t "+threads+" "+database+" "+fnameA+" > "+for_sai+ " 2>> data_log.txt",shell=True)
            subprocess.check_call("bwa aln -t "+threads+" "+database+" "+fnameB+" > "+rev_sai+ " 2>> data_log.txt",shell=True)
            subprocess.check_call("bwa sampe "+database+" "+for_sai+" "+ rev_sai+" "+fnameA+" "+fnameB+" > "+sam+ " 2>> data_log.txt",shell=True)
        else:
            subprocess.check_call("bwa aln -t "+threads+" "+database+" "+fnameA+" > "+for_sai+ " 2>> data_log.txt",shell=True)
            # BUGFIX: this command referenced the undefined name 'for_fq' and
            # omitted shell=True, so single-end 'sam' mapping always crashed;
            # use fnameA and run via the shell like every other command here.
            subprocess.check_call("bwa samse "+database+" "+for_sai+" "+fnameA+" > "+sam+ " 2>> data_log.txt",shell=True)
    # Keep only mapped reads (-F 4) in the bam.
    subprocess.check_call("samtools view -@ "+threads+" -F 4 -Sh "+sam+" > "+bam,shell=True)
    ### check the version of samtools then use differnt commands
    samtools_version=subprocess.Popen(["samtools"],stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    out, err = samtools_version.communicate()
    version = str(err).split("ersion:")[1].strip().split(" ")[0].strip()
    print("check samtools version:",version)
    ### end of samtools version check and its analysis
    # samtools <=1.2 takes an output *prefix*; newer versions write to stdout.
    if LooseVersion(version)<=LooseVersion("1.2"):
        subprocess.check_call("samtools sort -@ "+threads+" -n "+bam+" "+fnameA+"_sorted",shell=True)
    else:
        subprocess.check_call("samtools sort -@ "+threads+" -n "+bam+" >"+sorted_bam,shell=True)
def extract_mapped_reads_and_do_assembly_and_blast(current_time,sorted_bam,combined_fq,mapped_fq1,mapped_fq2,threads,fnameA,fnameB,database,mapping_mode,phred_offset):
    """seqsero2 -a: extract mapped reads, micro-assemble them, blast vs database.

    Returns (xmlfile, new_fasta): blast XML output name and the assembly
    fasta name; xmlfile is "NA" when too few reads mapped to assemble.

    NOTE(review): on the "NA" path `new_fasta` is never assigned, so the
    return statement raises UnboundLocalError. The caller in main()
    deliberately catches UnboundLocalError as the micro-assembly-failure
    signal, so this must NOT be "fixed" without also changing the caller.
    """
    #seqsero2 -a; extract, assembly and blast
    subprocess.check_call("bamToFastq -i "+sorted_bam+" -fq "+combined_fq,shell=True)
    #print("fnameA:",fnameA)
    #print("fnameB:",fnameB)
    if fnameB!="":
        subprocess.check_call("bamToFastq -i "+sorted_bam+" -fq "+mapped_fq1+" -fq2 "+mapped_fq2 + " 2>> data_log.txt",shell=True)#2> /dev/null if want no output
    else:
        pass
    outdir=current_time+"_temp"
    print("assembling...")
    # SPAdes gains little beyond 4 threads here; cap it.
    if int(threads)>4:
        t="4"
    else:
        t=threads
    if os.path.getsize(combined_fq)>100 and (fnameB=="" or os.path.getsize(mapped_fq1)>100):#if not, then it's "-:-:-"
        if phred_offset == 'auto':
            phred_offset = ''
        else:
            phred_offset = '--phred-offset ' + phred_offset
        if fnameB!="":
            #print("spades.py --careful "+phred_offset+" --pe1-s "+combined_fq+" --pe1-1 "+mapped_fq1+" --pe1-2 "+mapped_fq2+" -t "+t+" -o "+outdir+ " >> data_log.txt 2>&1")
            subprocess.check_call("spades.py --careful "+phred_offset+" --pe1-s "+combined_fq+" --pe1-1 "+mapped_fq1+" --pe1-2 "+mapped_fq2+" -t "+t+" -o "+outdir+ " >> data_log.txt 2>&1",shell=True)
        else:
            subprocess.check_call("spades.py --careful "+phred_offset+" --pe1-s "+combined_fq+" -t "+t+" -o "+outdir+ " >> data_log.txt 2>&1",shell=True)
        new_fasta=fnameA+"_"+database+"_"+mapping_mode+".fasta"
        #new_fasta=fnameA+"_"+database.split('/')[-1]+"_"+mapping_mode+".fasta" # change path to databse for packaging
        subprocess.check_call("mv "+outdir+"/contigs.fasta "+new_fasta+ " 2> /dev/null",shell=True)
        #os.system("mv "+outdir+"/scaffolds.fasta "+new_fasta+ " 2> /dev/null") contigs.fasta
        subprocess.check_call("rm -rf "+outdir+ " 2> /dev/null",shell=True)
        print("blasting...","\n")
        xmlfile="blasted_output.xml"#fnameA+"-extracted_vs_"+database+"_"+mapping_mode+".xml"
        subprocess.check_call('makeblastdb -in '+new_fasta+' -out '+new_fasta+'_db '+'-dbtype nucl >> data_log.txt 2>&1',shell=True) #temp.txt is to forbid the blast result interrupt the output of our program###1/27/2015
        subprocess.check_call("blastn -query "+database+" -db "+new_fasta+"_db -out "+xmlfile+" -outfmt 5 >> data_log.txt 2>&1",shell=True)###1/27/2015; 08272018, remove "-word_size 10"
    else:
        xmlfile="NA"
    return xmlfile,new_fasta
def judge_subspecies(fnameA):
    """seqsero2 -a: call the Salmonella subspecies by running SalmID.py.

    Parameters:
        fnameA: forward raw-reads fastq (or assembly) passed to SalmID.

    Returns:
        Subspecies code string ('I', 'II', ..., 'bongori', 'enterica') or
        '-' when the best score is below the acceptance threshold.

    SalmID prints a two-line TSV: header names on line 0, scores on line 1;
    columns 4/5 are the bongori/enterica species scores and columns 6+ are
    the per-subspecies scores.
    """
    #seqsero2 -a; judge subspecies on just forward raw reads fastq
    salmID_output=subprocess.Popen("SalmID.py -i "+fnameA,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
    out, err = salmID_output.communicate()
    out=out.decode("utf-8")
    file=open("data_log.txt","a")
    file.write(out)
    file.close()
    salm_species_scores=out.split("\n")[1].split("\t")[6:]
    salm_species_results=out.split("\n")[0].split("\t")[6:]
    max_score=0
    max_score_index=1 #default is 1, means "I"
    # Pick the subspecies column with the highest score.
    for i in range(len(salm_species_scores)):
        if max_score<float(salm_species_scores[i]):
            max_score=float(salm_species_scores[i])
            max_score_index=i
    prediction=salm_species_results[max_score_index].split(".")[1].strip().split(" ")[0]
    #if float(out.split("\n")[1].split("\t")[4]) > float(out.split("\n")[1].split("\t")[5]): #bongori and enterica compare
    if float(out.split("\n")[1].split("\t")[4]) > 10 and float(out.split("\n")[1].split("\t")[4]) > float(out.split("\n")[1].split("\t")[5]): ## ed_SL_0318: change SalmID_ssp_threshold
        prediction="bongori" #if not, the prediction would always be enterica, since they are located in the later part
    #if max_score<10: ## ed_SL_0318: change SalmID_ssp_threshold
    if max_score<60:
        prediction="-"
    ## ed_SL_0818: add for enterica
    if float(out.split("\n")[1].split("\t")[5]) > 10 and float(out.split("\n")[1].split("\t")[5]) > float(out.split("\n")[1].split("\t")[4]):
        prediction="enterica"
    ##
    return prediction
def judge_subspecies_Kmer(Special_dict):
    """seqsero2 -k: call the subspecies from the k-mer marker scores.

    Scans the "mer"-named markers in Special_dict (insertion order) and keeps
    the highest-scoring one above 60; a bongori marker above 95 is accepted
    immediately and stops the scan. Returns '-' when no marker qualifies.
    """
    #seqsero2 -k;
    best_score = 0
    prediction = "-"  # default when no subspecies marker passes the threshold
    for marker, raw_score in Special_dict.items():
        score = float(raw_score)
        #if "mer" in marker: ## ed_SL_0318: change ssp_threshold
        if "mer" not in marker or score <= 60:
            continue
        if best_score < score:
            best_score = score
            prediction = marker.split("_")[-1].strip()
        if marker.split("_")[-1].strip() == "bongori" and score > 95:
            # strong bongori signal: no need to keep testing enterica markers
            prediction = "bongori"
            break
    return prediction
## ed_SL_11232019: add notes for missing antigen
def check_antigens(ssp,O_antigen,H1_antigen,H2_antigen,NA_note):
    """Explain atypical antigen combinations in the report (ed_SL_11232019).

    Given the subspecies call and the three antigen calls ('-' = missing),
    return (antigen_note, NA_note): antigen_note is a human-readable warning
    (or '' for a normal profile), and NA_note is cleared whenever a warning
    applies, otherwise passed through unchanged.
    """
    antigen_note = ''
    if ssp == '-':
        # Not recognisable as Salmonella at all.
        antigen_note = 'The input genome cannot be identified as Salmonella. Check the input for taxonomic ID, contamination, or sequencing quality. '
        NA_note = ''
    else:
        has_O = O_antigen != '-'
        has_H1 = H1_antigen != '-'
        has_H2 = H2_antigen != '-'
        if has_O and not has_H1 and not has_H2: # O:-:-
            antigen_note = 'H antigens were not detected. This is an atypical result that should be further investigated. Most Salmonella strains have at least fliC, encoding the Phase 1 H antigen, even if it is not expressed. '
            NA_note = ''
        elif has_O and not has_H1 and has_H2: # O:-:H2
            antigen_note = 'fliC was not detected. This is an atypical result that should be further investigated. Most Salmonella strains have fliC, encoding the Phase 1 H antigen, even if it is not expressed. '
            NA_note = ''
        elif not has_O and has_H1: # -:H1:X
            antigen_note = 'O antigen was not detected. This result may be due to a rough strain that has deleted the rfb region. For raw reads input, the k-mer workflow is sometimes more sensitive than the microassembly workflow in detecting O antigen. Caution should be used with this approach because the k-mer result may be due to low levels of contamination. '
            NA_note = ''
        elif not has_O and not has_H1 and not has_H2: # -:-:-
            antigen_note = 'No serotype antigens were detected. This is an atypical result that should be further investigated. '
            NA_note = ''
    if ssp == 'enterica':
        antigen_note += 'Subspecies identification of the input genome cannot be definitively determined. '
        NA_note = ''
    return (antigen_note,NA_note)
## ed_SL_06062020: rename subspecies ID
# Lookup table translating the short subspecies codes returned by
# judge_subspecies / judge_subspecies_Kmer into the full names printed in
# the final report; '-' (no call) maps to itself.
subspecies_ID_dir = {'I': 'Salmonella enterica subspecies enterica (subspecies I)',
                     'II': 'Salmonella enterica subspecies salamae (subspecies II)',
                     'IIIa': 'Salmonella enterica subspecies arizonae (subspecies IIIa)',
                     'IIIb': 'Salmonella enterica subspecies diarizonae (subspecies IIIb)',
                     'IV': 'Salmonella enterica subspecies houtenae (subspecies IV)',
                     'VI': 'Salmonella enterica subspecies indica (subspecies VI)',
                     'VII': 'Salmonella enterica subspecies VII (subspecies VII)',
                     'bongori': 'Salmonella bongori',
                     'enterica': 'Salmonella enterica',
                     '-': '-'}
##
## ed_SL_08172020: format check for fasta or fastq in nanopore workflow, convert raw reads fastq to fasta
def format_check(input_file):
    """Detect fasta vs fastq by the first character and normalise to fasta.

    A file whose first line starts with '>' is already fasta and is returned
    unchanged; one starting with '@' is fastq and is converted with
    `seqtk seq -A` into <input_file>.fasta, whose path is returned.

    Raises:
        ValueError: if the first line is neither fasta nor fastq (the
        original printed a message and then crashed with UnboundLocalError).
    """
    # BUGFIX: close the file handle (it was leaked before).
    with open(input_file,'r') as handle:
        line = handle.readline()
    if line.startswith('>'):
        output_file = input_file
    elif line.startswith('@'):
        input_file_fa = input_file + '.fasta'
        subprocess.check_call("seqtk seq -A "+input_file+" > "+input_file_fa,shell=True)
        output_file = input_file_fa
    else:
        print ('please check the format of input files')
        # BUGFIX: fail explicitly instead of falling through to an
        # UnboundLocalError on the return statement.
        raise ValueError('unrecognized input format (expected fasta or fastq): ' + input_file)
    return (output_file)
##
def main():
# Entry point of SeqSero2_package: parse CLI options, prepare the output
# directory, then run either the allele ("-m a") micro-assembly workflow or
# the k-mer ("-m k") workflow and write SeqSero_result.txt / .tsv.
# NOTE(review): indentation was lost when this file was extracted; the code
# below is kept byte-identical to the source dump, with comments added only.
#combine SeqSeroK and SeqSero2, also with SalmID
args = parse_args()
input_file = args.i
data_type = args.t
analysis_mode = args.m
mapping_mode=args.b
threads=args.p
make_dir=args.d
clean_mode=args.c
sample_name=args.n
# NOTE(review): 'ingore_header' typo ("ignore") is preserved from the source.
ingore_header=args.s
phred_offset=args.phred_offset
k_size=27 #will change for bug fixing
dirpath = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
ex_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)),'seqsero2_db')) # ed_SL_09152019: add ex_dir for packaging
seqsero2_db=ex_dir+"/H_and_O_and_specific_genes.fasta" # ed_SL_11092019: change path to database for packaging
database="H_and_O_and_specific_genes.fasta"
note="Note: "
NA_note="This predicted serotype is not in the Kauffman-White scheme. " # ed_SL_09272019: add for new output format
# No arguments at all: re-invoke this script with -h to print usage.
if len(sys.argv)==1:
subprocess.check_call(dirpath+"/SeqSero2_package.py -h",shell=True)#change name of python file
else:
# Unique run id: timestamp plus a random suffix, used to name the
# default output directory.
request_id = time.strftime("%m_%d_%Y_%H_%M_%S", time.localtime())
request_id += str(random.randint(1, 10000000))
if make_dir is None:
make_dir="SeqSero_result_"+request_id
make_dir=os.path.abspath(make_dir)
if os.path.isdir(make_dir):
pass
else:
subprocess.check_call("mkdir -p "+make_dir,shell=True)
#subprocess.check_call("cp "+dirpath+"/"+database+" "+" ".join(input_file)+" "+make_dir,shell=True)
#subprocess.check_call("ln -sr "+dirpath+"/"+database+" "+" ".join(input_file)+" "+make_dir,shell=True)
# Symlink the reference database and the input files into the work dir.
subprocess.check_call("ln -f -s "+seqsero2_db+" "+" ".join(input_file)+" "+make_dir,shell=True) # ed_SL_11092019: change path to database for packaging
#subprocess.check_call("ln -f -s "+dirpath+"/"+database+" "+" ".join(input_file)+" "+make_dir,shell=True) ### use -f option to force the replacement of links, remove -r and use absolute path instead to avoid link issue (use 'type=os.path.abspath' in -i argument).
############################begin the real analysis
# ---- allele ("a") mode: map reads, micro-assemble, BLAST, predict ----
if analysis_mode=="a":
if data_type in ["1","2","3"]:#use allele mode
for_fq,rev_fq=get_input_files(make_dir,input_file,data_type,dirpath)
os.chdir(make_dir)
###add a function to tell input files
fnameA=for_fq.split("/")[-1]
fnameB=rev_fq.split("/")[-1]
current_time=time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
sam,bam,sorted_bam,mapped_fq1,mapped_fq2,combined_fq,for_sai,rev_sai=get_temp_file_names(fnameA,fnameB) #get temp files id
map_and_sort(threads,database,fnameA,fnameB,sam,bam,for_sai,rev_sai,sorted_bam,mapping_mode) #do mapping and sort
### avoid error out when micro assembly fails. ed_SL_03172020
try:
xmlfile,new_fasta=extract_mapped_reads_and_do_assembly_and_blast(current_time,sorted_bam,combined_fq,mapped_fq1,mapped_fq2,threads,fnameA,fnameB,database,mapping_mode,phred_offset) #extract the mapped reads and do micro assembly and blast
except (UnboundLocalError, subprocess.CalledProcessError):
# Assembly failure is tolerated: fall through with sentinel values.
xmlfile="NA"
H1_cont_stat_list=[]
H2_cont_stat_list=[]
###
if xmlfile=="NA":
O_choice,fliC_choice,fljB_choice,special_gene_list,contamination_O,contamination_H=("-","-","-",[],"","")
else:
Final_list=xml_parse_score_comparision_seqsero(xmlfile) #analyze xml and get parsed results
file=open("data_log.txt","a")
for x in Final_list:
file.write("\t".join(str(y) for y in x)+"\n")
file.close()
# Keep hits with >=0.9 coverage or sufficiently high score.
Final_list_passed=[x for x in Final_list if float(x[0].split("_cov_")[1].split("_")[0])>=0.9 and (x[1]>=int(x[0].split("__")[1]) or x[1]>=int(x[0].split("___")[1].split("_")[3]) or x[1]>1000)]
O_choice,fliC_choice,fljB_choice,special_gene_list,contamination_O,contamination_H,Otypes_uniq,H1_cont_stat_list,H2_cont_stat_list=predict_O_and_H_types(Final_list,Final_list_passed,new_fasta) #predict O, fliC and fljB
subspecies=judge_subspecies(fnameA) #predict subspecies
### ed_SL_06062020: correction VIII -> II
if subspecies == 'VIII':
subspecies = 'II'
### ed_SL_08132020: correction VII -> IV, according to CDC's suggestion
if subspecies == 'VII':
subspecies = 'IV'
note+='SalmID reports this as ssp VII, which has not been formally recognized. '
###
### ed_SL_08182020: change serotype ouput for genome without definitive subspecies ID
ssp_pointer = subspecies
if subspecies == 'enterica':
subspecies = '-'
###
###output
predict_form,predict_sero,star,star_line,claim=seqsero_from_formula_to_serotypes(O_choice,fliC_choice,fljB_choice,special_gene_list,subspecies)
claim="" #04132019, disable claim for new report requirement
contamination_report=""
H_list=["fliC_"+x for x in H1_cont_stat_list if len(x)>0]+["fljB_"+x for x in H2_cont_stat_list if len(x)>0]
if contamination_O!="" and contamination_H=="":
contamination_report="#Potential inter-serotype contamination detected from O antigen signals. All O-antigens detected:"+"\t".join(Otypes_uniq)+"."
elif contamination_O=="" and contamination_H!="":
contamination_report="#Potential inter-serotype contamination detected or potential thrid H phase from H antigen signals. All H-antigens detected:"+"\t".join(H_list)+"."
elif contamination_O!="" and contamination_H!="":
contamination_report="#Potential inter-serotype contamination detected from both O and H antigen signals.All O-antigens detected:"+"\t".join(Otypes_uniq)+". All H-antigens detected:"+"\t".join(H_list)+"."
if contamination_report!="":
#contamination_report="potential inter-serotype contamination detected (please refer below antigen signal report for details)." #above contamination_reports are for back-up and bug fixing #web-based mode need to be re-used, 04132019
contamination_report="Co-existence of multiple serotypes detected, indicating potential inter-serotype contamination. See 'Extracted_antigen_alleles.fasta' for detected serotype determinant alleles. "
#claim="\n"+open("Extracted_antigen_alleles.fasta","r").read()#used to store H and O antigen sequeences #04132019, need to change if using web-version
#if contamination_report+star_line+claim=="": #0413, new output style
# note=""
#else:
# note="Note:"
### ed_SL_11232019: add notes for missing antigen
if O_choice=="":
O_choice="-"
antigen_note,NA_note=check_antigens(ssp_pointer,O_choice,fliC_choice,fljB_choice,NA_note)
if sample_name:
print ("Sample name:\t"+sample_name)
###
# '-c' removes the whole work directory; otherwise write the two reports.
if clean_mode:
subprocess.check_call("rm -rf "+make_dir,shell=True)
make_dir="none-output-directory due to '-c' flag"
else:
new_file=open("SeqSero_result.txt","w")
### ed_SL_01152020: add new output
conta_note="yes" if "inter-serotype contamination" in contamination_report else "no"
tsv_file=open("SeqSero_result.tsv","w")
if ingore_header:
pass
else:
tsv_file.write("Sample name\tOutput directory\tInput files\tO antigen prediction\tH1 antigen prediction(fliC)\tH2 antigen prediction(fljB)\tPredicted identification\tPredicted antigenic profile\tPredicted serotype\tPotential inter-serotype contamination\tNote\n")
if sample_name:
new_file.write("Sample name:\t"+sample_name+"\n")
tsv_file.write(sample_name+'\t')
else:
tsv_file.write(input_file[0].split('/')[-1]+'\t')
###
# Serotype found in the Kauffman-White scheme vs. "N/A" fallback output.
if "N/A" not in predict_sero:
new_file.write("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+"\t".join(input_file)+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+fliC_choice+"\n"+
"H2 antigen prediction(fljB):\t"+fljB_choice+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+predict_sero+"\n"+
note+contamination_report+star_line+claim+antigen_note+"\n")#+##
tsv_file.write(make_dir+"\t"+" ".join(input_file)+"\t"+O_choice+"\t"+fliC_choice+"\t"+fljB_choice+"\t"+subspecies_ID_dir[ssp_pointer]+"\t"+predict_form+"\t"+predict_sero+"\t"+conta_note+"\t"+contamination_report+star_line+claim+antigen_note+"\n")
else:
#star_line=star_line.strip()+"\tNone such antigenic formula in KW.\n"
#star_line="" #04132019, for new output requirement, diable star_line if "NA" in output
new_file.write("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+"\t".join(input_file)+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+fliC_choice+"\n"+
"H2 antigen prediction(fljB):\t"+fljB_choice+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+subspecies+' '+predict_form+"\n"+ # add serotype output for "N/A" prediction, add subspecies
note+NA_note+contamination_report+star_line+claim+antigen_note+"\n")#+##
tsv_file.write(make_dir+"\t"+" ".join(input_file)+"\t"+O_choice+"\t"+fliC_choice+"\t"+fljB_choice+"\t"+subspecies_ID_dir[ssp_pointer]+"\t"+predict_form+"\t"+subspecies+' '+predict_form+"\t"+conta_note+"\t"+NA_note+contamination_report+star_line+claim+antigen_note+"\n")
new_file.close()
tsv_file.close()
#subprocess.check_call("cat Seqsero_result.txt",shell=True)
#subprocess.call("rm H_and_O_and_specific_genes.fasta* *.sra *.bam *.sam *.fastq *.gz *.fq temp.txt *.xml "+fnameA+"*_db* 2> /dev/null",shell=True)
# Best-effort cleanup of intermediate files (errors ignored on purpose).
subprocess.call("rm H_and_O_and_specific_genes.fasta* *.sra *.bam *.sam *.fastq *.gz *.fq temp.txt "+fnameA+"*_db* 2> /dev/null",shell=True)
# Mirror the report on stdout.
if "N/A" not in predict_sero:
#print("Output_directory:"+make_dir+"\nInput files:\t"+for_fq+" "+rev_fq+"\n"+"O antigen prediction:\t"+O_choice+"\n"+"H1 antigen prediction(fliC):\t"+fliC_choice+"\n"+"H2 antigen prediction(fljB):\t"+fljB_choice+"\n"+"Predicted antigenic profile:\t"+predict_form+"\n"+"Predicted subspecies:\t"+subspecies+"\n"+"Predicted serotype(s):\t"+predict_sero+star+"\nNote:"+contamination_report+star+star_line+claim+"\n")#+##
print("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+"\t".join(input_file)+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+fliC_choice+"\n"+
"H2 antigen prediction(fljB):\t"+fljB_choice+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+predict_sero+"\n"+
note+contamination_report+star_line+claim+antigen_note+"\n")#+##
else:
print("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+"\t".join(input_file)+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+fliC_choice+"\n"+
"H2 antigen prediction(fljB):\t"+fljB_choice+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+subspecies+' '+predict_form+"\n"+ # add serotype output for "N/A" prediction, subspecies
note+NA_note+contamination_report+star_line+claim+antigen_note+"\n")
else:
print("Allele modes only support raw reads datatype, i.e. '-t 1 or 2 or 3'; please use '-m k'")
# ---- k-mer ("k") mode: no mapping/assembly, k-mer lookup only ----
elif analysis_mode=="k":
#ex_dir = os.path.dirname(os.path.realpath(__file__))
ex_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)),'seqsero2_db')) # ed_SL_09152019: change ex_dir for packaging
#output_mode = args.mode
for_fq,rev_fq=get_input_files(make_dir,input_file,data_type,dirpath)
input_file = for_fq #-k will just use forward because not all reads were used
os.chdir(make_dir)
### ed_SL_08182020: use assembly workflow for nanopore fastq, convert fastq to fasta
if data_type == "5":
input_file = format_check(for_fq)
###
f = open(ex_dir + '/antigens.pickle', 'rb')
lib_dict = pickle.load(f)
# NOTE(review): 'f.close' is missing parentheses -- the method is referenced
# but never called, so this pickle file handle is never closed.
f.close
input_Ks=get_input_K(input_file,lib_dict,data_type,k_size)
O_dict,H_dict,Special_dict=get_kmer_dict(lib_dict,input_Ks)
highest_O,highest_fliC,highest_fljB=call_O_and_H_type(O_dict,H_dict,Special_dict,make_dir)
subspecies=judge_subspecies_Kmer(Special_dict)
if subspecies=="IIb" or subspecies=="IIa":
subspecies="II"
### ed_SL_06062020: correction VIII -> II
if subspecies == 'VIII':
subspecies = 'II'
### ed_SL_08132020: correction VII -> IV, according to CDC's suggestion
if subspecies == 'VII':
subspecies = 'IV'
note+='SalmID reports this as ssp VII, which has not been formally recognized. '
###
### ed_SL_08182020: change serotype ouput for genome without definitive subspecies ID
ssp_pointer = subspecies
if subspecies == 'enterica':
subspecies = '-'
###
predict_form,predict_sero,star,star_line,claim = seqsero_from_formula_to_serotypes(
highest_O.split('-')[1], highest_fliC, highest_fljB, Special_dict,subspecies)
claim="" #no claim any more based on new output requirement
#if star_line+claim=="": #0413, new output style
# note=""
#else:
# note="Note:"
### ed_SL_11232019: add notes for missing antigen
if highest_O.split('-')[-1]=="":
O_choice="-"
else:
O_choice=highest_O.split('-')[-1]
antigen_note,NA_note=check_antigens(ssp_pointer,O_choice,highest_fliC,highest_fljB,NA_note)
if sample_name:
print ("Sample name:\t"+sample_name)
###
if clean_mode:
subprocess.check_call("rm -rf "+make_dir,shell=True)
make_dir="none-output-directory due to '-c' flag"
else:
new_file=open("SeqSero_result.txt","w")
tsv_file=open("SeqSero_result.tsv","w")
# ### ed_SL_05282019, fix the assignment issue of variable 'O_choice' using "-m k -c"
# if highest_O.split('-')[-1]=="":
# O_choice="-"
# else:
# O_choice=highest_O.split('-')[-1]
# ###
# else:
# if highest_O.split('-')[-1]=="":
# O_choice="-"
# else:
# O_choice=highest_O.split('-')[-1]
#print("Output_directory:"+make_dir+"\tInput_file:"+input_file+"\tPredicted subpecies:"+subspecies + '\tPredicted antigenic profile:' + predict_form + '\tPredicted serotype(s):' + predict_sero)
# new_file=open("SeqSero_result.txt","w")
#new_file.write("Output_directory:"+make_dir+"\nInput files:\t"+input_file+"\n"+"O antigen prediction:\t"+O_choice+"\n"+"H1 antigen prediction(fliC):\t"+highest_fliC+"\n"+"H2 antigen prediction(fljB):\t"+highest_fljB+"\n"+"Predicted antigenic profile:\t"+predict_form+"\n"+"Predicted subspecies:\t"+subspecies+"\n"+"Predicted serotype(s):\t"+predict_sero+star+"\n"+star+star_line+claim+"\n")#+##
### ed_SL_01152020: add new output
# tsv_file=open("SeqSero_result.tsv","w")
if ingore_header:
pass
else:
tsv_file.write("Sample name\tOutput directory\tInput files\tO antigen prediction\tH1 antigen prediction(fliC)\tH2 antigen prediction(fljB)\tPredicted identification\tPredicted antigenic profile\tPredicted serotype\tNote\n")
if sample_name:
new_file.write("Sample name:\t"+sample_name+"\n")
tsv_file.write(sample_name+'\t')
else:
tsv_file.write(input_file.split('/')[-1]+'\t')
###
if "N/A" not in predict_sero:
new_file.write("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+input_file+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+highest_fliC+"\n"+
"H2 antigen prediction(fljB):\t"+highest_fljB+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+predict_sero+"\n"+
note+star_line+claim+antigen_note+"\n")#+##
tsv_file.write(make_dir+"\t"+input_file+"\t"+O_choice+"\t"+highest_fliC+"\t"+highest_fljB+"\t"+subspecies_ID_dir[ssp_pointer]+"\t"+predict_form+"\t"+predict_sero+"\t"+star_line+claim+antigen_note+"\n")
else:
#star_line=star_line.strip()+"\tNone such antigenic formula in KW.\n"
#star_line = "" #changed for new output requirement, 04132019
new_file.write("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+input_file+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+highest_fliC+"\n"+
"H2 antigen prediction(fljB):\t"+highest_fljB+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+subspecies+' '+predict_form+"\n"+ # add serotype output for "N/A" prediction, subspecies
note+NA_note+star_line+claim+antigen_note+"\n")#+##
tsv_file.write(make_dir+"\t"+input_file+"\t"+O_choice+"\t"+highest_fliC+"\t"+highest_fljB+"\t"+subspecies_ID_dir[ssp_pointer]+"\t"+predict_form+"\t"+subspecies+' '+predict_form+"\t"+NA_note+star_line+claim+antigen_note+"\n")
new_file.close()
tsv_file.close()
# Best-effort cleanup of intermediate files (errors ignored on purpose).
subprocess.call("rm *.fasta* *.fastq *.gz *.fq temp.txt *.sra 2> /dev/null",shell=True)
# Mirror the report on stdout.
if "N/A" not in predict_sero:
print("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+input_file+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+highest_fliC+"\n"+
"H2 antigen prediction(fljB):\t"+highest_fljB+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+predict_sero+"\n"+
note+star_line+claim+antigen_note+"\n")#+##
else:
print("Output directory:\t"+make_dir+"\n"+
"Input files:\t"+input_file+"\n"+
"O antigen prediction:\t"+O_choice+"\n"+
"H1 antigen prediction(fliC):\t"+highest_fliC+"\n"+
"H2 antigen prediction(fljB):\t"+highest_fljB+"\n"+
"Predicted identification:\t"+subspecies_ID_dir[ssp_pointer]+"\n"+
"Predicted antigenic profile:\t"+predict_form+"\n"+
"Predicted serotype:\t"+subspecies+' '+predict_form+"\n"+ # add serotype output for "N/A" prediction, subspecies
note+NA_note+star_line+claim+antigen_note+"\n")#+##
if __name__ == '__main__':
main()
|
denglab/SeqSero2
|
bin/SeqSero2_package.py
|
Python
|
gpl-2.0
| 86,947
|
[
"BLAST",
"BWA"
] |
78dc120e80303b50960b362803abe96b77e6212368caa6a901a94ea30830f400
|
#!/usr/bin/env python2
import sys
#sys.path.append('/data/antares/aux')
import os
import glob
import json
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import rec2csv, rec2txt
from astropy.visualization import hist
from collections import Counter, OrderedDict
from ANTARES_object import TouchstoneObject
import scipy.interpolate as scinterp
import pickle
# since the claimedtype in the sne.space data is ordered by time (newest claimedtype first)
# it makes sense to store this, and keep a count of how many studies agree with that type
# this effectively decides what the final classification should be
# since, of course, people don't actually agree on type, despite the spectra
class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order in which keys were first seen.

    The multiple inheritance does all the work: ``Counter`` supplies the
    counting behaviour while ``OrderedDict`` supplies ordered storage, so
    iterating the counter yields types in first-appearance order (the
    sne.space data lists the newest claimed type first).
    """
    pass
def check_bad_types(ntype):
    """Return True when *ntype* is not a usable supernova classification.

    Rejected entries are unconfirmed candidates, types ending in '?',
    the bare ambiguous type 'I', and non-supernova sources (stars,
    cataclysmic variables, AGN, luminous blue variables, radio sources).
    """
    exact_rejects = ('Candidate', 'I', 'Radio')
    prefix_rejects = ('Star', 'CV', 'AGN', 'LBV')
    if ntype in exact_rejects:
        return True
    if ntype.endswith('?'):
        return True
    return ntype.startswith(prefix_rejects)
def GProcessing():
"""
This method does the heavy lifting of actually processing all the sne.space lightcurves
Each lightcurve is read in parallel with MPI, and has to pass various cuts
A dictionary of all the objects is built up, containing auxillary information on the object
as well as the status of processing and the output of the processing
If it fails the cuts, the object is not used, and simply marked as failed
If it passes the cuts, a gaussian process is used to attempt to smooth the light curve in each band
Individual bands are treated separately, and allowed to fail independent of other bands
If all the bands fail, the object is marked as having failed, even if it did pass the cuts
(as no useful data was extracted)
We attempt to align the lightcurves in an absolute sense (i.e. max to fixed phase)
rather than relative to each other (as this processing is done in parallel, and we don't have that info)
A single json file is written out with the gaussian process smoothed data
"""
# NOTE(review): despite the docstring, no MPI code is visible in this body
# and the output is a pickle ('des_sn.p'), not JSON -- docstring looks stale.
# NOTE(review): indentation was lost when this file was extracted; the code
# below is kept byte-identical to the source dump, with comments added only.
# setup the MPI process, and divide up the files for processing
# this division is just by number of files, not relative amount of data in each file
#Set up final json file
des_sn = {}
outfile = 'des_sn.p'
#Generate dictionary of all SN types from key file
base_path = '../gen_lightcurves/DES_lcurves/DES_BLIND+HOSTZ/'
key_file = '../gen_lightcurves/DES_lcurves/TEST+HOST.KEY'
with open(key_file, 'r') as f:
data = f.readlines()
# Build SN_key: object id -> type / redshift metadata from the KEY file.
SN_key = {}
for line in data:
if line.startswith('NVAR') or line.startswith('VARNAMES'):
continue
#Only need 2nd 3rd and 4th element
_, sn_id, sntype, confirm_type, genz, hostz, hostzerr = line.split()
SN_key[int(sn_id)] = {'sntype': int(sntype), 'confirm_type': int(confirm_type),\
'genz': float(genz), 'hostz': float(hostz), 'hostzerr': float(hostzerr)}
lightcurves = os.listdir(base_path)
for i,lightcurve in enumerate(lightcurves):
#Eliminate the three header files and the 4 filter files
base_header = 'DES_BLIND+HOSTZ'
if lightcurve.startswith(base_header):
continue
elif lightcurve[4] in ['g', 'r', 'i', 'z']:
continue
tobj = TouchstoneObject.fromfile(base_path + lightcurve)
# NOTE(review): mwebv (Milky Way E(B-V)) is read but never used below.
mwebv = tobj.header['mwebv']
#Look up the types for future analysis
object_id = int(tobj.objectname)
object_key = SN_key[object_id]
sntype = object_key['sntype']
confirm_type = object_key['confirm_type']
hostz = object_key['hostz']
hostzerr = object_key['hostzerr']
genz = object_key['genz']
# Two independent smoothers: B-spline and a Gaussian-process regression.
outbspline = tobj.spline_smooth(per=False, minobs=6)
outgp = tobj.gaussian_process_alt_smooth(per=False, scalemin=np.log(10**-4), scalemax=np.log(10**5), minobs=6)
outjson = {}
#Only loop over filters that both outgp and outbspline share
#print("OutGP: ", list(outgp.keys()))
#print("OutBspline: ", list(outbspline.keys()))
#print(outgp.keys(),outbspline.keys())
outfilters = list(set(outgp.keys()) & set(outbspline.keys()))
if set(outgp.keys()) != set(outbspline.keys()):
print("Filter difference between bspline and GP")
for filt in outfilters:
# Generate resampled values from the Gaussian Process regression
# (outgp[filt] unpacks as (gp, jd, mag, dmag) per the line below).
thisgp, thisjd, thismag, thisdmag = outgp[filt]
#I need to choose whether to sample at a frequency or
# a fixed number of points
## FOR NOW, I'M CHOOSING A FIXED NUMBER OF POINTS
#mod_dates = np.arange(thisjd.min(), thisjd.max(), 1.)
#### 128 chosen to allow for more levels of pywt analysis (2** divisible)
mod_dates = np.linspace(thisjd.min(), thisjd.max(), 128)
thismod, modcovar = thisgp.predict(thismag, mod_dates)
thismody, modcovary = thisgp.predict(thismag, thisjd)
thiserr = np.sqrt(np.diag(modcovar))
# Generate resampled values from the spline model
thisbspline = outbspline[filt]
thismod_bspline = scinterp.splev(mod_dates, thisbspline)
goodstatus = True
# Median-absolute-deviation comparison between data, GP at the data
# epochs, and GP on the resampled grid; a spread > 0.5 flags the band.
mad_test = np.median(np.abs(thismody - np.median(thismody)))
mad_mod = np.median(np.abs(thismod - np.median(thismod )))
mad_data = np.median(np.abs(thismag - np.median(thismag )))
if (mad_test - mad_data) > 0.5 or np.abs(mad_mod - mad_data) > 0.5:
goodstatus=False
message = 'Outlier rejection failed (data: %.3f model: %.3f interp: %.3f)'%(mad_data, mad_test, mad_mod)
#print(message)
outjson[filt] = {'kernel':list(thisgp.kernel.pars),\
'mjd':thisjd.tolist(),\
'mag':thismag.tolist(),\
'dmag':thisdmag.tolist(),\
'modeldate':mod_dates.tolist(),\
'modelmag':thismod.tolist(),\
'modelerr':thiserr.tolist(),\
'bsplinemag':thismod_bspline.tolist(),\
'goodstatus':goodstatus,\
'hostz': hostz,\
'hostzerr': hostzerr,\
'confirm_type': confirm_type,\
'type': sntype}
# Skip objects for which every band failed to produce output.
if len(outjson.keys()) == 0:
continue
des_sn[object_id] = outjson
with open(outfile, mode='wb') as f:
pickle.dump(des_sn, f)
#close JSON
#endfor over files
def main():
    """Entry point: run the full DES light-curve smoothing pipeline."""
    GProcessing()
if __name__=='__main__':
sys.exit(main())
|
tayebzaidi/HonorsThesisTZ
|
ThesisCode/DES_Pipeline/gen_lightcurves/parse_des.py
|
Python
|
gpl-3.0
| 7,003
|
[
"Gaussian"
] |
59e5586bec408b02d5526fb18520706409d134bc7e9988ab8d4388f2b2c5f8ba
|
# Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import importlib_wrapper
def shorten_loop(code):
    """Cap the sample's infinite reaction loop at 6 iterations.

    The wang_landau_reaction_ensemble sample runs ``while True:`` until an
    exception stops it; replacing the first such loop header with a bounded
    ``for`` keeps the test fast and stops before the exception is raised.

    :param code: source text of the sample script
    :return: the source with the first ``while True:`` replaced
    """
    # stop reaction before the exception is raised
    # 'breakpoint' is a builtin since Python 3.7, so use a non-shadowing name.
    loop_header = "while True:"
    assert loop_header in code
    # Replace only the first occurrence; any later loops stay untouched.
    code = code.replace(loop_header, "for _ in range(6):", 1)
    return code
# Import the sample script (path substituted by CMake) with the loop-shortening
# patch applied; also get a decorator that skips the test when the required
# ESPResSo features are not compiled in.
sample, skipIfMissingFeatures = importlib_wrapper.configure_and_import(
"@SAMPLES_DIR@/wang_landau_reaction_ensemble.py",
substitutions=shorten_loop)
@skipIfMissingFeatures
class Sample(ut.TestCase):
# Importing the sample runs it; holding its system object is the whole test.
system = sample.system
if __name__ == "__main__":
ut.main()
|
KaiSzuttor/espresso
|
testsuite/scripts/samples/test_wang_landau_reaction_ensemble.py
|
Python
|
gpl-3.0
| 1,250
|
[
"ESPResSo"
] |
620ca951fe4cf612762995a57500cdb9995bd7c78bb97c950eeea0ba92663afe
|
"""
Testing code.
Updated BSM August 2015
"""
import unittest
import os
import numpy as np
import kriging_tools as kt
import core
import variogram_models
from ok import OrdinaryKriging
from uk import UniversalKriging
from ok3d import OrdinaryKriging3D
from uk3d import UniversalKriging3D
class TestPyKrige(unittest.TestCase):
def setUp(self):
# Load reference data and pre-computed answer grids (produced with
# KT3D_H2O) plus small hand-made 2D/3D datasets and masks used across
# the test methods below.
self.test_data = np.genfromtxt(os.path.join(os.getcwd(), 'test_data/test_data.txt'))
self.ok_test_answer, self.ok_test_gridx, self.ok_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test1_answer.asc'), footer=2)
self.uk_test_answer, self.uk_test_gridx, self.uk_test_gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test2_answer.asc'), footer=2)
# Columns are x, y, value.
self.simple_data = np.array([[0.3, 1.2, 0.47],
[1.9, 0.6, 0.56],
[1.1, 3.2, 0.74],
[3.3, 4.4, 1.47],
[4.7, 3.8, 1.74]])
self.simple_gridx = np.arange(0.0, 6.0, 1.0)
self.simple_gridx_2 = np.arange(0.0, 5.5, 0.5)
self.simple_gridy = np.arange(0.0, 5.5, 0.5)
# Mask is True on the grid diagonal (where x == y).
xi, yi = np.meshgrid(self.simple_gridx, self.simple_gridy)
self.mask = np.array(xi == yi)
# Columns are x, y, z, value.
self.simple_data_3d = np.array([[0.1, 0.1, 0.3, 0.9],
[0.2, 0.1, 0.4, 0.8],
[0.1, 0.3, 0.1, 0.9],
[0.5, 0.4, 0.4, 0.5],
[0.3, 0.3, 0.2, 0.7]])
self.simple_gridx_3d = np.arange(0.0, 0.6, 0.05)
self.simple_gridy_3d = np.arange(0.0, 0.6, 0.01)
self.simple_gridz_3d = np.arange(0.0, 0.6, 0.1)
zi, yi, xi = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d, self.simple_gridx_3d, indexing='ij')
self.mask_3d = np.array((xi == yi) & (yi == zi))
def test_core_adjust_for_anisotropy(self):
    """A 90-degree rotation with y-scaling 2 maps the unit cross as expected."""
    xs = np.array([1.0, 0.0, -1.0, 0.0])
    ys = np.array([0.0, 1.0, 0.0, -1.0])
    new_x, new_y = core.adjust_for_anisotropy(xs, ys, 0.0, 0.0, 2.0, 90.0)
    expected_x = np.array([0.0, 1.0, 0.0, -1.0])
    expected_y = np.array([-2.0, 0.0, 2.0, 0.0])
    self.assertTrue(np.allclose(new_x, expected_x))
    self.assertTrue(np.allclose(new_y, expected_y))
def test_core_adjust_for_anisotropy_3d(self):
# Rotate the three unit basis vectors by 90 degrees about each axis in
# turn (with scaling 2 on two axes) and check the transformed coordinates.
x = np.array([1.0, 0.0, 0.0])
y = np.array([0.0, 1.0, 0.0])
z = np.array([0.0, 0.0, 1.0])
rotated_x, rotated_y, rotated_z = core.adjust_for_anisotropy_3d(x, y, z, 0., 0., 0., 2., 2., 90., 0., 0.)
self.assertTrue(np.allclose(rotated_x, np.array([1., 0., 0.])))
self.assertTrue(np.allclose(rotated_y, np.array([0., 0., 2.])))
self.assertTrue(np.allclose(rotated_z, np.array([0., -2., 0.])))
rotated_x, rotated_y, rotated_z = core.adjust_for_anisotropy_3d(x, y, z, 0., 0., 0., 2., 2., 0., 90., 0.)
self.assertTrue(np.allclose(rotated_x, np.array([0., 0., -1.])))
self.assertTrue(np.allclose(rotated_y, np.array([0., 2., 0.])))
self.assertTrue(np.allclose(rotated_z, np.array([2., 0., 0.])))
rotated_x, rotated_y, rotated_z = core.adjust_for_anisotropy_3d(x, y, z, 0., 0., 0., 2., 2., 0., 0., 90.)
self.assertTrue(np.allclose(rotated_x, np.array([0., 1., 0.])))
self.assertTrue(np.allclose(rotated_y, np.array([-2., 0., 0.])))
self.assertTrue(np.allclose(rotated_z, np.array([0., 0., 2.])))
def test_core_initialize_variogram_model(self):
# Bad parameter lists must raise ValueError; then check lags and
# semivariance for four equally spaced collinear points.
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'linear', [0.0], 'linear', 6, False)
self.assertRaises(ValueError, core.initialize_variogram_model, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], 'spherical', [0.0], 'spherical', 6, False)
# Points spaced 1 apart along the 45-degree line; values 1..4.
x = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
y = np.array([1.0 + n/np.sqrt(2) for n in range(4)])
z = np.arange(1.0, 5.0, 1.0)
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model(x, y, z, 'linear',
[0.0, 0.0], 'linear',
6, False)
self.assertTrue(np.allclose(lags, np.array([1.0, 2.0, 3.0])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_initialize_variogram_model_3d(self):
# Same checks as the 2D version, for the 3D initializer: bad parameter
# lists raise, then lags/semivariance for points on the 3D diagonal.
# Note the variogram_function argument is not a string in real life...
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'linear', [0.0], 'linear', 6, False)
self.assertRaises(ValueError, core.initialize_variogram_model_3d, self.simple_data_3d[:, 0],
self.simple_data_3d[:, 1], self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
'spherical', [0.0], 'spherical', 6, False)
lags, semivariance, variogram_model_parameters = core.initialize_variogram_model_3d(np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
np.array([1., 2., 3., 4.]),
'linear', [0.0, 0.0],
'linear', 3, False)
# Neighbouring diagonal points are sqrt(3) apart.
self.assertTrue(np.allclose(lags, np.array([np.sqrt(3.), 2.*np.sqrt(3.), 3.*np.sqrt(3.)])))
self.assertTrue(np.allclose(semivariance, np.array([0.5, 2.0, 4.5])))
def test_core_calculate_variogram_model(self):
# Fit each supported variogram model to synthetic lag/semivariance data
# generated from known parameters and check the recovered parameters.
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
# Same fit with weighting enabled must give the same result here.
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([2.05, 2.95, 4.05, 4.95]),
'linear', variogram_models.linear_variogram_model, True)
self.assertTrue(np.allclose(res, np.array([0.98, 1.05]), 0.01, 0.01))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 2.8284, 5.1962, 8.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 1.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.0, 1.4142, 1.7321, 2.0]),
'power', variogram_models.power_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([1.0, 0.5, 0.0])))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([1.2642, 1.7293, 1.9004, 1.9634]),
'exponential', variogram_models.exponential_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
res = core.calculate_variogram_model(np.array([1.0, 2.0, 3.0, 4.0]), np.array([0.5769, 1.4872, 1.9065, 1.9914]),
'gaussian', variogram_models.gaussian_variogram_model, False)
self.assertTrue(np.allclose(res, np.array([2.0, 3.0, 0.0]), 0.001, 0.001))
def test_core_krige(self):
    """Check 2-point ordinary kriging against Kitanidis example 3.2."""
    data = np.array([[9.7, 47.6, 1.22],
                     [43.8, 24.6, 2.822]])
    xpts, ypts, vals = data[:, 0], data[:, 1], data[:, 2]
    # Prediction at an unsampled location.
    z, ss = core.krige(xpts, ypts, vals, (18.8, 67.9),
                       variogram_models.linear_variogram_model, [0.006, 0.1])
    self.assertAlmostEqual(z, 1.6364, 4)
    self.assertAlmostEqual(ss, 0.4201, 4)
    # Prediction exactly at a data point reproduces it with zero variance.
    z, ss = core.krige(xpts, ypts, vals, (43.8, 24.6),
                       variogram_models.linear_variogram_model, [0.006, 0.1])
    self.assertAlmostEqual(z, 2.822, 3)
    self.assertAlmostEqual(ss, 0.0, 3)
def test_core_krige_3d(self):
    """Check 2-point 3D kriging (Kitanidis example 3.2 lifted to z=1)."""
    data = np.array([[9.7, 47.6, 1.0, 1.22],
                     [43.8, 24.6, 1.0, 2.822]])
    xpts, ypts, zpts, vals = data[:, 0], data[:, 1], data[:, 2], data[:, 3]
    # Prediction at an unsampled location.
    z, ss = core.krige_3d(xpts, ypts, zpts, vals, (18.8, 67.9, 1.0),
                          variogram_models.linear_variogram_model, [0.006, 0.1])
    self.assertAlmostEqual(z, 1.6364, 4)
    self.assertAlmostEqual(ss, 0.4201, 4)
    # Prediction exactly at a data point reproduces it with zero variance.
    z, ss = core.krige_3d(xpts, ypts, zpts, vals, (43.8, 24.6, 1.0),
                          variogram_models.linear_variogram_model, [0.006, 0.1])
    self.assertAlmostEqual(z, 2.822, 3)
    self.assertAlmostEqual(ss, 0.0, 3)
def test_ok(self):
# Test to compare OK results to those obtained using KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0])
# Both execution backends must reproduce the reference grid.
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.ok_test_answer))
z, ss = ok.execute('grid', self.ok_test_gridx, self.ok_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.ok_test_answer))
def test_ok_update_variogram_model(self):
# Unknown model names raise at construction and on update; updating to
# a different model must change every stored variogram attribute.
self.assertRaises(ValueError, OrdinaryKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = ok.variogram_model
variogram_parameters = ok.variogram_model_parameters
anisotropy_scaling = ok.anisotropy_scaling
anisotropy_angle = ok.anisotropy_angle
self.assertRaises(ValueError, ok.update_variogram_model, 'blurg')
ok.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == ok.variogram_model)
self.assertFalse(variogram_parameters == ok.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == ok.anisotropy_scaling)
self.assertFalse(anisotropy_angle == ok.anisotropy_angle)
def test_ok_execute(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
self.assertRaises(ValueError, ok.execute, 'blurg', self.simple_gridx, self.simple_gridy)
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, ok.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, ok.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = ok.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_cython_ok(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C')
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
closest_points = 4
z1, ss1 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop',
n_closest_points=closest_points)
z2, ss2 = ok.execute('grid', self.simple_gridx, self.simple_gridy, backend='C',
n_closest_points=closest_points)
self.assertTrue(np.allclose(z1, z2))
self.assertTrue(np.allclose(ss1, ss2))
def test_uk(self):
# Test to compare UK with linear drift to results from KT3D_H2O.
# (M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater, vol. 47, no. 4, 580-586.)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='exponential', variogram_parameters=[500.0, 3000.0, 0.0],
drift_terms=['regional_linear'])
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='vectorized')
self.assertTrue(np.allclose(z, self.uk_test_answer))
z, ss = uk.execute('grid', self.uk_test_gridx, self.uk_test_gridy, backend='loop')
self.assertTrue(np.allclose(z, self.uk_test_answer))
def test_uk_update_variogram_model(self):
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], variogram_model='blurg')
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['external_Z'], external_drift=np.array([0]))
self.assertRaises(ValueError, UniversalKriging, self.test_data[:, 0], self.test_data[:, 1],
self.test_data[:, 2], drift_terms=['point_log'])
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2])
variogram_model = uk.variogram_model
variogram_parameters = uk.variogram_model_parameters
anisotropy_scaling = uk.anisotropy_scaling
anisotropy_angle = uk.anisotropy_angle
self.assertRaises(ValueError, uk.update_variogram_model, 'blurg')
uk.update_variogram_model('power', anisotropy_scaling=3.0, anisotropy_angle=45.0)
self.assertFalse(variogram_model == uk.variogram_model)
self.assertFalse(variogram_parameters == uk.variogram_model_parameters)
self.assertFalse(anisotropy_scaling == uk.anisotropy_scaling)
self.assertFalse(anisotropy_angle == uk.anisotropy_angle)
def test_uk_calculate_data_point_zscalars(self):
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem)
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=np.arange(0.0, 5.0, 1.0))
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 0.0],
drift_terms=['external_Z'], external_drift=dem, external_drift_x=dem_x,
external_drift_y=dem_y)
self.assertTrue(np.allclose(uk.z_scalars, self.simple_data[:, 0]))
xi, yi = np.meshgrid(np.arange(0.0, 5.3, 0.1), self.simple_gridy)
self.assertRaises(ValueError, uk._calculate_data_point_zscalars, xi, yi)
xi, yi = np.meshgrid(np.arange(0.0, 5.0, 0.1), self.simple_gridy)
z_scalars = uk._calculate_data_point_zscalars(xi, yi)
self.assertTrue(np.allclose(z_scalars[0, :], np.arange(0.0, 5.0, 0.1)))
def test_uk_execute_single_point(self):
# Test data and answer from lecture notes by Nicolas Christou, UCLA Stats
data = np.array([[61.0, 139.0, 477.0],
[63.0, 140.0, 696.0],
[64.0, 129.0, 227.0],
[68.0, 128.0, 646.0],
[71.0, 140.0, 606.0],
[73.0, 141.0, 791.0],
[75.0, 128.0, 783.0]])
point = (65.0, 137.0)
z_answer = 567.54
ss_answer = 9.044
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model='exponential',
variogram_parameters=[10.0, 9.99, 0.0], drift_terms=['regional_linear'])
z, ss = uk.execute('points', np.array([point[0]]), np.array([point[1]]), backend='vectorized')
self.assertAlmostEqual(z_answer, z, places=0)
self.assertAlmostEqual(ss_answer, ss, places=0)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='vectorized')
self.assertAlmostEqual(z, 477.0, 3)
self.assertAlmostEqual(ss, 0.0, 3)
z, ss = uk.execute('points', np.array([61.0]), np.array([139.0]), backend='loop')
self.assertAlmostEqual(z, 477.0, 3)
self.assertAlmostEqual(ss, 0.0, 3)
def test_uk_execute(self):
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
self.assertRaises(ValueError, uk.execute, 'blurg', self.simple_gridx, self.simple_gridy)
self.assertRaises(ValueError, uk.execute, 'grid', self.simple_gridx, self.simple_gridy, backend='mrow')
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
shape = (self.simple_gridy.size, self.simple_gridx.size)
self.assertEqual(z.shape, shape)
self.assertEqual(ss.shape, shape)
self.assertNotEqual(np.amax(z), np.amin(z))
self.assertNotEqual(np.amax(ss), np.amin(ss))
self.assertFalse(np.ma.is_masked(z))
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='vectorized')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='vectorized')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='vectorized')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(IOError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, backend='loop')
mask = np.array([True, False])
self.assertRaises(ValueError, uk.execute, 'masked', self.simple_gridx, self.simple_gridy, mask=mask,
backend='loop')
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
z, ss = uk.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask.T, backend='loop')
self.assertTrue(np.ma.is_masked(z))
self.assertTrue(np.ma.is_masked(ss))
self.assertIs(z[0, 0], np.ma.masked)
self.assertIs(ss[0, 0], np.ma.masked)
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='vectorized')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='vectorized')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
self.assertRaises(ValueError, uk.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
backend='loop')
z, ss = uk.execute('points', self.simple_gridx[0], self.simple_gridy[0], backend='loop')
self.assertEqual(z.shape, (1,))
self.assertEqual(ss.shape, (1,))
def test_ok_uk_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='vectorized')
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
z_ok, ss_ok = ok.execute('grid', gridx, gridy, backend='loop')
z_uk, ss_uk = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok, z_uk))
self.assertTrue(np.allclose(ss_ok, ss_uk))
def test_ok_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
ok = OrdinaryKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_ok_v, ss_ok_v = ok.execute('grid', gridx, gridy, backend='vectorized')
z_ok_l, ss_ok_l = ok.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_ok_v, z_ok_l))
self.assertTrue(np.allclose(ss_ok_v, ss_ok_l))
def test_uk_backends_produce_same_result(self):
gridx = np.linspace(1067000.0, 1072000.0, 100)
gridy = np.linspace(241500.0, 244000.0, 100)
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='linear', verbose=False, enable_plotting=False)
z_uk_v, ss_uk_v = uk.execute('grid', gridx, gridy, backend='vectorized')
z_uk_l, ss_uk_l = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z_uk_v, z_uk_l))
self.assertTrue(np.allclose(ss_uk_v, ss_uk_l))
def test_kriging_tools(self):
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx, self.simple_gridy)
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
z_write, ss_write = ok.execute('masked', self.simple_gridx, self.simple_gridy, mask=self.mask)
kt.write_asc_grid(self.simple_gridx, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=1)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.ma.allclose(z_write, np.ma.masked_where(z_read == no_data, z_read),
masked_equal=True, rtol=0.01, atol=0.01))
self.assertTrue(np.allclose(self.simple_gridx, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
ok = OrdinaryKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2])
z_write, ss_write = ok.execute('grid', self.simple_gridx_2, self.simple_gridy)
kt.write_asc_grid(self.simple_gridx_2, self.simple_gridy, z_write,
filename=os.path.join(os.getcwd(), 'test_data/temp.asc'), style=2)
z_read, x_read, y_read, cellsize, no_data = kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/temp.asc'))
self.assertTrue(np.allclose(z_write, z_read, 0.01, 0.01))
self.assertTrue(np.allclose(self.simple_gridx_2, x_read))
self.assertTrue(np.allclose(self.simple_gridy, y_read))
os.remove(os.path.join(os.getcwd(), 'test_data/temp.asc'))
def test_uk_three_primary_drifts(self):
well = np.array([[1.1, 1.1, -1.0]])
dem = np.arange(0.0, 5.1, 0.1)
dem = np.repeat(dem[np.newaxis, :], 6, axis=0)
dem_x = np.arange(0.0, 5.1, 0.1)
dem_y = np.arange(0.0, 6.0, 1.0)
uk = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'external_Z', 'point_log'],
point_drift=well, external_drift=dem, external_drift_x=dem_x, external_drift_y=dem_y)
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='vectorized')
self.assertEquals(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEquals(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
z, ss = uk.execute('grid', self.simple_gridx, self.simple_gridy, backend='loop')
self.assertEquals(z.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertEquals(ss.shape, (self.simple_gridy.shape[0], self.simple_gridx.shape[0]))
self.assertTrue(np.all(np.isfinite(z)))
self.assertFalse(np.all(np.isnan(z)))
self.assertTrue(np.all(np.isfinite(ss)))
self.assertFalse(np.all(np.isnan(ss)))
def test_uk_specified_drift(self):
xg, yg = np.meshgrid(self.simple_gridx, self.simple_gridy)
well = np.array([[1.1, 1.1, -1.0]])
point_log = well[0, 2] * np.log(np.sqrt((xg - well[0, 0])**2. + (yg - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log)):
point_log[np.isinf(point_log)] = -100. * well[0, 2] * -1.
point_log_data = well[0, 2] * np.log(np.sqrt((self.simple_data[:, 0] - well[0, 0])**2. +
(self.simple_data[:, 1] - well[0, 1])**2.)) * -1.
if np.any(np.isinf(point_log_data)):
point_log_data[np.isinf(point_log_data)] = -100. * well[0, 2] * -1.
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=self.simple_data[:, 0])
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:2, 0]])
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1]])
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[self.simple_gridx, self.simple_gridy])
self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=self.simple_gridx)
self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy, specified_drift_arrays=[xg, yg])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
uk_spec = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['specified'],
specified_drift=[self.simple_data[:, 0], self.simple_data[:, 1], point_log_data])
z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx, self.simple_gridy,
specified_drift_arrays=[xg, yg, point_log])
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_spec, z_lin))
self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk_functional_drift(self):
well = np.array([[1.1, 1.1, -1.0]])
func_x = lambda x, y: x
func_y = lambda x, y: y
func_well = lambda x, y: - well[0, 2] * np.log(np.sqrt((x - well[0, 0])**2. + (y - well[0, 1])**2.))
self.assertRaises(ValueError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'])
self.assertRaises(TypeError, UniversalKriging, self.simple_data[:, 0], self.simple_data[:, 1],
self.simple_data[:, 2], variogram_model='linear', drift_terms=['functional'],
functional_drift=func_x)
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear'])
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'], functional_drift=[func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['point_log'], point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
uk_func = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['functional'],
functional_drift=[func_x, func_y, func_well])
z_func, ss_func = uk_func.execute('grid', self.simple_gridx, self.simple_gridy)
uk_lin = UniversalKriging(self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2],
variogram_model='linear', drift_terms=['regional_linear', 'point_log'],
point_drift=well)
z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx, self.simple_gridy)
self.assertTrue(np.allclose(z_func, z_lin))
self.assertTrue(np.allclose(ss_func, ss_lin))
def test_uk_with_external_drift(self):
dem, demx, demy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_dem.asc'))
uk = UniversalKriging(self.test_data[:, 0], self.test_data[:, 1], self.test_data[:, 2],
variogram_model='spherical',
variogram_parameters=[500.0, 3000.0, 0.0],
anisotropy_scaling=1.0, anisotropy_angle=0.0,
drift_terms=['external_Z'], external_drift=dem,
external_drift_x=demx, external_drift_y=demy,
verbose=False)
answer, gridx, gridy, cellsize, no_data = \
kt.read_asc_grid(os.path.join(os.getcwd(), 'test_data/test3_answer.asc'))
z, ss = uk.execute('grid', gridx, gridy, backend='vectorized')
self.assertTrue(np.allclose(z, answer))
z, ss = uk.execute('grid', gridx, gridy, backend='loop')
self.assertTrue(np.allclose(z, answer))
def test_force_exact(self):
data = np.array([[1., 1., 2.],
[2., 2., 1.5],
[3., 3., 1.]])
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[1.0, 1.0])
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = ok.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = ok.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = ok.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = ok.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = ok.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2])
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='vectorized')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='vectorized')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='vectorized')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='vectorized')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='vectorized',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', [1., 2., 3.], [1., 2., 3.], backend='loop')
self.assertAlmostEqual(z[0, 0], 2.0)
self.assertAlmostEqual(ss[0, 0], 0.0)
self.assertAlmostEqual(z[1, 1], 1.5)
self.assertAlmostEqual(ss[1, 1], 0.0)
self.assertAlmostEqual(z[2, 2], 1.0)
self.assertAlmostEqual(ss[2, 2], 0.0)
self.assertNotAlmostEqual(ss[0, 2], 0.0)
self.assertNotAlmostEqual(ss[2, 0], 0.0)
z, ss = uk.execute('points', [1., 2., 3., 3.], [2., 1., 1., 3.], backend='loop')
self.assertNotAlmostEqual(ss[0], 0.0)
self.assertNotAlmostEqual(ss[1], 0.0)
self.assertNotAlmostEqual(ss[2], 0.0)
self.assertAlmostEqual(z[3], 1.0)
self.assertAlmostEqual(ss[3], 0.0)
z, ss = uk.execute('grid', np.arange(0., 4., 0.1), np.arange(0., 4., 0.1), backend='loop')
self.assertAlmostEqual(z[10, 10], 2.)
self.assertAlmostEqual(ss[10, 10], 0.)
self.assertAlmostEqual(z[20, 20], 1.5)
self.assertAlmostEqual(ss[20, 20], 0.)
self.assertAlmostEqual(z[30, 30], 1.0)
self.assertAlmostEqual(ss[30, 30], 0.)
self.assertNotAlmostEqual(ss[0, 0], 0.0)
self.assertNotAlmostEqual(ss[15, 15], 0.0)
self.assertNotAlmostEqual(ss[10, 0], 0.0)
self.assertNotAlmostEqual(ss[0, 10], 0.0)
self.assertNotAlmostEqual(ss[20, 10], 0.0)
self.assertNotAlmostEqual(ss[10, 20], 0.0)
self.assertNotAlmostEqual(ss[30, 20], 0.0)
self.assertNotAlmostEqual(ss[20, 30], 0.0)
z, ss = uk.execute('grid', np.arange(0., 3.1, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertTrue(np.any(ss <= 1e-15))
self.assertFalse(np.any(ss[:9, :30] <= 1e-15))
self.assertFalse(np.allclose(z[:9, :30], 0.))
z, ss = uk.execute('grid', np.arange(0., 1.9, 0.1), np.arange(2.1, 3.1, 0.1), backend='loop')
self.assertFalse(np.any(ss <= 1e-15))
z, ss = uk.execute('masked', np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25), backend='loop',
mask=np.asarray(np.meshgrid(np.arange(2.5, 3.5, 0.1), np.arange(2.5, 3.5, 0.25))[0] == 0.))
self.assertTrue(ss[2, 5] <= 1e-15)
self.assertFalse(np.allclose(ss, 0.))
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 1.),
variogram_models.linear_variogram_model, [1.0, 1.0])
self.assertAlmostEqual(z, 2.)
self.assertAlmostEqual(ss, 0.)
z, ss = core.krige(data[:, 0], data[:, 1], data[:, 2], (1., 2.),
variogram_models.linear_variogram_model, [1.0, 1.0])
self.assertNotAlmostEqual(ss, 0.)
data = np.zeros((50, 3))
x, y = np.meshgrid(np.arange(0., 10., 1.), np.arange(0., 10., 2.))
data[:, 0] = np.ravel(x)
data[:, 1] = np.ravel(y)
data[:, 2] = np.ravel(x) * np.ravel(y)
ok = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = ok.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
uk = UniversalKriging(data[:, 0], data[:, 1], data[:, 2],
variogram_model='linear', variogram_parameters=[100.0, 1.0])
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='vectorized')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='vectorized')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0., 10., 1.), np.arange(0., 10., 2.), backend='loop')
self.assertTrue(np.allclose(np.ravel(z), data[:, 2]))
self.assertTrue(np.allclose(ss, 0.))
z, ss = uk.execute('grid', np.arange(0.5, 10., 1.), np.arange(0.5, 10., 2.), backend='loop')
self.assertFalse(np.allclose(np.ravel(z), data[:, 2]))
self.assertFalse(np.allclose(ss, 0.))
def test_custom_variogram(self):
    """Exercise the 'custom' variogram model for both 2D kriging classes."""
    def log_model(params, dist):
        return params[0] * np.log10(dist + params[1]) + params[2]

    x, y, z = self.simple_data[:, 0], self.simple_data[:, 1], self.simple_data[:, 2]
    for krige_cls in (UniversalKriging, OrdinaryKriging):
        # Unknown model names and incomplete 'custom' specifications must fail.
        self.assertRaises(ValueError, krige_cls, x, y, z, variogram_model='mrow')
        self.assertRaises(ValueError, krige_cls, x, y, z, variogram_model='custom')
        self.assertRaises(ValueError, krige_cls, x, y, z, variogram_model='custom',
                          variogram_function=0)
        self.assertRaises(ValueError, krige_cls, x, y, z, variogram_model='custom',
                          variogram_function=log_model)
        # A fully specified custom model is accepted at construction time...
        k = krige_cls(x, y, z, variogram_model='custom',
                      variogram_parameters=[1., 1., 1.], variogram_function=log_model)
        self.assertAlmostEqual(k.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
        # ...and via update_variogram_model() on an existing instance.
        k = krige_cls(x, y, z, variogram_model='linear')
        k.update_variogram_model('custom', variogram_parameters=[1., 1., 1.],
                                 variogram_function=log_model)
        self.assertAlmostEqual(k.variogram_function([1., 1., 1.], 1.), 1.3010, 4)
def test_ok3d(self):
    """Check OrdinaryKriging3D against reference results from KT3D_H2O and KT3D."""
    # Reference: M. Karanovic, M. Tonkin, and D. Wilson, 2009, Groundwater,
    # vol. 47, no. 4, 580-586 (KT3D_H2O).  The 2D test data is embedded at z = 0.
    k3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1],
                            np.zeros(self.test_data[:, 1].shape),
                            self.test_data[:, 2], variogram_model='exponential',
                            variogram_parameters=[500.0, 3000.0, 0.0])
    k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                        np.array([0.]), backend='vectorized')
    self.assertTrue(np.allclose(k, self.ok_test_answer))
    k, ss = k3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                        np.array([0.]), backend='loop')
    self.assertTrue(np.allclose(k, self.ok_test_answer))

    # Reference: KT3D.  execute() does not mutate the kriging object, so a
    # single instance serves both backends (previously an identical object
    # was rebuilt for the loop backend, which the first half of this test
    # shows is unnecessary).
    data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
    ans = np.genfromtxt('./test_data/test3d_answer.txt')
    ans_z = ans[:, 0].reshape((10, 10, 10))
    ans_ss = ans[:, 1].reshape((10, 10, 10))
    k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
                            variogram_model='linear', variogram_parameters=[1., 0.1])
    for backend in ('vectorized', 'loop'):
        k, ss = k3d.execute('grid', np.arange(10.), np.arange(10.), np.arange(10.),
                            backend=backend)
        self.assertTrue(np.allclose(k, ans_z, rtol=1e-3))
        self.assertTrue(np.allclose(ss, ans_ss, rtol=1e-3))
def test_ok3d_uk3d_and_backends_produce_same_results(self):
    """UK3D with no drift terms must match OK3D, for both execution backends."""
    # Case 1: exponential variogram on the 2D test data embedded at z = 0.
    ok3d = OrdinaryKriging3D(self.test_data[:, 0], self.test_data[:, 1],
                             np.zeros(self.test_data[:, 1].shape),
                             self.test_data[:, 2], variogram_model='exponential',
                             variogram_parameters=[500.0, 3000.0, 0.0])
    ok_vec, okss_vec = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                                    np.array([0.]), backend='vectorized')
    ok_loop, okss_loop = ok3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                                      np.array([0.]), backend='loop')
    uk3d = UniversalKriging3D(self.test_data[:, 0], self.test_data[:, 1],
                              np.zeros(self.test_data[:, 1].shape),
                              self.test_data[:, 2], variogram_model='exponential',
                              variogram_parameters=[500., 3000., 0.])
    uk_vec, ukss_vec = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                                    np.array([0.]), backend='vectorized')
    self.assertTrue(np.allclose(uk_vec, ok_vec))
    uk_loop, ukss_loop = uk3d.execute('grid', self.ok_test_gridx, self.ok_test_gridy,
                                      np.array([0.]), backend='loop')
    self.assertTrue(np.allclose(uk_loop, ok_loop))
    self.assertTrue(np.allclose(uk_loop, uk_vec))
    self.assertTrue(np.allclose(ukss_loop, ukss_vec))

    # Case 2: linear variogram on the external 3D data set; here the kriging
    # variances are compared between OK3D and UK3D as well.
    data = np.genfromtxt('./test_data/test3d_data.txt', skip_header=1)
    ok3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
                             variogram_model='linear', variogram_parameters=[1., 0.1])
    ok_vec, okss_vec = ok3d.execute('grid', np.arange(10.), np.arange(10.),
                                    np.arange(10.), backend='vectorized')
    ok_loop, okss_loop = ok3d.execute('grid', np.arange(10.), np.arange(10.),
                                      np.arange(10.), backend='loop')
    uk3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3],
                              variogram_model='linear', variogram_parameters=[1., 0.1])
    uk_vec, ukss_vec = uk3d.execute('grid', np.arange(10.), np.arange(10.),
                                    np.arange(10.), backend='vectorized')
    self.assertTrue(np.allclose(uk_vec, ok_vec))
    self.assertTrue(np.allclose(ukss_vec, okss_vec))
    uk_loop, ukss_loop = uk3d.execute('grid', np.arange(10.), np.arange(10.),
                                      np.arange(10.), backend='loop')
    self.assertTrue(np.allclose(uk_loop, ok_loop))
    self.assertTrue(np.allclose(ukss_loop, okss_loop))
    self.assertTrue(np.allclose(uk_loop, uk_vec))
    self.assertTrue(np.allclose(ukss_loop, ukss_vec))
def test_ok3d_update_variogram_model(self):
    """update_variogram_model on OrdinaryKriging3D must replace model and anisotropy."""
    self.assertRaises(ValueError, OrdinaryKriging3D, self.simple_data_3d[:, 0],
                      self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
                      self.simple_data_3d[:, 3], variogram_model='blurg')
    k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                            self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
    # Snapshot the state that update_variogram_model is expected to overwrite.
    watched = ('variogram_model', 'variogram_model_parameters',
               'anisotropy_scaling_y', 'anisotropy_scaling_z',
               'anisotropy_angle_x', 'anisotropy_angle_y', 'anisotropy_angle_z')
    before = dict((name, getattr(k3d, name)) for name in watched)
    self.assertRaises(ValueError, k3d.update_variogram_model, 'blurg')
    k3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
                               anisotropy_angle_x=45.0, anisotropy_angle_y=45.0,
                               anisotropy_angle_z=45.0)
    # Every watched attribute must have changed.
    for name in watched:
        self.assertFalse(before[name] == getattr(k3d, name))
def test_uk3d_update_variogram_model(self):
    """update_variogram_model on UniversalKriging3D must replace model and anisotropy."""
    self.assertRaises(ValueError, UniversalKriging3D, self.simple_data_3d[:, 0],
                      self.simple_data_3d[:, 1], self.simple_data_3d[:, 2],
                      self.simple_data_3d[:, 3], variogram_model='blurg')
    uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                              self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
    # Snapshot the state that update_variogram_model is expected to overwrite.
    watched = ('variogram_model', 'variogram_model_parameters',
               'anisotropy_scaling_y', 'anisotropy_scaling_z',
               'anisotropy_angle_x', 'anisotropy_angle_y', 'anisotropy_angle_z')
    before = dict((name, getattr(uk3d, name)) for name in watched)
    self.assertRaises(ValueError, uk3d.update_variogram_model, 'blurg')
    uk3d.update_variogram_model('power', anisotropy_scaling_y=3.0, anisotropy_scaling_z=3.0,
                                anisotropy_angle_x=45.0, anisotropy_angle_y=45.0,
                                anisotropy_angle_z=45.0)
    # Every watched attribute must have changed.
    for name in watched:
        self.assertFalse(before[name] == getattr(uk3d, name))
def test_ok3d_backends_produce_same_result(self):
    """Vectorized and loop backends of OrdinaryKriging3D must agree on a grid."""
    k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                            self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
                            variogram_model='linear')
    results = {}
    for backend in ('vectorized', 'loop'):
        results[backend] = k3d.execute('grid', self.simple_gridx_3d,
                                       self.simple_gridy_3d, self.simple_gridz_3d,
                                       backend=backend)
    # Compare both the kriged values and the kriging variances.
    self.assertTrue(np.allclose(results['vectorized'][0], results['loop'][0]))
    self.assertTrue(np.allclose(results['vectorized'][1], results['loop'][1]))
def test_ok3d_execute(self):
    """Exercise OrdinaryKriging3D.execute: output styles, backends, and masking."""
    k3d = OrdinaryKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                            self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
    # An unrecognized style string must be rejected.
    self.assertRaises(ValueError, k3d.execute, 'blurg', self.simple_gridx_3d,
                      self.simple_gridy_3d, self.simple_gridz_3d)
    # 'grid' style, vectorized backend: shape is (nz, ny, nx), values vary, no mask.
    k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
                        self.simple_gridz_3d, backend='vectorized')
    shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
    self.assertEqual(k.shape, shape)
    self.assertEqual(ss.shape, shape)
    self.assertNotEqual(np.amax(k), np.amin(k))
    self.assertNotEqual(np.amax(ss), np.amin(ss))
    self.assertFalse(np.ma.is_masked(k))
    # Same expectations for the loop backend.
    k, ss = k3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
                        self.simple_gridz_3d, backend='loop')
    shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
    self.assertEqual(k.shape, shape)
    self.assertEqual(ss.shape, shape)
    self.assertNotEqual(np.amax(k), np.amin(k))
    self.assertNotEqual(np.amax(ss), np.amin(ss))
    self.assertFalse(np.ma.is_masked(k))
    # 'masked' style, vectorized backend: a mask is mandatory (IOError) and
    # must match the grid shape (ValueError); masked nodes come back as
    # np.ma.masked in both outputs.
    self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, backend='vectorized')
    mask = np.array([True, False])
    self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, mask=mask, backend='vectorized')
    k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                        mask=self.mask_3d, backend='vectorized')
    self.assertTrue(np.ma.is_masked(k))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(k[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # A transposed mask is also accepted.
    z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                        mask=self.mask_3d.T, backend='vectorized')
    self.assertTrue(np.ma.is_masked(z))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(z[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # Repeat all masked checks with the loop backend.
    self.assertRaises(IOError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, backend='loop')
    mask = np.array([True, False])
    self.assertRaises(ValueError, k3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, mask=mask, backend='loop')
    k, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                        mask=self.mask_3d, backend='loop')
    self.assertTrue(np.ma.is_masked(k))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(k[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    z, ss = k3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                        mask=self.mask_3d.T, backend='loop')
    self.assertTrue(np.ma.is_masked(z))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(z[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # 'points' style: coordinate arrays of mismatched length are rejected;
    # scalar inputs produce 1-element outputs.  Both backends.
    self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
                      np.array([1.0]), backend='vectorized')
    k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
                        self.simple_gridz_3d[0], backend='vectorized')
    self.assertEqual(k.shape, (1,))
    self.assertEqual(ss.shape, (1,))
    self.assertRaises(ValueError, k3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
                      np.array([1.0]), backend='loop')
    k, ss = k3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
                        self.simple_gridz_3d[0], backend='loop')
    self.assertEqual(k.shape, (1,))
    self.assertEqual(ss.shape, (1,))
    # Synthetic data whose value equals the z coordinate: interpolation at
    # interior points should approximately recover z itself, on both backends
    # and for both 'grid' and 'points' styles.
    data = np.zeros((125, 4))
    z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
    data[:, 0] = np.ravel(x)
    data[:, 1] = np.ravel(y)
    data[:, 2] = np.ravel(z)
    data[:, 3] = np.ravel(z)
    k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
    k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
                        np.arange(0., 4., 1.), backend='vectorized')
    self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
    self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
    k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
                        np.arange(0., 4., 1.), backend='loop')
    self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
    self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
    k3d = OrdinaryKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
    k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
    self.assertTrue(np.allclose(k[0], 1., atol=0.01))
    self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
    k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
    self.assertTrue(np.allclose(k[0], 1., atol=0.01))
    self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_uk3d_execute(self):
    """Exercise UniversalKriging3D.execute: output styles, backends, and masking."""
    uk3d = UniversalKriging3D(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                              self.simple_data_3d[:, 2], self.simple_data_3d[:, 3])
    # An unrecognized style string must be rejected.
    self.assertRaises(ValueError, uk3d.execute, 'blurg', self.simple_gridx_3d,
                      self.simple_gridy_3d, self.simple_gridz_3d)
    # 'grid' style, vectorized backend: shape is (nz, ny, nx), values vary, no mask.
    k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
                         self.simple_gridz_3d, backend='vectorized')
    shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
    self.assertEqual(k.shape, shape)
    self.assertEqual(ss.shape, shape)
    self.assertNotEqual(np.amax(k), np.amin(k))
    self.assertNotEqual(np.amax(ss), np.amin(ss))
    self.assertFalse(np.ma.is_masked(k))
    # Same expectations for the loop backend.
    k, ss = uk3d.execute('grid', self.simple_gridx_3d, self.simple_gridy_3d,
                         self.simple_gridz_3d, backend='loop')
    shape = (self.simple_gridz_3d.size, self.simple_gridy_3d.size, self.simple_gridx_3d.size)
    self.assertEqual(k.shape, shape)
    self.assertEqual(ss.shape, shape)
    self.assertNotEqual(np.amax(k), np.amin(k))
    self.assertNotEqual(np.amax(ss), np.amin(ss))
    self.assertFalse(np.ma.is_masked(k))
    # 'masked' style, vectorized backend: a mask is mandatory (IOError) and
    # must match the grid shape (ValueError); masked nodes come back as
    # np.ma.masked in both outputs.
    self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, backend='vectorized')
    mask = np.array([True, False])
    self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, mask=mask, backend='vectorized')
    k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                         mask=self.mask_3d, backend='vectorized')
    self.assertTrue(np.ma.is_masked(k))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(k[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # A transposed mask is also accepted.
    z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                         mask=self.mask_3d.T, backend='vectorized')
    self.assertTrue(np.ma.is_masked(z))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(z[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # Repeat all masked checks with the loop backend.
    self.assertRaises(IOError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, backend='loop')
    mask = np.array([True, False])
    self.assertRaises(ValueError, uk3d.execute, 'masked', self.simple_gridx_3d, self.simple_gridy_3d,
                      self.simple_gridz_3d, mask=mask, backend='loop')
    k, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                         mask=self.mask_3d, backend='loop')
    self.assertTrue(np.ma.is_masked(k))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(k[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    z, ss = uk3d.execute('masked', self.simple_gridx_3d, self.simple_gridy_3d, self.simple_gridz_3d,
                         mask=self.mask_3d.T, backend='loop')
    self.assertTrue(np.ma.is_masked(z))
    self.assertTrue(np.ma.is_masked(ss))
    self.assertIs(z[0, 0, 0], np.ma.masked)
    self.assertIs(ss[0, 0, 0], np.ma.masked)
    # 'points' style: coordinate arrays of mismatched length are rejected;
    # scalar inputs produce 1-element outputs.  Both backends.
    self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
                      np.array([1.0]), backend='vectorized')
    k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
                         self.simple_gridz_3d[0], backend='vectorized')
    self.assertEqual(k.shape, (1,))
    self.assertEqual(ss.shape, (1,))
    self.assertRaises(ValueError, uk3d.execute, 'points', np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0]),
                      np.array([1.0]), backend='loop')
    k, ss = uk3d.execute('points', self.simple_gridx_3d[0], self.simple_gridy_3d[0],
                         self.simple_gridz_3d[0], backend='loop')
    self.assertEqual(k.shape, (1,))
    self.assertEqual(ss.shape, (1,))
    # Synthetic data whose value equals the z coordinate: interpolation at
    # interior points should approximately recover z itself, on both backends
    # and for both 'grid' and 'points' styles.
    data = np.zeros((125, 4))
    z, y, x = np.meshgrid(np.arange(0., 5., 1.), np.arange(0., 5., 1.), np.arange(0., 5., 1.))
    data[:, 0] = np.ravel(x)
    data[:, 1] = np.ravel(y)
    data[:, 2] = np.ravel(z)
    data[:, 3] = np.ravel(z)
    k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
    k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
                        np.arange(0., 4., 1.), backend='vectorized')
    self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
    self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
    k, ss = k3d.execute('grid', np.arange(2., 3., 0.1), np.arange(2., 3., 0.1),
                        np.arange(0., 4., 1.), backend='loop')
    self.assertTrue(np.allclose(k[0, :, :], 0., atol=0.01))
    self.assertTrue(np.allclose(k[1, :, :], 1., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2, :, :], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[3, :, :], 3., rtol=1.e-2))
    k3d = UniversalKriging3D(data[:, 0], data[:, 1], data[:, 2], data[:, 3], variogram_model='linear')
    k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='vectorized')
    self.assertTrue(np.allclose(k[0], 1., atol=0.01))
    self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
    k, ss = k3d.execute('points', [2.5, 2.5, 2.5], [2.5, 2.5, 2.5], [1., 2., 3.], backend='loop')
    self.assertTrue(np.allclose(k[0], 1., atol=0.01))
    self.assertTrue(np.allclose(k[1], 2., rtol=1.e-2))
    self.assertTrue(np.allclose(k[2], 3., rtol=1.e-2))
def test_force_exact_3d(self):
    """Kriging must reproduce data exactly (zero variance) at sampled 3D locations."""
    coords = [0.1, 0.2, 0.3]
    for krige_cls in (OrdinaryKriging3D, UniversalKriging3D):
        k3d = krige_cls(self.simple_data_3d[:, 0], self.simple_data_3d[:, 1],
                        self.simple_data_3d[:, 2], self.simple_data_3d[:, 3],
                        variogram_model='linear')
        for backend in ('vectorized', 'loop'):
            k, ss = k3d.execute('grid', coords, coords, coords, backend=backend)
            # Grid nodes coinciding with data points: exact value, zero variance.
            self.assertAlmostEqual(k[2, 0, 0], 0.9)
            self.assertAlmostEqual(ss[2, 0, 0], 0.0)
            self.assertAlmostEqual(k[0, 2, 0], 0.9)
            self.assertAlmostEqual(ss[0, 2, 0], 0.0)
            self.assertAlmostEqual(k[1, 2, 2], 0.7)
            self.assertAlmostEqual(ss[1, 2, 2], 0.0)
            # Nodes that do not coincide with data keep nonzero variance.
            self.assertNotAlmostEqual(ss[2, 2, 2], 0.0)
            self.assertNotAlmostEqual(ss[0, 0, 0], 0.0)
def test_uk3d_specified_drift(self):
    """Specified-drift UK3D must validate its inputs and match regional-linear drift."""
    zg, yg, xg = np.meshgrid(self.simple_gridz_3d, self.simple_gridy_3d,
                             self.simple_gridx_3d, indexing='ij')
    xd = self.simple_data_3d[:, 0]
    yd = self.simple_data_3d[:, 1]
    zd = self.simple_data_3d[:, 2]
    vals = self.simple_data_3d[:, 3]
    # Constructor validation: missing, non-list, and wrong-length drift arrays.
    self.assertRaises(ValueError, UniversalKriging3D, xd, yd, zd, vals,
                      variogram_model='linear', drift_terms=['specified'])
    self.assertRaises(TypeError, UniversalKriging3D, xd, yd, zd, vals,
                      variogram_model='linear', drift_terms=['specified'],
                      specified_drift=self.simple_data_3d[:, 0])
    self.assertRaises(ValueError, UniversalKriging3D, xd, yd, zd, vals,
                      variogram_model='linear', drift_terms=['specified'],
                      specified_drift=[self.simple_data_3d[:2, 0]])
    uk_spec = UniversalKriging3D(xd, yd, zd, vals, variogram_model='linear',
                                 drift_terms=['specified'],
                                 specified_drift=[self.simple_data_3d[:, 0],
                                                  self.simple_data_3d[:, 1],
                                                  self.simple_data_3d[:, 2]])
    # execute() validation: wrong-shaped arrays, non-list input, wrong count.
    self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d,
                      self.simple_gridy_3d, self.simple_gridz_3d,
                      specified_drift_arrays=[self.simple_gridx_3d,
                                              self.simple_gridy_3d,
                                              self.simple_gridz_3d])
    self.assertRaises(TypeError, uk_spec.execute, 'grid', self.simple_gridx_3d,
                      self.simple_gridy_3d, self.simple_gridz_3d,
                      specified_drift_arrays=self.simple_gridx_3d)
    self.assertRaises(ValueError, uk_spec.execute, 'grid', self.simple_gridx_3d,
                      self.simple_gridy_3d, self.simple_gridz_3d,
                      specified_drift_arrays=[zg])
    z_spec, ss_spec = uk_spec.execute('grid', self.simple_gridx_3d,
                                      self.simple_gridy_3d, self.simple_gridz_3d,
                                      specified_drift_arrays=[xg, yg, zg])
    # Specifying x, y, z as drifts is equivalent to the regional_linear drift term.
    uk_lin = UniversalKriging3D(xd, yd, zd, vals, variogram_model='linear',
                                drift_terms=['regional_linear'])
    z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d,
                                   self.simple_gridy_3d, self.simple_gridz_3d)
    self.assertTrue(np.allclose(z_spec, z_lin))
    self.assertTrue(np.allclose(ss_spec, ss_lin))
def test_uk3d_functional_drift(self):
    """Functional-drift UK3D must validate its inputs and match regional-linear drift."""
    def drift_x(x, y, z):
        return x

    def drift_y(x, y, z):
        return y

    def drift_z(x, y, z):
        return z

    xd = self.simple_data_3d[:, 0]
    yd = self.simple_data_3d[:, 1]
    zd = self.simple_data_3d[:, 2]
    vals = self.simple_data_3d[:, 3]
    # Constructor validation: missing and non-list functional drifts.
    self.assertRaises(ValueError, UniversalKriging3D, xd, yd, zd, vals,
                      variogram_model='linear', drift_terms=['functional'])
    self.assertRaises(TypeError, UniversalKriging3D, xd, yd, zd, vals,
                      variogram_model='linear', drift_terms=['functional'],
                      functional_drift=drift_x)
    # x, y, z as functional drifts is equivalent to the regional_linear drift term.
    uk_func = UniversalKriging3D(xd, yd, zd, vals, variogram_model='linear',
                                 drift_terms=['functional'],
                                 functional_drift=[drift_x, drift_y, drift_z])
    z_func, ss_func = uk_func.execute('grid', self.simple_gridx_3d,
                                      self.simple_gridy_3d, self.simple_gridz_3d)
    uk_lin = UniversalKriging3D(xd, yd, zd, vals, variogram_model='linear',
                                drift_terms=['regional_linear'])
    z_lin, ss_lin = uk_lin.execute('grid', self.simple_gridx_3d,
                                   self.simple_gridy_3d, self.simple_gridz_3d)
    self.assertTrue(np.allclose(z_func, z_lin))
    self.assertTrue(np.allclose(ss_func, ss_lin))
# Allow running the full test suite by executing this module directly.
if __name__ == '__main__':
    unittest.main()
|
yejingxin/PyKrige
|
pykrige/test.py
|
Python
|
bsd-3-clause
| 82,157
|
[
"Gaussian"
] |
11ea5ef4795a75c0f2de45cefaa92b21f611d1d2e23d07ab11f3c1a378a63c5a
|
#!/usr/bin/env python
import optparse, os, csv, glob, sys
import MySQLdb
import PEATSA.Core as Core
import PEATSA.Core.Matrix
import matplotlib.pyplot as plt
import numpy as np
import pypar
import Environment
class ProteinComplexTool:
    """Drives PEATSA stability/binding calculations for protein complexes under MPI.

    Each MPI process (via pypar) instantiates this class; ``myid``/``proc``
    identify the rank and world size so work can be partitioned between the
    root and slave ranks.
    """

    def __init__(self):
        # MPI topology, provided by pypar.
        self.proc = pypar.size()                   # number of MPI processes
        self.myid = pypar.rank()                   # this process's rank
        self.node = pypar.get_processor_name()     # host name of this rank

    def allProc(self):
        """Return True for every valid rank (0 <= myid < proc).

        NOTE(review): this is always True for a correctly initialized rank;
        kept for API symmetry with isSlave().
        """
        return self.myid in range(self.proc)

    def isSlave(self):
        """Return True for non-root ranks (rank > 0)."""
        return self.myid in range(1, self.proc)

    def DeltaStability(self, inputFile, mutationList, configurationFile, workingDirectory, outputDirectory):
        '''Calculates the stability difference between a protein and set of mutants

        Parameters:
            inputFile: A PDB file of the protein
            mutationList: A list of Data.MutationSet instances. Each represents a mutant of the protein.
            configurationFile: The location of a proteinDesignTool.conf file - defaults to home directory.
            workingDirectory: Where the calculation will be run.
            outputDirectory: Where the results will be written.

        Returns:
            A Data.DataSet instance containing one matrix, stabilityResults.
            Each row of this matrix corresponds to a mutant defined in the mutationList argument.'''
        # Creating the tool cleans the pdb file and copies it to the working
        # directory - use that cleaned copy from here on.
        tool = Core.ProteinDesignTool.ProteinDesignTool(configurationFile,
                                                        workingDirectory=workingDirectory,
                                                        pdbFile=inputFile,
                                                        outputDirectory=outputDirectory,
                                                        removeHeterogens=True)
        inputFile = tool.pdbFile
        # Generate the mutant structures.
        mutantCollection = Core.Data.MutantCollection(pdbFile=inputFile,
                                                      mutationList=mutationList,
                                                      location=outputDirectory,
                                                      temporary=True)
        # Run the stability calculation; results accumulate in the tool's
        # dataDirectory attribute (a Data.DataSet instance).
        tool.runStabilityCalculation(mutantFiles=mutantCollection.mutantFiles())
        # Clean up files copied to the working directory for Uffbaps.
        tool.cleanUp()
        return tool.dataDirectory

    def remALT(self, pdbfile, environment):
        """Strip alternate-location residues from <pdbfile>.pdb in place (HETATMs kept out)."""
        import Protool
        x = Protool.structureIO()
        x.readpdb('%s.pdb' % (pdbfile))
        x.RemoveALT()
        x.writepdb('%s.pdb' % (pdbfile), dont_write_HETATMS=1)
        environment.output('Removed alternate residues')

    def splitter(self, pdbDir, pdb, reactions_list, cur, db, environment):
        """Split a PDB into chain groups.

        If reactions_list is [''] the chain IDs are looked up in the database
        and every chain is split out individually; otherwise the user-supplied
        chain groupings are used.  Returns the chain descriptor used for
        naming the split files.
        """
        import string
        if reactions_list == ['']:
            # Discover the chains for this PDB from the database.
            # NOTE(review): the PDB id is interpolated straight into the SQL;
            # acceptable only for trusted local data - not injection-safe.
            environment.output(cur.execute("SELECT DISTINCT Chain_ID from pdb where PDB_ID = '%s';" % (pdb)))
            a = cur.fetchall()
            # Was a bare Python-2 `print 'a', a` debug statement; route it
            # through the environment's output channel instead.
            environment.output('chains fetched: %s' % (a,))
            chains = [i[0] for i in a]
            expr = [["segid " + i] for i in chains]
            # Build a compact chain string (e.g. "AB") for naming output files:
            # stringify the result tuple, drop punctuation, collapse spaces.
            b = str(a)
            exclude = set(string.punctuation)
            b = ''.join(ch for ch in b if ch not in exclude)
            e = ''.join(b.split(' '))
            # BUG FIX: previously called self.do_split(), which does not
            # exist - the method is named _do_split().
            self._do_split(pdbDir, pdb, expr, e, environment)
            return e
        else:
            # Build one selection expression per user-specified chain group.
            expr1 = []
            for c in reactions_list:
                if len(c) > 1:
                    expr1.append(["segid " + i for i in c])
                else:
                    expr1.append(["segid " + c])
            self._do_split(pdbDir, pdb, expr1, reactions_list, environment)
            return reactions_list

    def _do_split(self, pdbDir, pdb, expr, e, environment):
        """Write one PDB file per selection expression in expr, named <pdb>_<chains>.pdb."""
        import MDAnalysis
        u = MDAnalysis.Universe(pdbDir, permissive=False)
        for i in range(len(expr)):
            selection = u.selectAtoms(*expr[i])
            selection.write('%s_%s.pdb' % (pdb, e[i]))
            environment.output('Extracted chain(s) %s from %s' % (e[i], pdb))

    def createMutlist(self, pdb):
        """Return an alanine-scan mutation list (existing ALA/GLY positions skipped)."""
        return Core.Data.CreateScanList(pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])

    def displayResults(self, pdb, split_list, comp_list, cur, db, environment):
        """Fetch stored scores, report ddG (binding) or dG (stability), and plot.

        With more than one entry in split_list the complex scores are combined
        with the individual chain scores into ddG binding values and persisted;
        otherwise the raw stability scores are reported.  Exits the process
        after showing the plot.
        """
        width = 0.5
        # NOTE(review): table names are interpolated into the SQL; acceptable
        # only because they derive from trusted local identifiers.
        environment.output(cur.execute("SELECT * FROM results_%s;" % (split_list[0])))
        complexResults = cur.fetchall()
        mutations = [i[0] for i in complexResults]      # mutation labels
        complexScores = [i[1] for i in complexResults]  # dG of the complex
        count = len(mutations)                          # number of calculations
        ind = np.arange(count)
        if len(split_list) > 1:
            # Binding calculation: ddG = dG(complex) - dG(chains), regardless
            # of the order the chains were split in.
            chainResults = []
            for i in split_list[1:]:
                cur.execute("select * from results_%s;" % (i))
                chainResults.append(cur.fetchall())
            chainScores = [i[1] for y in chainResults for i in y]
            ddG = []
            cur.execute("create table if not exists ddG_%s_%s(mutation VARCHAR(10), ddG FLOAT);" % (pdb, comp_list))
            for i in range(len(complexScores)):
                ddG.append(complexScores[i] - chainScores[i])
            for i in range(len(mutations)):
                environment.output("ddG %s %s" % (mutations[i], ddG[i]))
                cur.execute("insert into ddG_%s_%s (mutation, ddG) VALUES (%s%s%s, %s%s%s);" % (pdb, comp_list, '"', mutations[i], '"', '"', ddG[i], '"'))
            plt.plot(ind + (width / 2), ddG, 'o-')
            plt.axhline(linewidth=2, color='r')
            plt.title("ddG Binding calculations for ALA scan of %s" % (split_list[0]))
        else:
            # Stability-only report for a single (unsplit) structure.
            for i in range(len(mutations)):
                environment.output("%s, %s" % (mutations[i], complexScores[i]))
            plt.bar(ind, complexScores, width, color='r')
            plt.title("dG Stability calculations for ALA scan of %s" % (split_list[0]))
        plt.xticks(ind + (width / 2), mutations, rotation=90, fontsize=8)
        plt.show()
        sys.exit()
def main():
    """Drive an ALA-scan / binding-energy workflow over PDBs stored in MySQL.

    NOTE(review): this script embeds DB credentials and builds SQL via string
    interpolation; both should be reviewed before reuse.
    """
    # Run program
    environment = Environment.Environment()
    # Connect to local database containing info about BMP pdbs
    db = MySQLdb.connect(host="localhost", user = "root", passwd = "samsung", db = "sat")
    cur = db.cursor()
    cur.execute("SELECT VERSION()")
    ver = cur.fetchone()
    environment.output( "MySQLdb connection successful")
    environment.output("MySQL server version: %s" % ver[0])
    #print "MySQL server version: %s" % ver[0]
    # Show pdbs in database
    cur.execute("SELECT distinct PDB_ID from pdb;")
    a = cur.fetchall()
    b = ','.join([i[0] for i in a])
    environment.output("PDBs in database:")
    environment.output(b)
    # Option to select pdb, config file, working dir etc..
    parser = optparse.OptionParser()
    # PDB option
    parser.add_option("-p", "--pdb", help="Choose all or a pdb id", dest="pdb", default ="all")
    # Mutation List or ALA scan option
    parser.add_option("-m", "--mutationList", help="Location of mutation list file", dest="mutList", default="ALA")
    # Configuration File
    parser.add_option("-c", "--configurationFile", help="Location of configuration file", dest="configFile", default="/home/satnam/proteinDesignTool.conf")
    # Output Directory
    parser.add_option("-o", "--outputDirectory", help="Location of output directory", dest="outputDir", default=os.getcwd())
    # Working Directory
    parser.add_option("-w", "--workingDirectory", help="Location of working directory", dest="workingDir", default=os.getcwd())
    # Choose option for user-defined calculations
    parser.add_option("-u", "--userCalcs", help="Choose True or False if you would like to specifiy the calculations, otherwise each chain will be split", dest="userCalcOpt", default=False)
    # Show Results Option
    parser.add_option("-s", "--showResults", help="Shows previous results? True or False. If they don't exist, they will be calculated.", dest="showResults", default=True)
    # Delete results from database
    parser.add_option("-d", "--deleteResults", help="Deletes all results for the specified pdb from the database. Default False.", dest="deleteResults", default=False)
    (opts, args) = parser.parse_args()
    # Instantiate the class
    run = ProteinComplexTool()
    # pdb name/file handling
    # The root process does all setup; workers only wait (see else below).
    if environment.isRoot():
        pdb = opts.pdb
        pdbFile = ''.join((pdb,'.pdb'))
        pdbDir = os.path.join(opts.outputDir,pdbFile)
        environment.output(pdbDir)
        # Checking if user selected PDB is in the database
        # NOTE(review): substring test against the comma-joined ID string;
        # a partial ID would pass - confirm intended.
        if opts.pdb != None:
            if opts.pdb not in b:
                # NOTE(review): sys.exit() raises SystemExit itself, so the
                # surrounding `raise` never executes.
                raise sys.exit('PDB not in Database, choose one from list')
            if opts.pdb in b:
                environment.output('PDB in Database')
                environment.output('Checking what calculations can be performed')
        # Check what calcs can be done with user defined PDB
        cur.execute("SELECT distinct Entity_ID, Chain_ID, Chain_name, type from pdb where PDB_ID = %s%s%s;" % ('"',pdb,'"'))
        entity = [] # entities in the pdbfile
        chains = []
        for i in cur.fetchall():
            environment.output("Entity: %s, Chain Name: %s, Type: %s, Chain ID: %s" % (i[0], i[2], i[3], i[1]))
            entity.append(i[0])
            chains.append(i[1])
        entity.sort()
        # Delete results
        if opts.deleteResults == 'True':
            cur.execute("SHOW tables like 'results_%s%s';" % (pdb, '%'))
            drop_tables=cur.fetchall()
            #environment.output(drop_tables)
            for i in drop_tables:
                # NOTE(review): i is a row *tuple* and the identifier is
                # single-quoted; this likely fails in MySQL - confirm.
                cur.execute("DROP TABLE '%s';" % (i))
                environment.output("Results for %s deleted" % i)
        else:
            pass
        # Remove Alternate Residues from pdb, will overwrite the file
        run.remALT(pdb, environment)
        # User defined splitting of chains from PDB, can be left
        # blank and the PDB will be split to individual chains
        reactions_list = ['']  # NOTE(review): never used afterwards.
        if opts.userCalcOpt != 'False':
            environment.output("What components are consumed (enter chain IDs in the form AB+C+D):")
            reactants = sys.stdin.readline()
            reactants = reactants.rstrip("\n")
            environment.output("What products are produced (enter chain IDs in the form ABC+D):")
            products = sys.stdin.readline()
            products = products.rstrip("\n")
        else:
            # NOTE(review): reactants/products stay unbound on this path, so
            # the tests below would raise NameError - confirm intended flow.
            pass
        # If user leaves input blank, then the default is to calculate
        # every chain individually vs complex
        if reactants == '':
            reactants = '+'.join(chains)
        else:
            pass
        if products == '':
            products = ''.join(chains)
        else:
            pass
        reactants_list = reactants.split('+')
        products_list = products.split('+')
        # Split the pdb into chains, returns chains that have been split (A,B etc)
        split_reactants = run.splitter(pdbDir,pdb,reactants_list,cur,db, environment)
        split_products = run.splitter(pdbDir,pdb,products_list,cur,db, environment)
        comp_list = split_products + split_reactants
        comp_list = '_'.join(comp_list)
        split_list = []
        split_list_products = []
        split_list_reactants = []
        for i in split_reactants:
            s = pdb+'_'+i
            split_list_reactants.append(s)
        for i in split_products:
            s = pdb+'_'+i
            split_list_products.append(s)
        splitlist = split_list_products + split_list_reactants
        # De-duplicate while preserving order.
        for i in splitlist:
            if i not in split_list:
                split_list.append(i)
        # Split_list is a list of the pdb and the individual pdbs
        # that have been split
        environment.output(split_list)
        #print split_list
        # Show results
        if opts.showResults == 'True':
            count = 0
            environment.output(cur.execute("show tables;"))
            tables = cur.fetchall()
            resTable = "".join(("results_",pdb))
            # Count existing results_<pdb>* tables; display if any exist.
            for i in tables:
                for y in i:
                    if y.startswith(resTable):
                        count +=1
            if count != 0:
                run.displayResults(pdb,split_list,comp_list,cur,db,environment)
        else:
            pass
        comp_list = comp_list +'_'+os.path.split(opts.mutList)[1]
        #environment.output(comp_list)
        # Run the calculations
        # Load and check mutant list given by user, else do ALA scan
        if opts.mutList != "ALA":
            mfile = Core.Data.MutationListFile(filename=opts.mutList,create=True)
            mfile.removeDuplicates(autoUpdate=False)
            mutList = mfile.mutantList()
        else:
            for i in split_list:
                w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
                mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
    else:
        # Non-root MPI ranks wait for work.
        # NOTE(review): split_list/opts are unbound on this rank when used
        # below - confirm they are broadcast elsewhere.
        print 'i, processor %d, am waiting' %(run.myid)
    #split_list=['2H62_ABCD','2H62_AB','26H2_CD']
    for i in split_list:
        if run.allProc():
            w_pdb = os.path.join(opts.outputDir,'%s.pdb' % (i))
            mutList = Core.Data.CreateScanList(pdbFile=w_pdb, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
        results = run.DeltaStability(inputFile=w_pdb,
                                     mutationList=mutList,
                                     configurationFile=opts.configFile,
                                     workingDirectory=opts.workingDir,
                                     outputDirectory=opts.outputDir)
        if environment.isRoot():
            # Persist one row per mutant: (PDB_ID, mutation, score).
            for mutant in range(results.stabilityResults.numberOfRows()):
                cur.execute("insert into results (PDB_ID, mutation, score) VALUES (%s,%s,%s);", (i,results.stabilityResults[mutant][0],results.stabilityResults[mutant][-1]))
            environment.output("Calculated %s stability and results added to database" % (i))
    # Display results
    #run.displayResults(pdb,split_list,comp_list,cur,db,environment)
if __name__=='__main__':
    # Script entry point.
    main()
|
dmnfarrell/peat
|
sandbox/ProteinComplexTool_parallel.py
|
Python
|
mit
| 15,027
|
[
"MDAnalysis"
] |
d5e9f083db211f90bc32d98908b65484578d16d83a1c283f6acd0ec73cd1ea84
|
# A Python implementation of Ailey's matlab tensor code.
import os
import numpy as np
import math
import SimpleITK as sitk
from scipy import ndimage
import nibabel as nib
from PIL import Image
import scipy.misc
from scipy import signal
import warnings
#warnings.filterwarnings("ignore")
def doggen(sigma):
    """
    Generate a derivative-of-Gaussian (DoG) kernel in 1D, 2D or 3D.

    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015.

    :param sigma: sequence of 1, 2 or 3 sigma values, one per dimension
                  (see defaults in generate_FSL_structure_tensor).
    :return: DoG kernel (differentiated along the first axis), normalized so
             that the absolute values sum to 1.
    :raises ValueError: if more than 3 dimensions are requested.
    """
    # Support radius: 3 sigma each side; arange end is exclusive, hence +1.
    halfsize = int(np.ceil(3 * np.max(sigma)))
    x = np.arange(-halfsize, halfsize + 1)
    dim = len(sigma)
    if dim == 1:
        X = x.astype(float)
        # BUGFIX: the original wrote sigma**2 on the sigma *list* (TypeError);
        # index the single sigma value instead.
        k = -X * np.exp(-X**2 / (2 * sigma[0]**2))
    elif dim == 2:
        X, Y = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        # BUGFIX: the original used sigma[0]^2 (bitwise XOR, not power) and
        # dropped the 1/(2*sigma[1]**2) factor in the Y term; this now matches
        # the separable form used in the 3D branch and in gaussgen.
        k = -X * np.exp(-X**2 / (2 * sigma[0]**2)) * np.exp(-Y**2 / (2 * sigma[1]**2))
    elif dim == 3:
        X, Y, Z = np.meshgrid(x, x, x)
        # Axis reorder kept from the original to match the MATLAB layout
        # ("obtained through vigorous testing" per the original author).
        X = X.transpose(0, 2, 1)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = -X * np.exp(np.divide(-np.power(X, 2), 2 * np.power(sigma[0], 2))) * np.exp(np.divide(-np.power(Y, 2), 2 * np.power(sigma[1], 2))) * np.exp(np.divide(-np.power(Z, 2), 2 * np.power(sigma[2], 2)))
    else:
        # The original printed a message and then crashed with NameError on k;
        # fail explicitly instead.
        raise ValueError('doggen only supports up to 3 dimensions')
    # Normalize so the kernel's absolute values sum to 1.
    return np.divide(k, np.sum(np.abs(k)))
def gaussgen(sigma):
    """
    Generate a Gaussian kernel in 1D, 2D or 3D.

    Source code in MATLAB obtained from Qiyuan Tian, Stanford University, September 2015.

    :param sigma: sequence of 1, 2 or 3 sigma values, one per dimension
                  (see defaults in generate_FSL_structure_tensor).
    :return: Gaussian kernel normalized to sum to 1.
    :raises ValueError: if more than 3 dimensions are requested.
    """
    # Support radius: 3 sigma each side; arange end is exclusive, hence +1.
    halfsize = int(np.ceil(3 * max(sigma)))
    x = np.arange(-halfsize, halfsize + 1)
    dim = len(sigma)
    if dim == 1:
        # BUGFIX: the original called .astype on a plain range object and
        # wrote sigma^2 (bitwise XOR) on the sigma *list*.
        X = x.astype(float)
        k = np.exp(-X**2 / (2 * sigma[0]**2))
    elif dim == 2:
        X, Y = np.meshgrid(x, x)
        X = X.astype(float)
        Y = Y.astype(float)
        k = np.exp(-X**2 / (2 * sigma[0]**2)) * np.exp(-Y**2 / (2 * sigma[1]**2))
    elif dim == 3:
        X, Y, Z = np.meshgrid(x, x, x)
        # Axis reorder kept from the original to match the MATLAB layout.
        X = X.transpose(0, 2, 1)
        Y = Y.transpose(2, 0, 1)
        Z = Z.transpose(2, 1, 0)
        X = X.astype(float)
        Y = Y.astype(float)
        Z = Z.astype(float)
        k = np.exp(-X**2 / (2 * sigma[0]**2)) * np.exp(-Y**2 / (2 * sigma[1]**2)) * np.exp(-Z**2 / (2 * sigma[2]**2))
    else:
        # The original printed a message and then crashed with NameError on k;
        # fail explicitly instead.
        raise ValueError('gaussgen only supports up to 3 dimensions')
    # Normalize so the kernel sums to 1 (values are all non-negative).
    return np.divide(k, np.sum(np.abs(k)))
def tiff_to_array(folder_path, input_path):
    """
    Open a single image (TIFF or other PIL-supported format) and return it
    as a numpy array. Called by tiff_stack_to_array.

    :param folder_path: Directory containing the image.
    :param input_path: Image filename within folder_path.
    :return: Numpy (float32) representation of the image.
    """
    # The "F" convert tag makes sure we are dealing with floats, not uint8;
    # this prevents underflow during later arithmetic.
    # BUGFIX: use os.path.join - plain string concatenation silently produced
    # a wrong path when folder_path lacked a trailing separator.
    im = Image.open(os.path.join(folder_path, input_path)).convert("F")
    imarray = np.array(im)
    return imarray
def tiff_stack_to_array(input_path):
    """
    Load all TIFFs in the directory input_path and stack them into a single
    numpy volume via the tiff_to_array helper.

    :param input_path: Folder or directory containing the .tiff stack.
    :return: Numpy array of the tiff stack (slices along axis 2).
    """
    im_list = []
    # BUGFIX: os.listdir returns files in arbitrary order; the docstring
    # requires slices stacked in filename order, so sort explicitly.
    # NOTE(review): lexicographic order only equals numeric order when the
    # frame numbers are zero-padded - confirm the naming convention.
    for filename in sorted(os.listdir(input_path)):
        if filename.endswith(".tiff"):
            im_list.append(tiff_to_array(input_path, filename))
    s = np.stack(im_list, axis=2)
    print(s.shape)
    return s
def nii_to_tiff_stack(input_path, token):
    """
    Function loads an nii using SITK, then converts the nii into a folder containing a TIFF stack.
    This function is useful later on for generating the structure tensor.
    :param input_path: Path to .nii file.
    :param token: Name of token; output goes to the <token>_TIFFs/ directory,
        one file per z-plane named <token>_<plane>.tiff.
    """
    image = sitk.ReadImage(input_path);
    planes_number = image.GetSize();
    data = sitk.GetArrayFromImage(image)
    z_dimension = planes_number[2];
    ## if we have (i, j, k), we want (k, j, i) (converts nibabel format to sitk format)
    ##new_im = aut_1367.swapaxes(0,2) # just swap i and k
    if not os.path.exists(token + "_TIFFs"):
        os.makedirs(token + "_TIFFs");
    plane = 0;  # NOTE(review): redundant - the loop variable rebinds it.
    for plane in range(0, z_dimension):
        output = data[plane, :, :]
        # NOTE(review): scipy.misc.toimage was removed in SciPy >= 1.2;
        # confirm the pinned SciPy version or port to PIL.Image.fromarray.
        scipy.misc.toimage(output).save(token + "_TIFFs/" + token + "_" + str(plane) + '.tiff')
def generate_FSL_structure_tensor(img_data, filename, dogsigmaArr=[1], gausigmaArr=[2.3], angleArr=[25]):
    """
    Function takes a numpy array (from TIFF_stack_to_array) and saves output
    FSL structure tensor as filename string. Allows inputting alternate dogsigmaArr,
    gausigmaArr, angleArr, although defaults to currently to parameters from MATLAB script.
    Also returns tensorfsl (the tensor fsl structure) image numpy array.
    ## Parameters (the script loops through all parameters and saves each result automatically)
    # dogsigmaArr = [1]; Sigma values for derivative of gaussian filter, recommended value: 0.6 - 1.3 (based on actual data)
    # gausigmaArr = [2.3]; Sigma values for gaussian filter, recommended value: 1.3 - 2.3 (based on actual data)
    # angleArr = [25]; Angle thresholds for fiber tracking, recommended value: 20 - 30.
    Follows code from MATLAB CAPTURE scripts.
    :param img_data: Numpy array of image, typically from tiff_stack_to_array called on a directory of TIFFs.
    :param filename: Name to save the FSL structure tensor as.
    :param dogsigmaArr: Sigma values for derivative of Gaussian filter, with recommended values between 0.6 - 1.3.
    :param gausigmaArr: Sigma values for Gaussian filter, with recommended values between 1.3 - 2.3.
    :param angleArr: Angle threshold for fiber tracking, with recommended values between 20 - 30.
    :return tensorfsl: TensorFSL format of structure tensor (upper triangular matrix)
    """
    # NOTE(review): mutable default arguments are shared across calls, and
    # angleArr is never used inside this function - confirm both are intended.
    for jj in range(len(dogsigmaArr)):
        dogsigma = dogsigmaArr[jj];
        print "Start DoG Sigma on " + str(dogsigma);
        # Generate dog kernels - one DoG kernel per gradient direction.
        dogkercc = doggen([dogsigma, dogsigma, dogsigma]);
        dogkercc = np.transpose(dogkercc, (0, 2, 1)); # annoying
        #print dogkercc.shape;
        #print dogkercc[:, :, 0];
        dogkerrr = np.transpose(dogkercc, (1, 0, 2));
        #print dogkerrr[:, :, 0];
        # NOTE(review): this second (0, 2, 1) transpose undoes the first, so
        # dogkerzz equals the raw doggen output rather than a z-oriented
        # kernel - confirm against the MATLAB original.
        dogkerzz = np.transpose(dogkercc, (0, 2, 1));
        #print dogkerzz[:, :, 0];
        # Compute gradients along each axis by 3D convolution.
        grr = signal.convolve(img_data, dogkerrr, 'same');
        #print grr[:, :, 0];
        gcc = signal.convolve(img_data, dogkercc, 'same');
        #print gcc[:, :, 0];
        gzz = signal.convolve(img_data, dogkerzz, 'same');
        #print gzz[:, :, 0];
        # Compute gradient products (the 6 unique structure-tensor entries).
        gprrrr = np.multiply(grr, grr);
        #print gprrrr[:, :, 0];
        gprrcc = np.multiply(grr, gcc);
        #print gprrcc[:, :, 0];
        gprrzz = np.multiply(grr, gzz);
        #print gprrzz[:, :, 0]
        gpcccc = np.multiply(gcc, gcc);
        gpcczz = np.multiply(gcc, gzz);
        gpzzzz = np.multiply(gzz, gzz);
        # Compute gradient amplitudes (Euclidean norm of the gradient).
        # print ga.dtype;
        ga = np.sqrt(gprrrr + gpcccc + gpzzzz);
        #print ga[:, :, 0];
        #print "GA SHAPE:"
        #print ga.shape;
        # Convert numpy ndarray object to Nifti data type (identity affine).
        gradient_amplitudes_data = nib.Nifti1Image(ga, affine=np.eye(4));
        # Save gradient amplitudes image
        nib.save(gradient_amplitudes_data, 'gradient_amplitudes.nii');
        # Compute unit gradient vectors (normalize by amplitude).
        gv = np.concatenate((grr[..., np.newaxis], gcc[..., np.newaxis], gzz[..., np.newaxis]), axis = 3);
        #print gv[:, :, 0, 0];
        gv = np.divide(gv, np.tile(ga[..., None], [1, 1, 1, 3]));
        #print gv[:, :, 0, 1];
        #print "GV SHAPE:"
        #print gv.shape;
        # Convert numpy ndarray object to Nifti data type
        gradient_vectors_data = nib.Nifti1Image(gv, affine=np.eye(4));
        # Save gradient vectors
        nib.save(gradient_vectors_data, 'gradient_vectors.nii');
        # Compute structure tensor: Gaussian-blur each gradient product.
        for kk in range(len(gausigmaArr)):
            gausigma = gausigmaArr[kk];
            print "Start Gauss Sigma with gausigma = " + str(gausigma);
            print "Generating Gaussian kernel..."
            gaussker = np.single(gaussgen([gausigma, gausigma, gausigma]));
            #print gaussker[:, :, 0];
            print "Blurring gradient products..."
            gprrrrgauss = signal.convolve(gprrrr, gaussker, "same");
            #print gprrrrgauss[:, :, 0];
            gprrccgauss = signal.convolve(gprrcc, gaussker, "same");
            #print gprrccgauss[:, :, 0];
            gprrzzgauss = signal.convolve(gprrzz, gaussker, "same");
            gpccccgauss = signal.convolve(gpcccc, gaussker, "same");
            gpcczzgauss = signal.convolve(gpcczz, gaussker, "same");
            gpzzzzgauss = signal.convolve(gpzzzz, gaussker, "same");
            print "Saving a copy for this Gaussian sigma..."
            # Assemble the 6 blurred products into FSL's upper-triangular order.
            tensorfsl = np.concatenate((gprrrrgauss[..., np.newaxis], gprrccgauss[..., np.newaxis], gprrzzgauss[..., np.newaxis], gpccccgauss[..., np.newaxis], gpcczzgauss[..., np.newaxis], gpzzzzgauss[..., np.newaxis]), axis = 3);
            # Swap components 2 and 3 (see tensor2tract for the inverse swap).
            tmp = np.copy(tensorfsl[:,:,:,3])
            tensorfsl[:,:,:,3] = tensorfsl[:,:,:,2]
            tensorfsl[:,:,:,2] = tmp
            # Convert numpy ndarray object to Nifti data type
            tensor_fsl_data = nib.Nifti1Image(tensorfsl, affine=np.eye(4));
            nib.save(tensor_fsl_data, str(filename) + "dogsigma_" + str(jj) + "gausigma_" + str(kk) + 'tensorfsl.nii');
    print 'Completed computing structure tensor on ' + str(filename) + '!'
    # Returns the tensor from the last (dogsigma, gausigma) combination.
    return tensorfsl
def plot_rgb(im):
    """Render a 3x3 grid of orthogonal slices (3 per axis) of an RGB volume.

    NOTE(review): relies on a global `plt` (matplotlib.pyplot), which is not
    imported in this module - confirm the caller provides it.
    """
    plt.rcParams.update({'axes.labelsize': 'x-large',
                         'axes.titlesize': 'x-large'})
    # Hand-picked slice coordinates for the standard 182x218x182 MNI grid;
    # otherwise slice at roughly 35/51/65% of each dimension.
    if im.shape == (182, 218, 182):
        x = [78, 90, 100]
        y = [82, 107, 142]
        z = [88, 103, 107]
    else:
        shap = im.shape
        x = [int(shap[0]*0.35), int(shap[0]*0.51), int(shap[0]*0.65)]
        y = [int(shap[1]*0.35), int(shap[1]*0.51), int(shap[1]*0.65)]
        z = [int(shap[2]*0.35), int(shap[2]*0.51), int(shap[2]*0.65)]
    coords = (x, y, z)
    labs = ['Sagittal Slice (YZ fixed)',
            'Coronal Slice (XZ fixed)',
            'Axial Slice (XY fixed)']
    var = ['X', 'Y', 'Z']
    idx = 0
    for i, coord in enumerate(coords):
        for pos in coord:
            idx += 1
            ax = plt.subplot(3, 3, idx)
            ax.set_title(var[i] + " = " + str(pos))
            # Sagittal/coronal slices are rotated 90 degrees for display;
            # only the first 3 channels (RGB) are shown.
            if i == 0:
                image = ndimage.rotate(im[pos, :, :,0:3], 90)
            elif i == 1:
                image = ndimage.rotate(im[:, pos, :,0:3], 90)
            else:
                image = im[:, :, pos,0:3]
            print image.shape
            # Label only the first column of each row.
            if idx % 3 == 1:
                ax.set_ylabel(labs[i])
            ax.yaxis.set_ticks([0, image.shape[0]/2, image.shape[0] - 1])
            ax.xaxis.set_ticks([0, image.shape[1]/2, image.shape[1] - 1])
            plt.imshow(image)
    fig = plt.gcf()
    fig.set_size_inches(12.5, 10.5, forward=True)
    return fig
def fiber_stream(f):
    """Plot fiber streamlines projected onto the XY, XZ and YZ planes and
    save the figure as tensor_streamlines.png.

    f is a sequence of streamlines, each an (n, 3) array of points.
    NOTE(review): depends on a global `plt` (matplotlib.pyplot) that is not
    imported in this module - confirm.
    """
    test = f
    print len(test)
    fig = plt.figure(1)
    plt.subplots(figsize=(10, 10))
    plt.subplot(311)
    plt.title("Y-axis vs X-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,0], test[i][:,1])
    plt.subplot(312)
    plt.title("Z-axis vs X-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,0], test[i][:,2])
    plt.subplot(313)
    plt.title("Z-axis vs Y-axis (" + str(len(test)) + " fibers)")
    for i in range(len(test)):
        plt.plot(test[i][:,1], test[i][:,2])
    plt.tight_layout()
    #fig = plt.show()
    fig.savefig('tensor_streamlines.png')
def tensor2tract(struct_tensor, is_fsl):
    """Convert a structure tensor volume to EuDX streamlines.

    BUGFIX: the original line was missing the `def` keyword, making this
    entire module a syntax error.

    :param struct_tensor: 4D array whose last axis holds the 6 lower-triangular
        tensor components; modified in place when is_fsl is True.
    :param is_fsl: if True, swap components 2 and 3 back from FSL ordering
        (the inverse of the swap done in generate_FSL_structure_tensor).
    :return: list of EuDX streamlines.
    """
    if is_fsl:
        tmp = np.copy(struct_tensor[:, :, :, 3])
        struct_tensor[:, :, :, 3] = struct_tensor[:, :, :, 2]
        struct_tensor[:, :, :, 2] = tmp
    # Rebuild full tensors, then eigen-decompose (dipy helpers).
    output = from_lower_triangular(struct_tensor)
    evals, evecs = decompose_tensor(output)
    FA = fractional_anisotropy(evals)
    RGB = color_fa(FA, evecs)
    # nb.save(nb.Nifti1Image(np.array(255 * RGB, 'uint8'), result.get_affine()), 'fsl_tensor_rgb_upper.nii.gz')
    # NOTE(review): `result` and `nb` are not defined anywhere in this module
    # (likely nibabel plus a loaded image from a removed caller) - confirm.
    affine = result.get_affine()
    fa = nb.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
    im = fa.get_data()
    fig = plot_rgb(im)
    plt.savefig('tensor_field_brain.png')
    # Deterministic tracking: quantize principal directions, then run EuDX.
    sphere = get_sphere('symmetric724')
    peak_indices = quantize_evecs(evecs, sphere.vertices)
    eu = EuDX(FA.astype('f8'), peak_indices, seeds=50000, odf_vertices=sphere.vertices, a_low=0.2)
    tensor_streamlines = [streamline for streamline in eu]
    return tensor_streamlines
|
NeuroDataDesign/seelviz
|
jon/algorithms/tractography.py
|
Python
|
apache-2.0
| 13,942
|
[
"Gaussian"
] |
793aa78b126847a4af1d3f4f83cdc56a49a3d91ba4d52a9560be104834f55838
|
#! /usr/bin/env python
# coding:utf-8
#########################################
# Anomaly Detection #
#########################################
from numpy import *
import numpy as np
from random import random
from matplotlib.pyplot import *
from pylab import *
from scipy.optimize import fmin_bfgs
from scipy.optimize import fmin_cg
from scipy.io import loadmat
from mpl_toolkits.mplot3d import Axes3D
class ML():
    """Base class with shared machine-learning utilities: data loading,
    feature normalization, sigmoid, and polynomial feature expansion.

    Subclasses (e.g. AD) override/extend with model-specific methods.
    """

    def __init__(self, x=None, y=None):
        # BUGFIX: the original used mutable default arguments (x=[], y=[]),
        # which are shared across all instances; translate None to a fresh
        # list so the observable default behavior is unchanged.
        self.X = [] if x is None else x
        self.Y = [] if y is None else y
        self.Theta = []
        self.Alpha = 0.01      # gradient-descent learning rate
        self.Iterations = 50   # gradient-descent iteration count
        self.Lambda = 1        # regularization strength

    def load(self, fname, d=','):
        """Load a delimited text file; all but the last column are features,
        the last column is the target."""
        data = loadtxt(fname, delimiter=d)
        self.X = data[:, :-1]
        self.Y = data[:, -1:]

    def loadMat(self, fname):
        """Load a MATLAB .mat file and return its variable dict."""
        return loadmat(fname)

    def initXY(self, data):
        """Prepend a bias column of ones; return (X, Y, m)."""
        m = data.shape[0]
        x = hstack((ones((m, 1)), data))
        return x, self.Y, m

    # Feature Normalize
    def Normalization(self, data):
        """Column-wise feature normalization: (data - mean) / std.

        :return: (normalized data, mu, sigma) so callers can apply the same
                 transform to new samples.
        """
        mu = mean(data, 0)
        sigma = std(data, 0)
        data_Norm = (data - mu) / sigma
        return data_Norm, mu, sigma

    def sigmoid(self, z):
        return 1 / (1 + exp(-z))

    def sigmoidGradient(self, z):
        """Derivative of the sigmoid: s(z) * (1 - s(z))."""
        return self.sigmoid(z) * (1 - self.sigmoid(z))

    def J(self):
        # Cost function - implemented by subclasses.
        pass

    def predict(self, x):
        """Predict for a single sample given as a plain list (bias prepended)."""
        return array([1] + x).dot(self.Theta)

    def evaluate(self):
        # Model evaluation - implemented by subclasses.
        pass

    # x,x^2,x^3,....x^p
    def polyFeatures(self, x, p):
        """Expand column vector x into powers [x, x^2, ..., x^p]."""
        x_poly = zeros((x.shape[0], p))
        # range (not xrange) keeps this working on both Python 2 and 3.
        for i in range(p):
            x_poly[:, i:i + 1] = x ** (i + 1)
        return x_poly

    # x1,x2,x1*x2,...
    def mapFeature(self, data, k):
        """Polynomial feature map of two columns up to total degree k:
        a bias column plus x1^j * x2^(i-j) for all 0 <= j <= i <= k."""
        x1, x2 = data[:, 0:1], data[:, 1:]
        m = x1.shape[0]
        x = ones((m, 1))
        for i in range(1, k + 1):
            for j in range(i + 1):
                # BUGFIX: terms of a polynomial feature map are *products* of
                # powers; the original's x1**j + x2**(i-j) produced degenerate
                # (sum) features.
                x = hstack((x, x1 ** j * x2 ** (i - j)))
        return x

    def addOne(self, x):
        """Prepend a bias column of ones to matrix x."""
        m = x.shape[0]
        one = ones((m, 1))
        return hstack((one, x))

    def plot(self):
        # Plotting - implemented by subclasses.
        pass

    def show(self):
        show()
class AD(ML):
    """Anomaly detection with a per-feature Gaussian model (ML course ex8)."""
    def __init__(self,fname):
        # NOTE(review): does not call ML.__init__; attributes are set directly.
        self.Lambda=1
        self.Theta=[]
        mat=self.loadMat(fname)
        self.X=mat['X']
        #self.Y=mat['y']
        # Cross-validation set is optional in the .mat file.
        if 'Xval' in mat:
            self.Xval=mat['Xval']
            self.Yval=mat['yval']
        #self.Xtest=mat['Xtest']
    # Estimate the parameters of a Gaussian distribution
    def estimateGaussian(self,x):
        """Return per-feature mean and variance as (n, 1) column vectors."""
        m,n=x.shape
        mu=mean(x,0).reshape((n,1))
        sigma2=var(x,0).reshape((n,1))
        return mu,sigma2
    # Compute the probability density function of the multivariate gaussian distribution
    def multivariateGaussian(self,x,mu,sigma2):
        """Per-feature Gaussian densities, shape (m, n).

        NOTE(review): despite the name, this returns one density column per
        feature (diagonal model) rather than their product; callers such as
        visualizeFit multiply the columns themselves - confirm intended.
        """
        x=x-mu.T
        p=e**(-x**2/(2*sigma2.T))/sqrt(2*pi*sigma2.T)
        return p
    # Find the best threshold (epsilon) to use for selecting outliners
    def selectThreshold(self,yval,pval):
        """Scan 1000 candidate thresholds over [pval.min(), pval.max());
        return the (epsilon, F1) pair that maximizes F1 on the CV set."""
        bestEpsilon=0
        bestF1=0
        F1=0
        stepsize=(pval.max()-pval.min())/1000
        for epsilon in arange(pval.min(),pval.max(),stepsize):
            predictions=pval<epsilon
            # Elementwise sums == 2 pick out tp / fp / fn cells.
            tp=sum((double(yval==1)+double(predictions==1))==2)
            fp=sum((double(yval==0)+double(predictions==1))==2)
            fn=sum((double(yval==1)+double(predictions==0))==2)
            if tp!=0:
                prec=tp*1./(tp+fp)
                rec=tp*1./(tp+fn)
                F1=2*prec*rec/(prec+rec)
            if F1>bestF1:
                bestF1=F1
                bestEpsilon=epsilon
        return bestEpsilon,bestF1
    #################
    # Plot Function #
    #################
    # Plot 2D Data
    def plotData(self):
        """Scatter-plot the first two features; returns self for chaining."""
        x=self.X
        plot(x[:,0],x[:,1],'bo',markersize=2,linewidth=0)
        xlabel('Latency (ms)')
        ylabel('Throughput (mb/s)')
        return self
    def drawLine(self,p1,p2):
        """Draw a line segment between points p1 and p2."""
        plot([p1[0],p2[0]],[p1[1],p2[1]],linewidth=2)
        return self
    def visualizeFit(self,x,mu,sigma2):
        """Overlay density contours of the fitted model on the data plot."""
        r=arange(x.min(),x.max(),.5)
        x1,x2=meshgrid(r,r)
        m,n=x1.shape
        x12=hstack((x1.flatten().reshape((m*n,1)),x2.flatten().reshape((m*n,1))))
        z=self.multivariateGaussian(x12,mu,sigma2)
        # Product of the two per-feature densities -> joint density grid.
        z=(z[:,0]*z[:,1]).reshape((m,n))
        self.plotData()
        contour(x1,x2,z)
        return self
    ##########################
    def testAD(self):
        """End-to-end demo: fit, visualize, pick threshold, mark outliers."""
        x=self.X
        xval=self.Xval
        yval=self.Yval
        mu,sigma2=self.estimateGaussian(x)
        # Get the density
        p=self.multivariateGaussian(x,mu,sigma2)
        # Visualize the fit
        self.visualizeFit(x,mu,sigma2)
        # Find Outliers
        pval=self.multivariateGaussian(xval,mu,sigma2)
        epsilon,F1=self.selectThreshold(yval,pval)
        outliners=where(p<epsilon)[0]
        plot(x[outliners,0],x[outliners,1],'ro',markersize=5)
        self.show()
if __name__=='__main__':
    # Demo entry point; the interactive calls below are left disabled.
    test=AD('ex8data1.mat')
    #test.plotData().show()
    #test.testAD()
|
Urinx/Machine_Learning
|
Anomaly-Detection/anomaly_detection.py
|
Python
|
gpl-2.0
| 4,101
|
[
"Gaussian"
] |
7e567f1689e2198ddda3488e575575ba07b65407b9b8d64d47595dba6b6103cb
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import pickle
from . import dependency_check
from qcelemental import constants
from psi4.driver import psifiles as psif
from psi4.driver.ipi_broker import ipi_broker
from psi4.driver.molutil import *
from psi4.driver.inputparser import process_input
from psi4.driver.p4util.util import *
from psi4.driver.p4util.testing import *
from psi4.driver.p4util.fcidump import *
from psi4.driver.p4util.text import *
from psi4.driver.qmmm import QMMM
from psi4.driver.pluginutil import *
from psi4.driver import gaussian_n
from psi4.driver import aliases
from psi4.driver import diatomic
from psi4.driver import wrapper_database
from psi4.driver import wrapper_autofrag
from psi4.driver import schema_wrapper
from psi4.driver import schema_wrapper as json_wrapper # Deprecate in 1.4
from psi4.driver import frac
from psi4.driver.driver import *
# Single functions
from psi4.driver.driver_cbs import cbs
from psi4.driver.p4util.python_helpers import set_options, set_module_options, pcm_helper, basis_helper
|
jgonthier/psi4
|
psi4/driver/__init__.py
|
Python
|
lgpl-3.0
| 1,918
|
[
"Psi4"
] |
da1cd11ffa4402a5c1c43263a381571f6a1b0de0c8585ea767ebfc4cd7a7cee7
|
import json
import os
import shutil
from click.testing import CliRunner, Result
from freezegun import freeze_time
from moto import mock_s3
import great_expectations
from great_expectations import DataContext
from great_expectations.cli import cli
from great_expectations.data_context.util import file_relative_path
from great_expectations.util import gen_directory_tree_str
from tests.cli.utils import (
VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
assert_no_logging_messages_or_tracebacks,
)
# BUGFIX: both branches of the original try/except imported the same module
# (`from unittest import mock`), so the ImportError fallback was dead code.
# unittest.mock is part of the standard library on all supported Pythons,
# so a plain import suffices.
from unittest import mock
def test_project_upgrade_already_up_to_date(v10_project_directory, caplog):
    """Upgrading a config_version 2 project reports the manual-steps message."""
    # test great_expectations project upgrade command with project with config_version 2
    # copy v2 yml
    shutil.copy(
        file_relative_path(
            __file__, "../../test_fixtures/upgrade_helper/great_expectations_v2.yml"
        ),
        os.path.join(v10_project_directory, "great_expectations.yml"),
    )
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        ["-c", v10_project_directory, "--v3-api", "project", "upgrade"],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert "Checking project..." in stdout
    assert (
        "The Upgrade Helper has performed the automated upgrade steps as part of upgrading your project to be compatible with Great Expectations V3 API, and the config_version of your great_expectations.yml has been automatically incremented to 3.0. However, manual steps are required in order for the upgrade process to be completed successfully."
        in stdout
    )
    assert (
        "Your project requires manual upgrade steps in order to be up-to-date."
        in stdout
    )
    # No unexpected log noise beyond the known deprecation warning.
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
        allowed_deprecation_message=VALIDATION_OPERATORS_DEPRECATION_MESSAGE,
    )
def test_upgrade_helper_intervention_on_cli_command(
    v10_project_directory, caplog, monkeypatch
):
    """Declining the upgrade prompt leaves the project untouched."""
    # test if cli detects out of date project and asks to run upgrade helper
    # decline upgrade and ensure config version was not modified
    runner: CliRunner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(v10_project_directory))
    result: Result = runner.invoke(
        cli,
        [
            "--v3-api",
            "checkpoint",
            "list",
        ],
        input="n\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    assert (
        "Your project appears to have an out-of-date config version (1.0) - the version number must be at least 3."
        in stdout
    )
    assert "In order to proceed, your project must be upgraded." in stdout
    assert (
        "Would you like to run the Upgrade Helper to bring your project up-to-date? [Y/n]:"
        in stdout
    )
    # NOTE(review): these literals contain ANSI color escape sequences whose
    # escape bytes may have been mangled in this copy - verify against VCS.
    assert (
        "Ok, exiting now. To upgrade at a later time, use the following command: [36mgreat_expectations project "
        "upgrade[0m" in stdout
    )
    assert (
        "To learn more about the upgrade process, visit ["
        "36mhttps://docs.greatexpectations.io/docs/guides/miscellaneous/migration_guide#migrating-to-the-batch-request-v3-api"
        in stdout
    )
    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
    # make sure config version unchanged
    assert (
        DataContext.get_ge_config_version(context_root_dir=v10_project_directory) == 1.0
    )
    # NOTE(review): the tree literal's original indentation may have been lost
    # in this copy - verify against the fixture before relying on it.
    expected_project_tree_str: str = """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
.gitkeep
expectations/
.gitkeep
notebooks/
.gitkeep
plugins/
custom_store_backends/
__init__.py
my_custom_store_backend.py
uncommitted/
config_variables.yml
data_docs/
local_site/
expectations/
.gitkeep
static/
.gitkeep
validations/
diabetic_data/
warning/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.html
validations/
diabetic_data/
warning/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(startpath=v10_project_directory)
    assert obs_project_tree_str == expected_project_tree_str
@freeze_time("09/26/2019 13:42:41")
def test_basic_project_upgrade(v10_project_directory, caplog):
    """A v1 project upgrades with no manual steps: stdout, directory tree,
    config_version 3.0, and the upgrade log all match fixtures."""
    # test project upgrade that requires no manual steps
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        ["-c", v10_project_directory, "--v3-api", "project", "upgrade"],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_basic_project_upgrade_expected_stdout.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        # Fixture uses a placeholder for the temp project path.
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        assert stdout == expected_stdout
    # NOTE(review): the tree literal's original indentation may have been lost
    # in this copy - verify against the fixture before relying on it.
    expected_project_tree_str: str = """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
.gitkeep
expectations/
.ge_store_backend_id
.gitkeep
notebooks/
.gitkeep
plugins/
custom_store_backends/
__init__.py
my_custom_store_backend.py
uncommitted/
config_variables.yml
data_docs/
local_site/
expectations/
.gitkeep
static/
.gitkeep
validations/
diabetic_data/
warning/
20200430T191246.763896Z/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.html
logs/
project_upgrades/
UpgradeHelperV11_20190926T134241.000000Z.json
UpgradeHelperV13_20190926T134241.000000Z.json
validations/
.ge_store_backend_id
diabetic_data/
warning/
20200430T191246.763896Z/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(startpath=v10_project_directory)
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number incremented
    assert (
        DataContext.get_ge_config_version(context_root_dir=v10_project_directory) == 3.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        # Round-trip through a string to substitute the project-dir placeholder.
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        expected_upgrade_log_dict: dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v10_project_directory}/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
    assert obs_upgrade_log_dict == expected_upgrade_log_dict
@freeze_time("09/26/2019 13:42:41")
def test_project_upgrade_with_manual_steps(
    v10_project_directory, caplog, sa, postgresql_engine
):
    """An upgrade needing manual steps leaves config_version at 1.0 and
    writes only the V11 upgrade log."""
    # This test requires sqlalchemy because it includes database backends configured
    # test project upgrade that requires manual steps
    # copy v2 yml
    shutil.copy(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/great_expectations_v1_needs_manual_upgrade.yml",
        ),
        os.path.join(v10_project_directory, "great_expectations.yml"),
    )
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        ["-c", v10_project_directory, "--v3-api", "project", "upgrade"],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_project_upgrade_with_manual_steps_expected_stdout.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        # Fixture uses a placeholder for the temp project path.
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        assert stdout == expected_stdout
    # Importing the plugin module may leave a __pycache__ behind that would
    # break the directory-tree comparison; remove it if present.
    pycache_dir_path: str = os.path.join(
        v10_project_directory, "plugins", "custom_store_backends", "__pycache__"
    )
    try:
        shutil.rmtree(pycache_dir_path)
    except FileNotFoundError:
        pass
    # NOTE(review): the tree literal's original indentation may have been lost
    # in this copy - verify against the fixture before relying on it.
    expected_project_tree_str: str = """\
great_expectations/
.gitignore
great_expectations.yml
checkpoints/
.gitkeep
expectations/
.ge_store_backend_id
.gitkeep
notebooks/
.gitkeep
plugins/
custom_store_backends/
__init__.py
my_custom_store_backend.py
uncommitted/
config_variables.yml
data_docs/
local_site/
expectations/
.gitkeep
static/
.gitkeep
validations/
diabetic_data/
warning/
20200430T191246.763896Z/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.html
logs/
project_upgrades/
UpgradeHelperV11_20190926T134241.000000Z.json
validations/
.ge_store_backend_id
diabetic_data/
warning/
20200430T191246.763896Z/
20200430T191246.763896Z/
c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(startpath=v10_project_directory)
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number not incremented
    assert (
        DataContext.get_ge_config_version(context_root_dir=v10_project_directory) == 1.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV11_manual_steps_upgrade_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        # Round-trip through a string to substitute the project-dir placeholder.
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        expected_upgrade_log_dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v10_project_directory}/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
    assert obs_upgrade_log_dict == expected_upgrade_log_dict
@freeze_time("09/26/2019 13:42:41")
@mock_s3
def test_project_upgrade_with_exception(v10_project_directory, caplog):
    """Run ``project upgrade`` on a v1 project whose config raises mid-upgrade.

    Verifies: CLI stdout matches the recorded fixture, the project tree is
    left intact, the config version is NOT incremented, and the upgrade log
    records the exception.
    """
    # copy a v1 yml known to trigger an exception during the upgrade
    shutil.copy(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/great_expectations_v1_basic_with_exception.yml",
        ),
        os.path.join(v10_project_directory, "great_expectations.yml"),
    )
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        ["-c", v10_project_directory, "--v3-api", "project", "upgrade"],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    # compare CLI output against the fixture (project dir is templated in)
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_project_upgrade_with_exception_expected_stdout.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        assert stdout == expected_stdout
    # project tree must be unchanged by the failed upgrade
    expected_project_tree_str: str = """\
great_expectations/
    .gitignore
    great_expectations.yml
    checkpoints/
        .gitkeep
    expectations/
        .ge_store_backend_id
        .gitkeep
    notebooks/
        .gitkeep
    plugins/
        custom_store_backends/
            __init__.py
            my_custom_store_backend.py
    uncommitted/
        config_variables.yml
        data_docs/
            local_site/
                expectations/
                    .gitkeep
                static/
                    .gitkeep
                validations/
                    diabetic_data/
                        warning/
                            20200430T191246.763896Z/
                                20200430T191246.763896Z/
                                    c3b4c5df224fef4b1a056a0f3b93aba5.html
        logs/
            project_upgrades/
                UpgradeHelperV11_20190926T134241.000000Z.json
        validations/
            .ge_store_backend_id
            diabetic_data/
                warning/
                    20200430T191246.763896Z/
                        20200430T191246.763896Z/
                            c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(startpath=v10_project_directory)
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number not incremented
    assert (
        DataContext.get_ge_config_version(context_root_dir=v10_project_directory) == 1.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV11_basic_upgrade_with_exception_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR", v10_project_directory
        )
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PATH", os.path.split(great_expectations.__file__)[0]
        )
        expected_upgrade_log_dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v10_project_directory}/uncommitted/logs/project_upgrades/UpgradeHelperV11_20190926T134241.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
        # blank out the exception message (environment-specific) before compare
        obs_upgrade_log_dict["exceptions"][0]["exception_message"] = ""
        assert obs_upgrade_log_dict == expected_upgrade_log_dict
@freeze_time("01/19/2021 13:26:39")
def test_v2_to_v3_project_upgrade_with_all_manual_steps_checkpoints_datasources_validation_operators(
    v20_project_directory, caplog
):
    """v2→v3 upgrade of a project with checkpoints, datasources and
    validation operators that all require manual migration steps.

    The upgrade should succeed (config bumped to 3.0) while the stdout
    fixture lists the remaining manual steps.
    """
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        ["-c", v20_project_directory, "--v3-api", "project", "upgrade"],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    # compare CLI output against the fixture (project dir is templated in)
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_expected_stdout.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR", v20_project_directory
        )
        assert stdout == expected_stdout
    expected_project_tree_str: str = """\
great_expectations/
    .gitignore
    great_expectations.yml
    checkpoints/
        .gitkeep
        my_checkpoint.yml
        titanic_checkpoint_0.yml
        titanic_checkpoint_1.yml
        titanic_checkpoint_2.yml
    expectations/
        .ge_store_backend_id
        .gitkeep
    notebooks/
        .gitkeep
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            styles/
                data_docs_custom_styles.css
    uncommitted/
        config_variables.yml
        data_docs/
            local_site/
                expectations/
                    .gitkeep
                static/
                    .gitkeep
                validations/
                    diabetic_data/
                        warning/
                            20200430T191246.763896Z/
                                c3b4c5df224fef4b1a056a0f3b93aba5.html
        logs/
            project_upgrades/
                UpgradeHelperV13_20210119T132639.000000Z.json
        validations/
            .ge_store_backend_id
            diabetic_data/
                warning/
                    20200430T191246.763896Z/
                        c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(startpath=v20_project_directory)
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number incremented
    assert (
        DataContext.get_ge_config_version(context_root_dir=v20_project_directory) == 3.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV13_upgrade_with_manual_steps_checkpoints_datasources_validation_operators_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR", v20_project_directory
        )
        expected_upgrade_log_dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v20_project_directory}/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
        assert obs_upgrade_log_dict == expected_upgrade_log_dict
@freeze_time("01/19/2021 13:26:39")
def test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints(
    v20_project_directory_with_v30_configuration_and_v20_checkpoints, caplog
):
    """v2→v3 upgrade of a project already on a v3.0 config but still holding
    v2-style checkpoints — only the checkpoint manual steps remain.
    """
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "-c",
            v20_project_directory_with_v30_configuration_and_v20_checkpoints,
            "--v3-api",
            "project",
            "upgrade",
        ],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    # compare CLI output against the fixture (project dir is templated in)
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_with_manual_steps_checkpoints.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR",
            v20_project_directory_with_v30_configuration_and_v20_checkpoints,
        )
        assert stdout == expected_stdout
    expected_project_tree_str: str = """\
great_expectations/
    .gitignore
    great_expectations.yml
    checkpoints/
        .gitkeep
        my_checkpoint.yml
        titanic_checkpoint_0.yml
        titanic_checkpoint_1.yml
        titanic_checkpoint_2.yml
    expectations/
        .ge_store_backend_id
        .gitkeep
    notebooks/
        .gitkeep
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            styles/
                data_docs_custom_styles.css
    uncommitted/
        config_variables.yml
        data_docs/
            local_site/
                expectations/
                    .gitkeep
                static/
                    .gitkeep
                validations/
                    diabetic_data/
                        warning/
                            20200430T191246.763896Z/
                                c3b4c5df224fef4b1a056a0f3b93aba5.html
        logs/
            project_upgrades/
                UpgradeHelperV13_20210119T132639.000000Z.json
        validations/
            .ge_store_backend_id
            diabetic_data/
                warning/
                    20200430T191246.763896Z/
                        c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(
        startpath=v20_project_directory_with_v30_configuration_and_v20_checkpoints
    )
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number incremented
    assert (
        DataContext.get_ge_config_version(
            context_root_dir=v20_project_directory_with_v30_configuration_and_v20_checkpoints
        )
        == 3.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV13_upgrade_with_manual_steps_checkpoints_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR",
            v20_project_directory_with_v30_configuration_and_v20_checkpoints,
        )
        expected_upgrade_log_dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v20_project_directory_with_v30_configuration_and_v20_checkpoints}/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
        assert obs_upgrade_log_dict == expected_upgrade_log_dict
@freeze_time("01/19/2021 13:26:39")
def test_v2_to_v3_project_upgrade_without_manual_steps(
    v20_project_directory_with_v30_configuration_and_no_checkpoints, caplog
):
    """v2→v3 upgrade of a project with a v3.0 config and no checkpoints —
    the fully automatic path with no manual steps left for the user.
    """
    runner: CliRunner = CliRunner(mix_stderr=False)
    result: Result = runner.invoke(
        cli,
        [
            "-c",
            v20_project_directory_with_v30_configuration_and_no_checkpoints,
            "--v3-api",
            "project",
            "upgrade",
        ],
        input="\n",
        catch_exceptions=False,
    )
    stdout: str = result.stdout
    # compare CLI output against the fixture (project dir is templated in)
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/test_v2_to_v3_project_upgrade_without_manual_steps_expected_stdout.fixture",
        )
    ) as f:
        expected_stdout: str = f.read()
        expected_stdout = expected_stdout.replace(
            "GE_PROJECT_DIR",
            v20_project_directory_with_v30_configuration_and_no_checkpoints,
        )
        assert stdout == expected_stdout
    expected_project_tree_str: str = """\
great_expectations/
    .gitignore
    great_expectations.yml
    expectations/
        .ge_store_backend_id
        .gitkeep
    notebooks/
        .gitkeep
        pandas/
            validation_playground.ipynb
        spark/
            validation_playground.ipynb
        sql/
            validation_playground.ipynb
    plugins/
        custom_data_docs/
            styles/
                data_docs_custom_styles.css
    uncommitted/
        config_variables.yml
        data_docs/
            local_site/
                expectations/
                    .gitkeep
                static/
                    .gitkeep
                validations/
                    diabetic_data/
                        warning/
                            20200430T191246.763896Z/
                                c3b4c5df224fef4b1a056a0f3b93aba5.html
        logs/
            project_upgrades/
                UpgradeHelperV13_20210119T132639.000000Z.json
        validations/
            .ge_store_backend_id
            diabetic_data/
                warning/
                    20200430T191246.763896Z/
                        c3b4c5df224fef4b1a056a0f3b93aba5.json
"""
    obs_project_tree_str: str = gen_directory_tree_str(
        startpath=v20_project_directory_with_v30_configuration_and_no_checkpoints
    )
    assert obs_project_tree_str == expected_project_tree_str
    # make sure config number incremented
    assert (
        DataContext.get_ge_config_version(
            context_root_dir=v20_project_directory_with_v30_configuration_and_no_checkpoints
        )
        == 3.0
    )
    with open(
        file_relative_path(
            __file__,
            "../../test_fixtures/upgrade_helper/UpgradeHelperV13_upgrade_without_manual_steps_log.json",
        )
    ) as f:
        expected_upgrade_log_dict: dict = json.load(f)
        expected_upgrade_log_str: str = json.dumps(expected_upgrade_log_dict)
        expected_upgrade_log_str = expected_upgrade_log_str.replace(
            "GE_PROJECT_DIR",
            v20_project_directory_with_v30_configuration_and_no_checkpoints,
        )
        expected_upgrade_log_dict = json.loads(expected_upgrade_log_str)
    with open(
        f"{v20_project_directory_with_v30_configuration_and_no_checkpoints}/uncommitted/logs/project_upgrades/UpgradeHelperV13_20210119T132639.000000Z.json"
    ) as f:
        obs_upgrade_log_dict: dict = json.load(f)
        assert obs_upgrade_log_dict == expected_upgrade_log_dict
|
great-expectations/great_expectations
|
tests/cli/upgrade_helpers/test_upgrade_helper.py
|
Python
|
apache-2.0
| 25,748
|
[
"VisIt"
] |
8ffe58c5a9c824920b7fc8b1c46580c39d8caefe6d9a3453e6b93f415609912b
|
# Standard lib imports
from sys import argv
import os
from time import sleep
import re
import pdb
import logging
import datetime
import csv
import json
from collections import defaultdict
# Third-party imports
import pandas as pd
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# Constants
DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(os.path.dirname(DIR)) # Root directory of the project
# Alter for any given race on a clarityelection.com site
CONTEST_URL = 'http://results.enr.clarityelections.com/GA/63991/182895/en/md_data.html?cid=5000&'
# Counties to scrape; pass None to Parser.get_county_urls to scrape all
COUNTIES = ['CLAYTON', 'FULTON', 'GWINNETT', 'DEKALB', 'COBB']
LAST_COUNTY = 'Worth' # Used to check that all counties on the main page have loaded from AJAX request
# Candidate names exactly as they appear in Clarity's JSON feeds
CANDIDATES = {'dem': 'HILLARY CLINTON', 'rep': 'DONALD J. TRUMP'}
TOTAL_PRECINCTS = 914 # The number of precincts in the reapportionment office's map
# NOTE(review): appears unused — Parser._build_driver launches Firefox, not PhantomJS
PHANTOM_JS_INSTALLATION = '/Users/jcox/Desktop/phantomjs/bin/phantomjs'
# Input and output file locations. Change as needed
STATS_FILE = os.path.join(DIR, 'ajc_precincts_merged_centers.csv')
MAP_INPUT = os.path.join(DIR, '2014_income_race_centers.json')
VOTES_TMP = '/tmp/vote_data.csv'
UNMERGED_TMP = '/tmp/unmerged.csv'
MAP_OUTPUT = os.path.join(BASE_DIR, 'assets', 'data', '2014_precincts_income_raceUPDATE.json')
METADATA_OUTPUT = os.path.join(BASE_DIR, 'assets', 'data', '2014_metadata.json')
AGG_STATS_OUTPUT = os.path.join(BASE_DIR, 'assets', 'data', '2014agg_stats.json')
# End constants
# Configure logging
logging.basicConfig(level=logging.INFO)
class Parser(object):
    """
    Base class that provides scraping functionality for Clarity Elections sites.

    Uses a Selenium webdriver to simulate clicks and collect the URLs of the
    detail pages for the requested counties, then downloads precinct-level
    vote data for a given race from each county's JSON endpoints.
    """
    def __init__(self, contest_url):
        # URL of the contest summary page on the Clarity site
        self.main_url = contest_url
        # Populated by get_county_urls(): [name, base_url, rep_votes, dem_votes]
        self.county_urls = []
        # Populated by get_precincts(): one result dict per precinct
        self.precinct_results = []
        self.unmerged_precincts = None
        self.merged_precincts = None
        self.total_precincts = 0
    def _build_driver(self):
        """
        Create a Selenium webdriver, used to simulate clicks on the Clarity
        elections site, and load the contest summary page.
        NOTE(review): PHANTOM_JS_INSTALLATION is defined above, but Firefox
        is what is actually launched here — confirm which is intended.
        """
        driver = webdriver.Firefox()
        driver.get(self.main_url)
        return driver
    def get_county_urls(self, input_counties=COUNTIES, delay=5):
        """
        Use Selenium to get the dynamically generated URLs for each county's
        detail page by simulating clicks, and append the URLs to self.county_urls.
        :param input_counties: iterable of upper-case county names, or None
            to scrape every county on the page
        :param delay: seconds to wait for AJAX-loaded elements
        """
        self.county_urls = []  # Reset county URLs each time the scraper runs
        logging.info('Creating Selenium driver and accessing Clarity')
        driver = self._build_driver()
        try:
            string_counties = ', '.join(input_counties)
        except TypeError:  # input_counties is None -> scrape everything
            string_counties = 'All counties'
        print('Getting detail page URLs for {}'.format(string_counties))
        # Wait until counties have loaded through AJAX to run script
        # Yes it's hacky but using WebDriverWait wasn't working
        sleep(2)
        # Get a list of all counties on the contest summary page
        selector = 'table.vts-data > tbody > tr'
        all_counties = driver.find_elements_by_css_selector(selector)
        # Generate a list of county names (None marks non-county header rows)
        counties = []
        for county in all_counties:
            try:
                links = county.find_elements_by_tag_name('a')
                counties.append(links[0].get_attribute('id'))
            except Exception:
                # Some of the rows in the table are just headers
                counties.append(None)
        # Loop through names instead of DOM elements because Selenium will
        # throw a StaleElementReferenceException after the page reloads
        for i, name in enumerate(counties):
            if not name:
                continue
            if input_counties is not None and name.upper() not in input_counties:
                continue
            # Because the page loads through AJAX, wait until the information
            # for the county is loaded
            try:
                check = EC.presence_of_element_located((By.ID, name))
                WebDriverWait(driver, delay).until(check)
            except TimeoutException:
                print('Home page took too long to load')
                print('Stopping scraper. Your data has not been added')
                driver.quit()  # don't leak the browser process on abort
                return
            sleep(.5)  # Inexplicably, data loads a moment after the precinct name
            # Get links from the county row
            county = driver.find_elements_by_css_selector(selector)[i]
            links = county.find_elements_by_tag_name('a')
            county_name = name
            rep_votes = county.find_elements_by_css_selector('td')[2].text
            dem_votes = county.find_elements_by_css_selector('td')[3].text
            # The URL for each county is generated by Clarity on each page visit.
            # Emulating a click is a sure bet to get to the detail page
            links[1].click()
            # Wait until the new page loads
            try:
                check = EC.presence_of_element_located((By.ID, 'precinctDetailLabel'))
                WebDriverWait(driver, delay).until(check)
            except TimeoutException:
                print('Page took too long to load. Trying to add precincts anyway')
            # Remove cruft at the end of URL and append it to our list of URLs
            split_url = driver.current_url.split('/')
            base_url = '/'.join(split_url[:-2])
            self.county_urls.append([county_name.upper(), base_url, rep_votes, dem_votes])
            print('{} county precincts added'.format(county_name))
            driver.get(self.main_url)
        # After looping through all the counties, close the browser
        driver.quit()
        # Save the county urls to the tmp directory so they can be reused on
        # future passes
        pd.DataFrame(self.county_urls).to_csv('/tmp/county_urls.csv', encoding='utf-8', index=False)
        return
    def get_precincts(self):
        """
        Get JSON data from the endpoints listed in self.county_urls and parse
        the precinct-level election results from each one. Results accumulate
        in self.precinct_results and are also written to VOTES_TMP.
        """
        self.precinct_results = []  # Reset the precinct results
        for county_name, base_url, rep_votes, dem_votes in self.county_urls:
            logging.info('Getting precinct details from {}'.format(base_url))
            # Candidate names and votes are stored in separate files. God knows why.
            candidate_data = requests.get(base_url + '/json/sum.json')
            vote_data = requests.get(base_url + '/json/details.json')
            # Get the list of candidates
            contests = json.loads(candidate_data.content)['Contests']
            # Find out which of the contests contains the candidates we're interested in.
            # Clarity sometimes includes multiple contests in the same JSON file
            try:
                order = [i for i, val in enumerate(contests) if CANDIDATES['rep'] in val['CH']][0]
                candidates = contests[order]['CH']
            except IndexError:
                # BUG FIX: this message previously sat *after* the
                # `except: continue`, so it logged on every successful
                # county instead of on a failed match.
                logging.error("""The contestant names you supplied don\'t match
                            any in the data files. Are you sure you spelled the names
                            correctly?""")
                continue
            # Get votes for each candidate
            contests = json.loads(vote_data.content)['Contests']
            contest = contests[order]
            for precinct, votes in zip(contest['P'], contest['V']):
                data = {'precinct': precinct, 'county': county_name}
                total = 0
                for candidate, count in zip(candidates, votes):
                    if candidate == CANDIDATES['rep']:
                        total += int(count)
                        data['rep_votes'] = int(count)
                    elif candidate == CANDIDATES['dem']:
                        data['dem_votes'] = int(count)
                        total += int(count)
                data['total'] = total
                self.precinct_results.append(data)
        votes = pd.DataFrame(self.precinct_results)
        votes.to_csv(VOTES_TMP, index=False, encoding='utf-8')
        return
class ResultSnapshot(Parser):
    """
    Class that contains utilities for cleaning Georgia election results and
    merging with statistical data gathered from the US Census.
    """
    def __init__(self, **kwargs):
        super(ResultSnapshot, self).__init__(**kwargs)
    def _clean(self, row):
        """
        Rename the few precincts scraped from the site whose names don't
        match names in the precinct shapefiles, strip leading 3-digit codes,
        and truncate to the shapefile's 20-character field width.
        """
        name = re.sub(re.compile(r'\d{3} '), '', row['precinct'])
        # Applied in order; each substitution feeds the next
        replacements = [
            (r'EP04-05|EP04-13', 'EP04'),
            (r'10H1|10H2', '10H'),
            (r'CATES D - 04|CATES D - 07', 'CATES D'),
            (r'AVONDALE HIGH - 05|AVONDALE HIGH - 04', 'AVONDALE HIGH'),
            (r'CHAMBLEE 2', 'CHAMBLEE'),
            (r'WADSWORTH ELEM - 04', 'WADSWORTH ELEM'),
            (r'CP06A', 'CP06'),
        ]
        for pattern, replacement in replacements:
            name = re.sub(re.compile(pattern), replacement, name)
        return name.strip().upper()[:20]  # Restrict to 20 chars
    def _get_income(self, row):
        """Bin a precinct's average income into 'low'/'mid'/'high'.
        BUG FIX: this method was previously defined twice with identical
        bodies; the silent duplicate has been removed.
        """
        if row['avg_income'] < 50000:
            return 'low'
        elif row['avg_income'] < 100000:
            return 'mid'
        else:
            return 'high'
    def _get_rep_proportion(self, row):
        """Republican share of the precinct's total votes (0 if no votes)."""
        try:
            return float(row['rep_votes'])/row['total']
        except ZeroDivisionError:
            return 0
    def _get_dem_proportion(self, row):
        """Democratic share of the precinct's total votes (0 if no votes)."""
        try:
            return float(row['dem_votes'])/row['total']
        except ZeroDivisionError:
            return 0
    def _clean_vote_stats(self, precincts):
        """
        Calculate proportions of voters for each candidate by precinct and
        clean the precinct name so the frame is ready to be consumed by the
        JS app.
        """
        cframe = precincts
        # Calculate proportion of total votes that each candidate got
        cframe['rep_p'] = cframe.apply(self._get_rep_proportion, axis=1)
        cframe['dem_p'] = cframe.apply(self._get_dem_proportion, axis=1)
        cframe['precinct'] = cframe.apply(self._clean, axis=1)
        return cframe
    def merge_votes(self, statsf=STATS_FILE, outf=VOTES_TMP):
        """
        Merge the election result dataset with the precinct maps from the
        Reapportionment office. Unmerged precincts go to UNMERGED_TMP for
        inspection; merged ones to `outf`.
        """
        votes = pd.DataFrame(self.precinct_results)
        stats = pd.read_csv(statsf, index_col=False)
        fvotes = self._clean_vote_stats(votes)
        merged = stats.merge(fvotes,
                             left_on='ajc_precinct',
                             right_on='precinct',
                             how='left',
                             indicator=True)
        # Write unmerged precincts to a CSV. Check this to see why you're
        # missing them
        self.unmerged_precincts = merged[merged._merge != 'both']
        self.unmerged_precincts.to_csv(UNMERGED_TMP, index=False)
        # Drop precincts with null values for the election results
        self.merged_precincts = merged[merged._merge == 'both']
        logging.info('Writing precinct information to csv {}'.format(outf))
        self.merged_precincts.to_csv(outf)
        return
    def aggregate_stats(self, statsfile=STATS_FILE):
        """
        Calculate an aggregate stats file that's used to populate summary
        statistics in the map.
        """
        just_votes = self.merged_precincts
        stats = pd.read_csv(statsfile)
        merged = just_votes.merge(stats, how='inner')
        merged['income_bin'] = merged.apply(self._get_income, axis=1)
        # Calculate aggregated stats for summary table (list selection after
        # groupby — tuple selection is deprecated in modern pandas)
        race = merged.groupby(['county', 'race'])[['rep_votes', 'dem_votes']].sum().unstack()
        income = merged.groupby(['county', 'income_bin'])[['rep_votes', 'dem_votes']].sum().unstack()
        reps = race.rep_votes.merge(income.rep_votes, left_index=True, right_index=True)
        reps['party'] = 'rep_votes'
        repsf = reps.reset_index()
        dems = race.dem_votes.merge(income.dem_votes, left_index=True, right_index=True)
        dems['party'] = 'dem_votes'
        demsf = dems.reset_index()
        combined = pd.concat([repsf, demsf])
        # Create a nested defaultdict
        data = defaultdict(lambda: defaultdict(dict))
        fields = ['black',
                  'white',
                  'hispanic',
                  'high',
                  'mid',
                  'low']
        # Create a nested JSON object
        for i, row in combined.iterrows():
            county = row['county']
            party = row['party']
            # County-wide totals scraped from the summary page
            county_res = [x[2:] for x in self.county_urls if x[0] == county.upper()][0]
            data[county]['all'][party] = 0
            for field in fields:
                # Check if val is null for precincts missing a certain group
                # (eg some precincts have no Hispanics)
                if pd.isnull(row[field]):
                    continue
                data[county][field][party] = row[field]
                if field in ['high', 'mid', 'low']:
                    data[county]['all']['rep_votes'] = float(county_res[0])
                    data[county]['all']['dem_votes'] = float(county_res[1])
                # It's impossible to use default dict for the below, because the
                # factory can't generate both dicts and ints by default
                try:
                    data['ALL COUNTIES'][field][party] += row[field]
                except KeyError:
                    data['ALL COUNTIES'][field][party] = 0
        # Lastly, calculate summary stats for counties
        data['ALL COUNTIES']['all']['rep_votes'] = sum([float(x[2]) for x in self.county_urls])
        data['ALL COUNTIES']['all']['dem_votes'] = sum([float(x[3]) for x in self.county_urls])
        logging.info('Writing aggregated stats to {}'.format(AGG_STATS_OUTPUT))
        with open(AGG_STATS_OUTPUT, 'w') as f:
            f.write(json.dumps(data, indent=4))
        return
    def update_map(self, vote_file=VOTES_TMP, geoJSON=MAP_INPUT):
        """
        Take map JSON data and generate a new map with updated election data,
        plus a small metadata file (timestamp, precincts reporting).
        BUG FIX: file handles were previously never closed, and the variable
        `f` was reused for both the CSV handle and a strftime format string.
        """
        logging.info('Adding latest vote information to map file {}'.format(MAP_OUTPUT))
        with open(geoJSON, 'r') as map_file:
            map_ = json.loads(map_file.read())
        metadata = {}
        reporting = 0
        # CSV DictReader parses all columns as strings; these must be
        # converted back to floats
        floats = [
            'rep_votes',
            'dem_votes',
            'rep_p',
            'dem_p',
            'total',
            'avg_income'
        ]
        with open(vote_file) as vote_handle:
            votes = csv.DictReader(vote_handle)
            for i, feature in enumerate(map_['features']):
                name = feature['properties']['PRECINCT_N']
                try:
                    # Rewind so the reader scans the whole file per precinct
                    vote_handle.seek(0)
                    match = [x for x in votes if x['PRECINCT_N'] == name][0]
                    for col in floats:
                        match[col] = float(match[col])
                    map_['features'][i]['properties'] = match
                    if int(match['dem_votes']) != 0 or int(match['rep_votes']) != 0:
                        reporting += 1
                # Catch cases where the map has precincts that aren't in the
                # voter files
                except IndexError:
                    continue
        # Add relevant metadata
        fmt = '%-I:%M %p, %A %b %-d'  # eg: 12:30 AM, Wednesday Nov. 8
        metadata['last_update'] = datetime.datetime.now().strftime(fmt)
        metadata['precincts_reporting'] = reporting
        metadata['total_precincts'] = TOTAL_PRECINCTS
        with open(MAP_OUTPUT, 'w') as a, open(METADATA_OUTPUT, 'w') as b:
            a.write(json.dumps(map_))
            b.write(json.dumps(metadata))
# Run the full scrape -> merge -> aggregate -> map-update pipeline
if __name__ == '__main__':
    p = ResultSnapshot(contest_url=CONTEST_URL)
    p.get_county_urls()
    p.get_precincts()
    p.merge_votes()
    p.aggregate_stats()
    p.update_map()
|
NewsappAJC/precinct-election-map
|
data_cleaning/2016/clarity_live.py
|
Python
|
mit
| 17,373
|
[
"VisIt"
] |
369d063e2fc25e1fbb9c610758f427e01dd651e594a328a25b9c93190c7e6553
|
#!/usr/bin/env python
# MIDAS: Metagenomic Intra-species Diversity Analysis System
# Copyright (C) 2015 Stephen Nayfach
# Freely distributed under the GNU General Public License (GPLv3)
import sys, os, subprocess, Bio.SeqIO
from time import time
from midas import utility
from operator import itemgetter
def read_annotations(args):
    """Map species_id -> annotation record from <db>/species_info.txt."""
    inpath = '%s/species_info.txt' % args['db']
    return {rec['species_id']: rec for rec in utility.parse_file(inpath)}
def read_marker_info(args):
    """ Read info for marker genes from phyeco.fa """
    # Seed every marker-gene id with None, then fill in records from the map file
    info = dict.fromkeys(
        seq.id for seq in Bio.SeqIO.parse('%s/marker_genes/phyeco.fa' % args['db'], 'fasta'))
    for rec in utility.parse_file('%s/marker_genes/phyeco.map' % args['db']):
        if rec['gene_id'] in info:
            info[rec['gene_id']] = rec
    return info
def map_reads_hsblast(args):
    """ Use hs-blastn to map reads in fasta file to marker database.

    Builds one shell pipeline: stream_seqs subsamples/normalizes the reads
    and pipes them on stdout straight into `hs-blastn align`, which writes
    tabular (outfmt 6) alignments to <outdir>/species/temp/alignments.m8.
    """
    # stream sequences
    command = 'python %s' % args['stream_seqs']
    command += ' -1 %s' % args['m1'] # fasta/fastq
    if args['m2']: command += ' -2 %s' % args['m2'] # mate
    if args['max_reads']: command += ' -n %s' % args['max_reads'] # number of reads
    if args['read_length']: command += ' -l %s' % args['read_length'] # read length
    command += ' 2> %s/species/temp/read_count.txt' % args['outdir'] # tmpfile to store # of reads, bp sampled
    # hs-blastn
    command += ' | %s align' % args['hs-blastn']
    command += ' -word_size %s' % args['word_size']
    command += ' -query /dev/stdin'
    command += ' -db %s/marker_genes/phyeco.fa' % args['db']
    command += ' -outfmt 6'
    command += ' -num_threads %s' % args['threads']
    command += ' -out %s/species/temp/alignments.m8' % args['outdir']
    command += ' -evalue 1e-3'
    args['log'].write('command: '+command+'\n')
    # NOTE(review): shell=True with interpolated args — fine for trusted CLI
    # values, but would need quoting for untrusted paths
    process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    utility.check_exit_code(process, command)
def parse_blast(inpath):
    """ Yield formatted record from BLAST m8 file.

    Each line becomes a dict mapping the 12 standard m8 column names to
    values cast to the right type.
    FIX: the per-column caster no longer shadows the builtin `format`, and
    the file handle is now closed deterministically via a context manager.
    """
    casters = [str, str, float, int, float, float, float, float, float, float, float, float]
    fields = ['query', 'target', 'pid', 'aln', 'mis', 'gaps',
              'qstart', 'qend', 'tstart', 'tend', 'evalue', 'score']
    with open(inpath) as infile:
        for line in infile:
            values = line.rstrip().split()
            yield {field: cast(value) for field, cast, value in zip(fields, casters, values)}
def query_coverage(aln):
    """ Fraction of the query covered by the alignment (query length is
    encoded as the last '_'-delimited token of the sequence header). """
    query_length = int(aln['query'].split('_')[-1])
    return float(aln['aln']) / query_length
def find_best_hits(args, marker_info):
    """ Find top scoring alignment for each read; ties are kept together. """
    best_hits = {}
    marker_cutoffs = get_markers(args)
    total = 0
    for aln in parse_blast('%s/species/temp/alignments.m8' % args['outdir']):
        total += 1
        # gene family for this target, looked up in marker_info
        gene_family = marker_info[aln['target']]['marker_id']
        min_pid = args['mapid'] if args['mapid'] else marker_cutoffs[gene_family]
        # drop alignments below the identity cutoff or with low query coverage
        if aln['pid'] < min_pid or query_coverage(aln) < args['aln_cov']:
            continue
        current = best_hits.get(aln['query'])
        if current is None or current[0]['score'] < aln['score']:
            best_hits[aln['query']] = [aln]          # new or strictly better hit
        elif current[0]['score'] == aln['score']:
            current.append(aln)                      # tie: keep alongside
    print(" total alignments: %s" % total)
    return list(best_hits.values())
def assign_unique(args, alns, species_info, marker_info):
    """ Count the number of uniquely mapped reads to each genome species """
    unique_alns = {species_id: [] for species_id in species_info}
    unique_count = 0
    ambiguous_count = 0
    for hits in alns:
        if len(hits) == 1:
            # single best hit -> credit its species directly
            unique_count += 1
            species_id = marker_info[hits[0]['target']]['species_id']
            unique_alns[species_id].append(hits[0])
        else:
            ambiguous_count += 1
    print(" uniquely mapped reads: %s" % unique_count)
    print(" ambiguously mapped reads: %s" % ambiguous_count)
    return unique_alns
def assign_non_unique(args, alns, unique_alns, marker_info):
    """ Probabalistically assign ambiguously mapped reads, weighting each
    candidate species by its count of uniquely mapped reads. """
    import numpy as np
    import random
    total_alns = unique_alns.copy()
    for hits in alns:
        if len(hits) <= 1:
            continue
        species_ids = [marker_info[hit['target']]['species_id'] for hit in hits]
        unique_counts = [len(unique_alns[species_id]) for species_id in species_ids]
        total = sum(unique_counts)
        if total == 0:
            # no evidence either way: pick a candidate uniformly at random
            chosen = random.sample(species_ids, 1)[0]
        else:
            weights = [float(count) / total for count in unique_counts]
            chosen = np.random.choice(species_ids, 1, p=weights)[0]
        total_alns[chosen].append(hits[species_ids.index(chosen)])
    return total_alns
def get_markers(args):
    """ Read in optimal mapping parameters for marker genes; override if user has provided cutoff """
    inpath = '/'.join([args['db'], 'marker_genes/phyeco.mapping_cutoffs'])
    if not os.path.isfile(inpath):
        sys.exit("File not found: %s" % inpath)
    user_cutoff = args['mapid']  # truthy user value overrides per-marker cutoffs
    marker_cutoffs = {}
    with open(inpath) as infile:
        for line in infile:
            marker_id, min_pid = line.rstrip().split()
            marker_cutoffs[marker_id] = user_cutoff if user_cutoff else float(min_pid)
    return marker_cutoffs
def read_gene_lengths(args, species_info, marker_info):
    """ Sum marker-gene lengths per species_id """
    totals = dict.fromkeys(species_info, 0)
    for rec in marker_info.values():
        totals[rec['species_id']] += int(rec['gene_length'])
    return totals
def normalize_counts(species_alns, total_gene_length):
    """ Normalize counts by gene length and sum contrain """
    species_abundance = {}
    # coverage = aligned bp / total marker-gene length for the species
    for species_id, alns in species_alns.items():
        if alns:
            aligned_bp = sum(hit['aln'] for hit in alns)
            coverage = float(aligned_bp) / total_gene_length[species_id]
        else:
            coverage = 0.0
        species_abundance[species_id] = {'cov': coverage, 'count': len(alns)}
    # relative abundance = share of the summed coverage
    total_cov = sum(entry['cov'] for entry in species_abundance.values())
    for entry in species_abundance.values():
        entry['rel_abun'] = entry['cov'] / total_cov if total_cov > 0 else 0
    print(" total marker-gene coverage: %s" % round(total_cov, 3))
    return species_abundance
def write_abundance(outdir, species_abundance, annotations):
    """ Write species results to specified output file.

    Rows are sorted by read count, descending.
    FIX: the output file was never closed; it is now written inside a
    context manager.
    """
    outpath = '%s/species/species_profile.txt' % outdir
    fields = ['species_id', 'count_reads', 'coverage', 'relative_abundance']
    # rank species by number of mapped reads, highest first
    ranked = sorted(
        ((sid, vals['count']) for sid, vals in species_abundance.items()),
        key=itemgetter(1), reverse=True)
    with open(outpath, 'w') as outfile:
        outfile.write('\t'.join(fields)+'\n')
        for species_id, _count in ranked:
            values = species_abundance[species_id]
            record = [species_id, values['count'], values['cov'], values['rel_abun']]
            outfile.write('\t'.join([str(x) for x in record])+'\n')
def read_abundance(inpath):
    """ Parse species abundance file """
    if not os.path.isfile(inpath):
        sys.exit("\nCould not locate species profile: %s\nTry rerunning with run_midas.py species" % inpath)
    # numeric columns and their target types (species_id stays a string)
    casters = {'count_reads': int, 'coverage': float, 'relative_abundance': float}
    abun = {}
    for rec in utility.parse_file(inpath):
        for field, cast in casters.items():
            if field in rec:
                rec[field] = cast(rec[field])
        abun[rec['species_id']] = rec
    return abun
def select_species(args):
	""" Select the set of species ids to map reads against.

	Species may be chosen by a coverage threshold (args['species_cov']),
	the top-N most abundant (args['species_topn']), and/or an explicit id
	list (args['species_id']); the final selection is the intersection of
	all criteria that were specified. Ids listed in <db>/exclude.txt are
	removed. Exits with an error if no species remain.
	"""
	import operator
	species_sets = {}
	# read in species abundance only if an abundance-based criterion is used
	if any([args['species_topn'], args['species_cov']]):
		species_abundance = read_abundance('%s/species/species_profile.txt' % args['outdir'])
		# user specified a coverage threshold
		if args['species_cov']:
			species_sets['species_cov'] = set([])
			for species_id, values in species_abundance.items():
				if values['coverage'] >= args['species_cov']:
					species_sets['species_cov'].add(species_id)
		# user specified the top-n species by relative abundance
		if args['species_topn']:
			species_sets['species_topn'] = set([])
			species_abundance = [(i,d['relative_abundance']) for i,d in species_abundance.items()]
			sorted_abundance = sorted(species_abundance, key=operator.itemgetter(1), reverse=True)
			for species_id, rel_abun in sorted_abundance[0:args['species_topn']]:
				species_sets['species_topn'].add(species_id)
	# user specified a list of one or more species ids
	if args['species_id']:
		species_sets['species_id'] = set([])
		for species_id in args['species_id']:
			species_sets['species_id'].add(species_id)
	# intersect the sets selected by each criterion
	my_species = list(set.intersection(*list(species_sets.values())))
	# optionally remove excluded species ids
	inpath = '/'.join([args['db'], 'exclude.txt'])
	if os.path.isfile(inpath):
		for line in open(inpath):
			# ValueError just means the excluded id was not selected;
			# the previous bare `except` also hid real interrupts/errors
			try: my_species.remove(line.rstrip())
			except ValueError: pass
	# check that at least one species was selected
	if len(my_species) == 0:
		sys.exit("\nError: no species satisfied your selection criteria. \n")
	return my_species
def run_pipeline(args):
	""" Run entire pipeline.

	Stages: (1) align reads to the marker-gene database, (2) classify each
	read (best hit, then unique/non-unique assignment), (3) normalize
	counts to per-species coverages, (4) write the abundance profile.
	Wall time and peak memory are printed after each stage; stage banners
	are also appended to the args['log'] file handle. Temporary alignment
	files are deleted when args['remove_temp'] is set.
	"""
	# read info files (species annotations and marker-gene metadata)
	species_info = read_annotations(args)
	marker_info = read_marker_info(args)
	# align reads
	start = time()
	print("\nAligning reads to marker-genes database")
	args['log'].write("\nAligning reads to marker-genes database\n")
	map_reads_hsblast(args)
	print(" %s minutes" % round((time() - start)/60, 2))
	print(" %s Gb maximum memory" % utility.max_mem_usage())
	# find best hit for each read, then resolve unique vs. shared hits
	start = time()
	print("\nClassifying reads")
	args['log'].write("\nClassifying reads\n")
	best_hits = find_best_hits(args, marker_info)
	unique_alns = assign_unique(args, best_hits, species_info, marker_info)
	species_alns = assign_non_unique(args, best_hits, unique_alns, marker_info)
	print(" %s minutes" % round((time() - start)/60, 2))
	print(" %s Gb maximum memory" % utility.max_mem_usage())
	# estimate species abundance (gene-length-normalized coverage)
	start = time()
	print("\nEstimating species abundance")
	args['log'].write("\nEstimating species abundance\n")
	total_gene_length = read_gene_lengths(args, species_info, marker_info)
	species_abundance = normalize_counts(species_alns, total_gene_length)
	print(" %s minutes" % round((time() - start)/60, 2) )
	print(" %s Gb maximum memory" % utility.max_mem_usage())
	# write results
	write_abundance(args['outdir'], species_abundance, species_info)
	# clean up temporary alignment files
	if args['remove_temp']:
		import shutil
		shutil.rmtree('%s/species/temp' % args['outdir'])
|
snayfach/MIDAS
|
midas/run/species.py
|
Python
|
gpl-3.0
| 10,659
|
[
"BLAST"
] |
927cd9d6443e2572d5a5dd0aa19e6d153d983536a26ca36864708ebefc3c3d73
|
"""
Model grism spectra in individual FLTs
"""
import os
from collections import OrderedDict
import copy
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
from astropy.table import Table
import astropy.wcs as pywcs
import astropy.units as u
#import stwcs
### Helper functions from a document written by Pirzkal, Brammer & Ryan
from . import grismconf
from . import utils
from .utils_c import disperse
from .utils_c import interp
from . import GRIZLI_PATH
# Interpolation mode used when resampling segmentation images.
# NOTE(review): the comment below disagrees with the value — it already IS
# 'nearest'; confirm which mode was actually intended.
# Would prefer 'nearest' but that occasionally segment faults out
SEGMENTATION_INTERP = 'nearest'
### Factors for converting HST countrates (e-/s) to f_lambda flux densities
### (PHOTFLAM-style values; 'GRISM'/'G800L'/'G280' entries are placeholders)
photflam_list = {'F098M': 6.0501324882418389e-20,
                 'F105W': 3.038658152508547e-20,
                 'F110W': 1.5274130068787271e-20,
                 'F125W': 2.2483414275260141e-20,
                 'F140W': 1.4737154005353565e-20,
                 'F160W': 1.9275637653833683e-20,
                 'F435W': 3.1871480286278679e-19,
                 'F606W': 7.8933594352047833e-20,
                 'F775W': 1.0088466875014488e-19,
                 'F814W': 7.0767633156044843e-20,
                 'VISTAH':1.9275637653833683e-20*0.95,
                 'GRISM': 1.e-20,
                 'G800L': 1.,
                 'G280': 1.}
### Filter pivot wavelengths, Angstroms
photplam_list = {'F098M': 9864.722728110915,
                 'F105W': 10551.046906405772,
                 'F110W': 11534.45855553774,
                 'F125W': 12486.059785775655,
                 'F140W': 13922.907350356367,
                 'F160W': 15369.175708965562,
                 'F435W': 4328.256914042873,
                 'F606W': 5921.658489236346,
                 'F775W': 7693.297933335407,
                 'F814W': 8058.784799323767,
                 'VISTAH':1.6433e+04,
                 'GRISM': 1.6e4,
                 'G800L': 7.4737026e3,
                 'G280': 3651.}
# character to skip clearing line on STDOUT printing
#no_newline = '\x1b[1A\x1b[1M'
### Demo for computing photflam and photplam with pysynphot
### (never executed; kept as a recipe for regenerating the tables above)
if False:
    import pysynphot as S
    n = 1.e-20
    spec = S.FlatSpectrum(n, fluxunits='flam')
    photflam_list = {}
    photplam_list = {}
    for filter in ['F098M', 'F105W', 'F110W', 'F125W', 'F140W', 'F160W', 'G102', 'G141']:
        bp = S.ObsBandpass('wfc3,ir,{0}'.format(filter.lower()))
        photplam_list[filter] = bp.pivot()
        obs = S.Observation(spec, bp)
        photflam_list[filter] = n/obs.countrate()
    for filter in ['F435W', 'F606W', 'F775W', 'F814W']:
        bp = S.ObsBandpass('acs,wfc1,{0}'.format(filter.lower()))
        photplam_list[filter] = bp.pivot()
        obs = S.Observation(spec, bp)
        photflam_list[filter] = n/obs.countrate()
class GrismDisperser(object):
    def __init__(self, id=0, direct=None,
                 segmentation=None, origin=[500, 500],
                 xcenter=0., ycenter=0., pad=0, grow=1, beam='A',
                 conf=['WFC3','F140W', 'G141'], scale=1.,
                 fwcpos=None, MW_EBV=0., yoffset=0):
        """Object for computing dispersed model spectra
        Parameters
        ----------
        id : int
            Only consider pixels in the segmentation image with value `id`.
            Default of zero to match the default empty segmentation image.
        direct : `~numpy.ndarray`
            Direct image cutout in f_lambda units (i.e., e-/s times PHOTFLAM).
            Default is a trivial zeros array.
        segmentation : `~numpy.ndarray` (float32) or None
            Segmentation image. If None, create a zeros array with the same
            shape as `direct`.
        origin : [int, int]
            `origin` defines the lower left pixel index (y,x) of the `direct`
            cutout from a larger detector-frame image
        xcenter, ycenter : float, float
            Sub-pixel centering of the exact center of the object, relative
            to the center of the thumbnail. Needed for getting exact
            wavelength grid correct for the extracted 2D spectra.
        pad : int
            Offset between origin = [0,0] and the true lower left pixel of the
            detector frame. This can be nonzero for cases where one creates
            a direct image that extends beyond the boundaries of the nominal
            detector frame to model spectra at the edges.
        grow : int >= 1
            Interlacing factor.
        beam : str
            Spectral order to compute. Must be defined in `self.conf.beams`
        conf : [str, str, str] or `grismconf.aXeConf` object.
            Pre-loaded aXe-format configuration file object or if list of
            strings determine the appropriate configuration filename with
            `grismconf.get_config_filename` and load it.
        scale : float
            Multiplicative factor to apply to the modeled spectrum from
            `compute_model`.
        fwcpos : float
            Rotation position of the NIRISS filter wheel
        MW_EBV : float
            Galactic extinction
        yoffset : float
            Cross-dispersion offset to apply to the trace

        NOTE(review): `origin` and `conf` use mutable default arguments; they
        are never mutated here, but binding them is a known Python pitfall.

        Attributes
        ----------
        sh : 2-tuple
            shape of the direct array
        sh_beam : 2-tuple
            computed shape of the 2D spectrum
        seg : `~numpy.array`
            segmentation array
        lam : `~numpy.array`
            wavelength along the trace
        ytrace : `~numpy.array`
            y pixel center of the trace. Has same dimensions as sh_beam[1].
        sensitivity : `~numpy.array`
            conversion factor from native e/s to f_lambda flux densities
        lam_beam, ytrace_beam, sensitivity_beam : `~numpy.array`
            Versions of the above attributes defined for just the specific
            pixels of the pixel beam, not the full 2D extraction.
        modelf, model : `~numpy.array`, `~numpy.ndarray`
            2D model spectrum. `model` is linked to `modelf` with "reshape",
            the later which is a flattened 1D array where the fast
            calculations are actually performed.
        model : `~numpy.ndarray`
            2D model spectrum linked to `modelf` with reshape.
        slx_parent, sly_parent : slice
            slices defined relative to `origin` to match the location of the
            computed 2D spectrum.
        total_flux : float
            Total f_lambda flux in the thumbail within the segmentation
            region.
        """
        self.id = id
        ### lower left pixel of the `direct` array in native detector
        ### coordinates
        self.origin = origin
        self.pad = pad
        self.grow = grow
        ### Galactic extinction (sets self.MW_F99)
        self.MW_EBV = MW_EBV
        self.init_galactic_extinction(self.MW_EBV)
        self.fwcpos = fwcpos
        self.scale = scale
        ### Direct image
        if direct is None:
            direct = np.zeros((20,20), dtype=np.float32)
        self.direct = direct
        self.sh = self.direct.shape
        # NOTE(review): `dtype is not np.float32` compares a dtype instance
        # to the scalar type by identity, which is always True, so the array
        # is always recast here — harmless (fresh float32 copy) but wasteful.
        if self.direct.dtype is not np.float32:
            self.direct = np.cast[np.float32](self.direct)
        ### Segmentation image, defaults to all zeros
        if segmentation is None:
            self.seg = np.zeros_like(self.direct, dtype=np.float32)
        else:
            self.seg = segmentation
            # same always-True identity comparison as for `direct` above
            if self.seg.dtype is not np.float32:
                self.seg = np.cast[np.float32](self.seg)
        # total f_lambda flux of the object within its segmentation region
        self.total_flux = self.direct[self.seg == self.id].sum()
        ### Initialize attributes
        self.spectrum_1d = None
        self.is_cgs = False
        # object center in detector-frame coordinates
        self.xc = self.sh[1]/2+self.origin[1]
        self.yc = self.sh[0]/2+self.origin[0]
        # Sub-pixel centering of the exact center of the object, relative
        # to the center of the thumbnail
        self.xcenter = xcenter
        self.ycenter = ycenter
        self.beam = beam
        ## Config file: load from (instrument, filter, grism) triplet or
        ## accept a pre-loaded configuration object as-is
        if isinstance(conf, list):
            conf_f = grismconf.get_config_filename(conf[0], conf[1], conf[2])
            self.conf = grismconf.load_grism_config(conf_f)
        else:
            self.conf = conf
        # Get Pixel area map (xxx need to add test for WFC3)
        self.PAM_value = self.get_PAM_value(verbose=False)
        #print('xxx PAM!')
        self.process_config()
        self.yoffset = yoffset
        if yoffset != 0:
            #print('yoffset!', yoffset)
            self.add_ytrace_offset(yoffset)
def init_galactic_extinction(self, MW_EBV=0., R_V=utils.MW_RV):
"""
Initialize Fitzpatrick 99 Galactic extinction
Parameters
----------
MW_EBV : float
Local E(B-V)
R_V : float
Relation between specific and total extinction,
``a_v = r_v * ebv``.
Returns
-------
Sets `self.MW_F99` attribute, which is a callable function that
returns the extinction for a supplied array of wavelengths.
If MW_EBV <= 0, then sets `self.MW_F99 = None`.
"""
self.MW_F99 = None
if MW_EBV > 0:
self.MW_F99 = utils.MW_F99(MW_EBV*R_V, r_v=R_V)
    def process_config(self):
        """Process grism config file

        Evaluates the trace and sensitivity for the current beam at the
        object position and allocates the 2D model arrays.

        Parameters
        ----------
        none
        Returns
        -------
        Sets attributes that define how the dispersion is computed. See the
        attributes list for `~grizli.model.GrismDisperser`.
        """
        ### Get dispersion parameters at the reference position
        self.dx = self.conf.dxlam[self.beam] #+ xcenter #-xoff
        if self.grow > 1:
            self.dx = np.arange(self.dx[0]*self.grow, self.dx[-1]*self.grow)
        xoff = 0.
        if ('G14' in self.conf.conf_file) & (self.beam == 'A'):
            xoff = -0.5 # necessary for WFC3/IR G141, v4.32
        #xoff = 0. # suggested by ACS
        #xoff = -2.5 # test
        self.xoff = xoff
        # Trace (y) and wavelength for the pixels of the beam itself;
        # positions are de-padded and de-grown before evaluating the config
        self.ytrace_beam, self.lam_beam = self.conf.get_beam_trace(
            x=(self.xc+self.xcenter-self.pad)/self.grow,
            y=(self.yc+self.ycenter-self.pad)/self.grow,
            dx=(self.dx+self.xcenter*0+self.xoff)/self.grow,
            beam=self.beam, fwcpos=self.fwcpos)
        self.ytrace_beam *= self.grow
        ### Integer trace
        # Add/subtract 20 for handling int of small negative numbers
        # (int-cast truncates toward zero, which would round -0.5 to 0)
        # NOTE(review): `np.cast` was removed in NumPy 2.0 (use `.astype`);
        # same applies to `np.float`/`np.product` further below.
        dyc = np.cast[int](self.ytrace_beam+20)-20+1
        ### Account for pixel centering of the trace
        self.yfrac_beam = self.ytrace_beam - np.floor(self.ytrace_beam)
        ### Interpolate the sensitivity curve on the wavelength grid.
        ysens = self.lam_beam*0
        so = np.argsort(self.lam_beam)
        conf_sens = self.conf.sens[self.beam]
        # fold Galactic extinction into the sensitivity, if initialized
        if self.MW_F99 is not None:
            MWext = 10**(-0.4*(self.MW_F99(conf_sens['WAVELENGTH']*u.AA)))
        else:
            MWext = 1.
        ysens[so] = interp.interp_conserve_c(self.lam_beam[so],
                                             conf_sens['WAVELENGTH'],
                                             conf_sens['SENSITIVITY']*MWext,
                                             integrate=1, left=0, right=0)
        self.lam_sort = so
        ### Needs term of delta wavelength per pixel for flux densities
        #dl = np.abs(np.append(self.lam_beam[1] - self.lam_beam[0],
        #                      np.diff(self.lam_beam)))
        #ysens *= dl#*1.e-17
        self.sensitivity_beam = ysens
        ### Initialize the model arrays
        self.NX = len(self.dx)
        self.sh_beam = (self.sh[0], self.sh[1]+self.NX)
        self.modelf = np.zeros(np.product(self.sh_beam), dtype=np.float)
        self.model = self.modelf.reshape(self.sh_beam)
        self.idx = np.arange(self.modelf.size).reshape(self.sh_beam)
        ## Indices of the trace in the flattened array
        self.x0 = np.array(self.sh) // 2
        self.dxpix = self.dx - self.dx[0] + self.x0[1] #+ 1
        try:
            self.flat_index = self.idx[dyc + self.x0[0], self.dxpix]
        except IndexError:
            #print('Index Error', id, dyc.dtype, self.dxpix.dtype, self.x0[0], self.xc, self.yc, self.beam, self.ytrace_beam.max(), self.ytrace_beam.min())
            raise IndexError
        ###### Trace, wavelength, sensitivity across entire 2D array
        self.dxfull = np.arange(self.sh_beam[1], dtype=int)
        self.dxfull += self.dx[0]-self.x0[1]
        # self.ytrace, self.lam = self.conf.get_beam_trace(x=self.xc,
        #                                 y=self.yc, dx=self.dxfull, beam=self.beam)
        self.ytrace, self.lam = self.conf.get_beam_trace(
            x=(self.xc+self.xcenter-self.pad)/self.grow,
            y=(self.yc+self.ycenter-self.pad)/self.grow,
            dx=(self.dxfull+self.xcenter+xoff)/self.grow,
            beam=self.beam, fwcpos=self.fwcpos)
        self.ytrace *= self.grow
        # full-array sensitivity, same interpolation as for the beam above
        ysens = self.lam*0
        so = np.argsort(self.lam)
        ysens[so] = interp.interp_conserve_c(self.lam[so],
                                             conf_sens['WAVELENGTH'],
                                             conf_sens['SENSITIVITY']*MWext,
                                             integrate=1, left=0, right=0)
        # dl = np.abs(np.append(self.lam[1] - self.lam[0],
        #                       np.diff(self.lam)))
        # ysens *= dl#*1.e-17
        self.sensitivity = ysens
        # Slices of the parent array based on the origin parameter
        self.slx_parent = slice(self.origin[1] + self.dxfull[0] + self.x0[1],
                                self.origin[1] + self.dxfull[-1] + self.x0[1]+1)
        self.sly_parent = slice(self.origin[0], self.origin[0] + self.sh[0])
        #print 'XXX wavelength: %s %s %s' %(self.lam[-5:], self.lam_beam[-5:], dl[-5:])
    def add_ytrace_offset(self, yoffset):
        """Add an offset in Y to the spectral trace

        Re-evaluates the trace from the config, shifts it by `yoffset`
        pixels in the cross-dispersion direction and rebuilds the flattened
        trace indices (`self.flat_index`).

        Parameters
        ----------
        yoffset : float
            Y-offset to apply
        """
        self.ytrace_beam, self.lam_beam = self.conf.get_beam_trace(
            x=(self.xc+self.xcenter-self.pad)/self.grow,
            y=(self.yc+self.ycenter-self.pad)/self.grow,
            dx=(self.dx+self.xcenter*0+self.xoff)/self.grow,
            beam=self.beam, fwcpos=self.fwcpos)
        self.ytrace_beam *= self.grow
        self.yoffset = yoffset
        self.ytrace_beam += yoffset
        ### Integer trace
        # Add/subtract 20 for handling int of small negative numbers
        # (int-cast truncates toward zero)
        dyc = np.cast[int](self.ytrace_beam+20)-20+1
        ### Account for pixel centering of the trace
        self.yfrac_beam = self.ytrace_beam - np.floor(self.ytrace_beam)
        try:
            self.flat_index = self.idx[dyc + self.x0[0], self.dxpix]
        except IndexError:
            #print 'Index Error', id, self.x0[0], self.xc, self.yc, self.beam, self.ytrace_beam.max(), self.ytrace_beam.min()
            raise IndexError
        ###### Trace, wavelength, sensitivity across entire 2D array
        self.ytrace, self.lam = self.conf.get_beam_trace(
            x=(self.xc+self.xcenter-self.pad)/self.grow,
            y=(self.yc+self.ycenter-self.pad)/self.grow,
            dx=(self.dxfull+self.xcenter+self.xoff)/self.grow,
            beam=self.beam, fwcpos=self.fwcpos)
        self.ytrace *= self.grow
        self.ytrace += yoffset
    def compute_model(self, id=None, thumb=None, spectrum_1d=None,
                      in_place=True, modelf=None, scale=None, is_cgs=False):
        """Compute a model 2D grism spectrum
        Parameters
        ----------
        id : int
            Only consider pixels in the segmentation image (`self.seg`) with
            values equal to `id`.
        thumb : `~numpy.ndarray` with shape = `self.sh` or None
            Optional direct image. If `None` then use `self.direct`.
        spectrum_1d : [`~numpy.array`, `~numpy.array`] or None
            Optional 1D template [wave, flux] to use for the 2D grism model.
            If `None`, then implicitly assumes flat f_lambda spectrum.
        in_place : bool
            If True, put the 2D model in `self.model` and `self.modelf`,
            otherwise put the output in a clean array or preformed `modelf`.
        modelf : `~numpy.array` with shape = `self.sh_beam`
            Preformed (flat) array to which the 2D model is added, if
            `in_place` is False.
        scale : float or None
            Multiplicative factor to apply to the modeled spectrum.
        is_cgs : bool
            Units of `spectrum_1d` fluxes are f_lambda cgs.
        Returns
        -------
        model : `~numpy.ndarray`
            If `in_place` is False, returns the 2D model spectrum. Otherwise
            the result is stored in `self.model` and `self.modelf`.
        """
        if id is None:
            id = self.id
        else:
            self.id = id
        ### Template (1D) spectrum interpolated onto the wavelength grid
        if in_place:
            self.spectrum_1d = spectrum_1d
        if scale is None:
            scale = self.scale
        else:
            self.scale = scale
        if spectrum_1d is not None:
            xspec, yspec = spectrum_1d
            # per-pixel multiplier applied on top of the sensitivity curve,
            # ordered via lam_sort because lam_beam may be unsorted
            scale_spec = self.sensitivity_beam*0.
            int_func = interp.interp_conserve_c
            scale_spec[self.lam_sort] = int_func(self.lam_beam[self.lam_sort],
                                                 xspec, yspec)*scale
        else:
            scale_spec = scale
        self.is_cgs = is_cgs
        # template in absolute cgs units: divide by the object's total
        # direct-image flux so dispersing the thumbnail doesn't double-count it
        if is_cgs:
            scale_spec /= self.total_flux
        ### Output data, fastest is to compute in place but doesn't zero-out
        ### previous result
        if in_place:
            self.modelf *= 0
            modelf = self.modelf
        else:
            if modelf is None:
                modelf = self.modelf*0
        ### Optionally use a different direct image
        if thumb is None:
            thumb = self.direct
        else:
            if thumb.shape != self.sh:
                print("""
Error: `thumb` must have the same dimensions as the direct image! ({0:d},{1:d})
""".format(self.sh[0], self.sh[1]))
                return False
        ### Now compute the dispersed spectrum using the C helper
        # skip entirely where sensitivity*template is zero
        nonz = (self.sensitivity_beam*scale_spec) != 0
        if nonz.sum() > 0:
            status = disperse.disperse_grism_object(thumb, self.seg, id,
                                                    self.flat_index[nonz], self.yfrac_beam[nonz],
                                                    (self.sensitivity_beam*scale_spec)[nonz],
                                                    modelf, self.x0, np.array(self.sh),
                                                    self.x0, np.array(self.sh_beam))
        #print('yyy PAM')
        # apply the pixel-area-map correction computed in __init__
        modelf /= self.PAM_value #= self.get_PAM_value()
        if not in_place:
            return modelf
        else:
            self.model = modelf.reshape(self.sh_beam)
            return True
def init_optimal_profile(self):
"""Initilize optimal extraction profile
"""
if hasattr(self, 'psf_params'):
m = self.compute_model_psf(id=self.id, in_place=False)
else:
m = self.compute_model(id=self.id, in_place=False)
m = m.reshape(self.sh_beam)
m[m < 0] = 0
self.optimal_profile = m/m.sum(axis=0)
def optimal_extract(self, data, bin=0, ivar=1., weight=1.):
"""`Horne (1986) <http://adsabs.harvard.edu/abs/1986PASP...98..609H>`_ optimally-weighted 1D extraction
Parameters
----------
data : `~numpy.ndarray` with shape `self.sh_beam`
2D data to extract
bin : int, optional
Simple boxcar averaging of the output 1D spectrum
ivar : float or `~numpy.ndarray` with shape `self.sh_beam`
Inverse variance array or scalar float that multiplies the
optimal weights
weight : TBD
Returns
-------
wave, opt_flux, opt_rms : `~numpy.array`
`wave` is the wavelength of 1D array
`opt_flux` is the optimally-weighted 1D extraction
`opt_rms` is the weighted uncertainty of the 1D extraction
All are optionally binned in wavelength if `bin` > 1.
"""
import scipy.ndimage as nd
if not hasattr(self, 'optimal_profile'):
self.init_optimal_profile()
if data.shape != self.sh_beam:
print("""
`data` ({0},{1}) must have the same shape as the data array ({2},{3})
""".format(data.shape[0], data.shape[1], self.sh_beam[0],
self.sh_beam[1]))
return False
if not isinstance(ivar, float):
if ivar.shape != self.sh_beam:
print("""
`ivar` ({0},{1}) must have the same shape as the data array ({2},{3})
""".format(ivar.shape[0], ivar.shape[1], self.sh_beam[0],
self.sh_beam[1]))
return False
num = self.optimal_profile*data*ivar*weight
den = self.optimal_profile**2*ivar*weight
opt_flux = num.sum(axis=0)/den.sum(axis=0)
opt_var = 1./den.sum(axis=0)
if bin > 1:
kern = np.ones(bin, dtype=float)/bin
opt_flux = nd.convolve(opt_flux, kern)[bin // 2::bin]
opt_var = nd.convolve(opt_var, kern**2)[bin // 2::bin]
wave = self.lam[bin // 2::bin]
else:
wave = self.lam
opt_rms = np.sqrt(opt_var)
opt_rms[opt_var == 0] = 0
return wave, opt_flux, opt_rms
def trace_extract(self, data, r=0, bin=0, ivar=1., dy0=0):
"""Aperture extraction along the trace
Parameters
----------
data : array-like
Data array with dimenions equivalent to those of `self.model`
r : int
Radius of of the aperture to extract, in pixels. The extraction
will be performed from `-r` to `+r` pixels below and above the
central pixel of the trace.
bin : int, optional
Simple boxcar averaging of the output 1D spectrum
ivar : float or `~numpy.ndarray` with shape `self.sh_beam`
Inverse variance array or scalar float that multiplies the
optimal weights
dy0 : float
Central pixel to extract, relative to the central pixel of
the trace
Returns
-------
wave, opt_flux, opt_rms : `~numpy.array`
`wave` is the wavelength of 1D array
`opt_flux` is the 1D aperture extraction
`opt_rms` is the uncertainty of the 1D extraction, derived from
the sum of the pixel variances within the aperture
All are optionally binned in wavelength if `bin` > 1.
"""
dy = np.cast[int](np.round(self.ytrace+dy0))
aper = np.zeros_like(self.model)
y0 = self.sh_beam[0] // 2
for d in range(-r, r+1):
for i in range(self.sh_beam[1]):
aper[y0+d+dy[i]-1,i] = 1
var = 1./ivar
if not np.isscalar(ivar):
var[ivar == 0] = 0
opt_flux = np.sum(data*aper, axis=0)
opt_var = np.sum(var*aper, axis=0)
if bin > 1:
kern = np.ones(bin, dtype=float)/bin
opt_flux = nd.convolve(opt_flux, kern)[bin // 2::bin]
opt_var = nd.convolve(opt_var, kern**2)[bin // 2::bin]
wave = self.lam[bin // 2::bin]
else:
wave = self.lam
opt_rms = np.sqrt(opt_var)
return wave, opt_flux, opt_rms
def contained_in_full_array(self, full_array):
"""Check if subimage slice is fully contained within larger array
"""
sh = full_array.shape
if (self.sly_parent.start < 0) | (self.slx_parent.start < 0):
return False
if (self.sly_parent.stop >= sh[0]) | (self.slx_parent.stop >= sh[1]):
return False
return True
def add_to_full_image(self, data, full_array):
"""Add spectrum cutout back to the full array
`data` is *added* to `full_array` in place, so, for example, to
subtract `self.model` from the full array, call the function with
>>> self.add_to_full_image(-self.model, full_array)
Parameters
----------
data : `~numpy.ndarray` shape `self.sh_beam` (e.g., `self.model`)
Spectrum cutout
full_array : `~numpy.ndarray`
Full detector array, where the lower left pixel of `data` is given
by `origin`.
"""
if self.contained_in_full_array(full_array):
full_array[self.sly_parent, self.slx_parent] += data
else:
sh = full_array.shape
xpix = np.arange(self.sh_beam[1])
xpix += self.origin[1] + self.dxfull[0] + self.x0[1]
ypix = np.arange(self.sh_beam[0])
ypix += self.origin[0]
okx = (xpix >= 0) & (xpix < sh[1])
oky = (ypix >= 0) & (ypix < sh[1])
if (okx.sum() == 0) | (oky.sum() == 0):
return False
sly = slice(ypix[oky].min(), ypix[oky].max()+1)
slx = slice(xpix[okx].min(), xpix[okx].max()+1)
full_array[sly, slx] += data[oky,:][:,okx]
#print sly, self.sly_parent, slx, self.slx_parent
return True
def cutout_from_full_image(self, full_array):
"""Get beam-sized cutout from a full image
Parameters
----------
full_array : `~numpy.ndarray`
Array of the size of the parent array from which the cutout was
extracted. If possible, the function first tries the slices with
>>> sub = full_array[self.sly_parent, self.slx_parent]
and then computes smaller slices for cases where the beam spectrum
falls off the edge of the parent array.
Returns
-------
cutout : `~numpy.ndarray`
Array with dimensions of `self.model`.
"""
#print self.sly_parent, self.slx_parent, full_array.shape
if self.contained_in_full_array(full_array):
data = full_array[self.sly_parent, self.slx_parent]
else:
sh = full_array.shape
###
xpix = np.arange(self.sh_beam[1])
xpix += self.origin[1] + self.dxfull[0] + self.x0[1]
ypix = np.arange(self.sh_beam[0])
ypix += self.origin[0]
okx = (xpix >= 0) & (xpix < sh[1])
oky = (ypix >= 0) & (ypix < sh[1])
if (okx.sum() == 0) | (oky.sum() == 0):
return False
sly = slice(ypix[oky].min(), ypix[oky].max()+1)
slx = slice(xpix[okx].min(), xpix[okx].max()+1)
data = self.model*0.
data[oky,:][:,okx] += full_array[sly, slx]
return data
def twod_axis_labels(self, wscale=1.e4, limits=None, mpl_axis=None):
"""Set 2D wavelength (x) axis labels based on spectral parameters
Parameters
----------
wscale : float
Scale factor to divide from the wavelength units. The default
value of 1.e4 results in wavelength ticks in microns.
limits : None, list = `[x0, x1, dx]`
Will automatically use the whole wavelength range defined by the
spectrum. To change, specify `limits = [x0, x1, dx]` to
interpolate `self.beam.lam_beam` between x0*wscale and x1*wscale.
mpl_axis : `matplotlib.axes._axes.Axes`
Plotting axis to place the labels, e.g.,
>>> fig = plt.figure()
>>> mpl_axis = fig.add_subplot(111)
Returns
-------
Nothing if `mpl_axis` is supplied, else pixels and wavelengths of the
tick marks.
"""
xarr = np.arange(len(self.lam))
if limits:
xlam = np.arange(limits[0], limits[1], limits[2])
xpix = np.interp(xlam, self.lam/wscale, xarr)
else:
xlam = np.unique(np.cast[int](self.lam / 1.e4*10)/10.)
xpix = np.interp(xlam, self.lam/wscale, xarr)
if mpl_axis is None:
return xpix, xlam
else:
mpl_axis.set_xticks(xpix)
mpl_axis.set_xticklabels(xlam)
def twod_xlim(self, x0, x1=None, wscale=1.e4, mpl_axis=None):
"""Set wavelength (x) axis limits on a 2D spectrum
Parameters
----------
x0 : float or list/tuple of floats
minimum or (min,max) of the plot limits
x1 : float or None
max of the plot limits if x0 is a float
wscale : float
Scale factor to divide from the wavelength units. The default
value of 1.e4 results in wavelength ticks in microns.
mpl_axis : `matplotlib.axes._axes.Axes`
Plotting axis to place the labels.
Returns
-------
Nothing if `mpl_axis` is supplied else pixels the desired wavelength
limits.
"""
if isinstance(x0, list) | isinstance(x0, tuple):
x0, x1 = x0[0], x0[1]
xarr = np.arange(len(self.lam))
xpix = np.interp([x0,x1], self.lam/wscale, xarr)
if mpl_axis:
mpl_axis.set_xlim(xpix)
else:
return xpix
    def x_init_epsf(self, flat_sensitivity=False, psf_params=None, psf_filter='F140W', yoff=0.0, skip=0.5, get_extended=False, seg_mask=True):
        """Initialize ePSF fitting for point sources

        Builds the sparse design matrix `self.A_psf` whose columns are the
        (optionally segmentation-masked) ePSF evaluated at sampled positions
        along the trace, plus the matching wavelength grid `self.lam_psf`
        and sensitivity `self.psf_sensitivity`, then renormalizes the model.
        TBD
        """
        import scipy.sparse
        import scipy.ndimage
        #print('SKIP: {0}'.format(skip))
        EPSF = utils.EffectivePSF()
        # psf_params = [flux, dx, dy]; missing flux defaults to the
        # object's total direct-image flux
        if psf_params is None:
            self.psf_params = [self.total_flux, 0., 0.]
        else:
            self.psf_params = psf_params
        if self.psf_params[0] is None:
            self.psf_params[0] = self.total_flux#/photflam_list[psf_filter]
        origin = np.array(self.origin) - np.array(self.pad)
        self.psf_yoff = yoff
        self.psf_filter = psf_filter
        self.psf = EPSF.get_ePSF(self.psf_params, origin=origin, shape=self.sh, filter=psf_filter, get_extended=get_extended)
        #print('XXX', self.psf_params[0], self.psf.sum())
        # self.psf_params[0] /= self.psf.sum()
        # self.psf /= self.psf.sum()
        # Center in detector coords
        y0, x0 = np.array(self.sh)/2.-1
        xd = x0+self.psf_params[1] + origin[1]
        yd = y0+self.psf_params[2] + origin[0]
        # Position-dependent PSFs at three IR filter wavelengths for
        # interpolation in wavelength below
        psf_xy_lam = []
        psf_ext_lam = []
        for i, filter in enumerate(['F105W', 'F125W', 'F160W']):
            psf_xy_lam.append(EPSF.get_at_position(x=xd, y=yd, filter=filter))
            psf_ext_lam.append(EPSF.extended_epsf[filter])
        filt_ix = np.arange(3)
        # pivot wavelengths (Angstroms) of F105W/F125W/F160W
        filt_lam = np.array([1.0551, 1.2486, 1.5369])*1.e4
        yp_beam, xp_beam = np.indices(self.sh_beam)
        # sample positions along the beam every `skip` pixels
        xarr = np.arange(0,self.lam_beam.shape[0], skip)
        xarr = xarr[xarr <= self.lam_beam.shape[0]-1]
        xbeam = np.arange(self.lam_beam.shape[0])*1.
        #xbeam += 1.
        #yoff = 0 #-0.15
        psf_model = self.model*0.
        A_psf = []
        lam_psf = []
        lam_offset = self.psf_params[1] #self.sh[1]/2 - self.psf_params[1] - 1
        self.lam_offset = lam_offset
        for xi in xarr:
            # trace position and wavelength at this sample
            yi = np.interp(xi, xbeam, self.ytrace_beam)
            li = np.interp(xi, xbeam, self.lam_beam)
            dx = xp_beam-self.psf_params[1]-xi-x0
            dy = yp_beam-self.psf_params[2]-yi+yoff-y0
            # wavelength-dependent: -1/10 are out-of-range sentinels
            ii = np.interp(li, filt_lam, filt_ix, left=-1, right=10)
            if ii == -1:
                psf_xy_i = psf_xy_lam[0]*1
                psf_ext_i = psf_ext_lam[0]*1
            elif ii == 10:
                psf_xy_i = psf_xy_lam[2]*1
                psf_ext_i = psf_ext_lam[2]*1
            else:
                # linear interpolation between the two bracketing filters
                ni = int(ii)
                f = 1-(li-filt_lam[ni])/(filt_lam[ni+1]-filt_lam[ni])
                psf_xy_i = f*psf_xy_lam[ni] + (1-f)*psf_xy_lam[ni+1]
                psf_ext_i = f*psf_ext_lam[ni] + (1-f)*psf_ext_lam[ni+1]
            if not get_extended:
                psf_ext_i = None
            psf = EPSF.eval_ePSF(psf_xy_i, dx, dy, extended_data=psf_ext_i)*self.psf_params[0]
            #print(xi, psf.sum())
            if seg_mask:
                # NOTE(review): this dilated segmentation mask is
                # loop-invariant and could be hoisted out of the loop
                segm = nd.maximum_filter((self.seg == self.id)*1., size=7)
                #yps, xps = np.indices(self.sh)
                seg_i = nd.map_coordinates(segm, np.array([dx+x0, dy+y0]), order=1, mode='constant', cval=0.0, prefilter=True) > 0
            else:
                seg_i = 1
            A_psf.append((psf*seg_i).flatten())
            lam_psf.append(li)
        # Sensitivity
        self.lam_psf = np.array(lam_psf)
        #photflam = photflam_list[psf_filter]
        photflam = 1
        if flat_sensitivity:
            psf_sensitivity = np.abs(np.gradient(self.lam_psf))*photflam
        else:
            sens = self.conf.sens[self.beam]
            # so = np.argsort(self.lam_psf)
            # s_i = interp.interp_conserve_c(self.lam_psf[so], sens['WAVELENGTH'], sens['SENSITIVITY'], integrate=1)
            # psf_sensitivity = s_i*0.
            # psf_sensitivity[so] = s_i
            if self.MW_F99 is not None:
                MWext = 10**(-0.4*(self.MW_F99(sens['WAVELENGTH']*u.AA)))
            else:
                MWext = 1.
            psf_sensitivity = self.get_psf_sensitivity(sens['WAVELENGTH'], sens['SENSITIVITY']*MWext)
        self.psf_sensitivity = psf_sensitivity
        # columns of A_psf are flattened, masked PSF images
        self.A_psf = scipy.sparse.csr_matrix(np.array(A_psf).T)
        #self.init_extended_epsf()
        self.PAM_value = self.get_PAM_value()
        self.psf_scale_to_data = 1.
        self.psf_renorm = 1.
        self.renormalize_epsf_model()
        self.init_optimal_profile()
def get_psf_sensitivity(self, wave, sensitivity):
"""
Integrate the sensitivity curve to the wavelengths for the
PSF model
"""
so = np.argsort(self.lam_psf)
s_i = interp.interp_conserve_c(self.lam_psf[so], wave, sensitivity, integrate=1)
psf_sensitivity = s_i*0.
psf_sensitivity[so] = s_i
return psf_sensitivity
    def renormalize_epsf_model(self, spectrum_1d=None, verbose=False):
        """
        Ensure the ePSF model normalization is correct.

        Compares the integrated sensitivity-weighted template flux with the
        summed 2D ePSF model and rescales `self.A_psf` in place so the two
        agree; also folds in the Pixel Area Map correction. Sets
        `self.psf_renorm` and `self.psf_scale_to_data`.
        """
        if not hasattr(self, 'A_psf'):
            print('ePSF not initialized')
            return False
        # default template: flat 1e-17 f_lambda spectrum over the beam
        if spectrum_1d is None:
            dl = 0.1
            flat_x = np.arange(self.lam.min()-10, self.lam.max()+10, dl)
            flat_y = flat_x*0.+1.e-17
            spectrum_1d = [flat_x, flat_y]
        tab = self.conf.sens[self.beam]
        if self.MW_F99 is not None:
            MWext = 10**(-0.4*(self.MW_F99(tab['WAVELENGTH']*u.AA)))
        else:
            MWext = 1.
        # expected total counts from the template through the sensitivity
        sens_i = interp.interp_conserve_c(spectrum_1d[0], tab['WAVELENGTH'], tab['SENSITIVITY']*MWext, integrate=1, left=0, right=0)
        total_sens = np.trapz(spectrum_1d[1]*sens_i/np.gradient(spectrum_1d[0]), spectrum_1d[0])
        # actual total of the current ePSF model for the same template
        m = self.compute_model_psf(spectrum_1d=spectrum_1d, is_cgs=True, in_place=False).reshape(self.sh_beam)
        #m2 = self.compute_model(spectrum_1d=[flat_x, flat_y], is_cgs=True, in_place=False).reshape(self.sh_beam)
        renorm = total_sens / m.sum()
        self.psf_renorm = renorm
        # Scale model to data, depends on Pixel Area Map and PSF normalization
        scale_to_data = self.PAM_value #* (self.psf_params[0]/0.975)
        self.psf_scale_to_data = scale_to_data
        renorm /= scale_to_data # renorm PSF
        if verbose:
            print('Renorm ePSF model: {0:0.3f}'.format(renorm))
        self.A_psf *= renorm
def get_PAM_value(self, verbose=False):
"""
Apply Pixel Area Map correction to WFC3 effective PSF model
http://www.stsci.edu/hst/wfc3/pam/pixel_area_maps
"""
confp = self.conf.conf
if ('INSTRUMENT' in confp) & ('CAMERA' in confp):
if '{0}-{1}'.format(confp['INSTRUMENT'], confp['CAMERA']) != 'WFC3-IR':
return 1
else:
return 1
try:
pam_data = pyfits.open(os.getenv('iref')+'ir_wfc3_map.fits')[1].data
pam_value = pam_data[int(self.yc-self.pad), int(self.xc-self.pad)]
except:
pam_value = 1
if verbose:
print ('PAM correction at x={0}, y={1}: {2:.3f}'.format(self.xc-self.pad, self.yc-self.pad, pam_value))
return pam_value
    def init_extended_epsf(self):
        """
        Hacky code for adding extended component of the EPSFs

        Loads per-wavelength spline models of the extended PSF wings from
        ``$GRIZLI/CONF/ePSF_extended_splines.npy`` and evaluates them along
        the trace into `self.ext_psf_data`. Returns False if the spline
        file is not found.
        """
        ext_file = os.path.join(GRIZLI_PATH, 'CONF',
                                'ePSF_extended_splines.npy')
        if not os.path.exists(ext_file):
            return False
        # NOTE(review): the .npy file stores a pickled dict of splines;
        # on NumPy >= 1.16.4 this load would need allow_pickle=True — confirm
        bg_splines = np.load(ext_file)[0]
        spline_waves = np.array(list(bg_splines.keys()))
        spline_waves.sort()
        spl_ix = np.arange(len(spline_waves))
        # cross-dispersion coordinate grid, centered on the beam
        yarr = np.arange(self.sh_beam[0]) - self.sh_beam[0]/2.+1
        dy = self.psf_params[2]
        spl_data = self.model * 0.
        for i in range(self.sh_beam[1]):
            dy_i = dy + self.ytrace[i]
            # fractional index of this column's wavelength in the spline grid
            x_i = np.interp(self.lam[i], spline_waves, spl_ix)
            if (x_i == 0) | (x_i == len(bg_splines)-1):
                spl_data[:,i] = bg_splines[spline_waves[int(x_i)]](yarr-dy_i)
            else:
                # blend the two bracketing splines linearly in index
                f = x_i-int(x_i)
                sp = bg_splines[spline_waves[int(x_i)]](yarr-dy_i)*(1-f)
                sp += bg_splines[spline_waves[int(x_i)+1]](yarr-dy_i)*f
                spl_data[:,i] = sp
        self.ext_psf_data = np.maximum(spl_data, 0)
    def compute_model_psf(self, id=None, spectrum_1d=None, in_place=True, is_cgs=False):
        """Compute the dispersed 2D model using the ePSF morphology matrix.

        Parameters
        ----------
        id : None
            Unused here; kept for signature compatibility with
            `compute_model`.
        spectrum_1d : [wavelengths, flux] or None
            1D spectrum to disperse.  If None, a flat spectrum
            (unit coefficients) is used.
        in_place : bool
            If True, store the result in the `model`/`modelf` attributes
            and return True; otherwise return the flattened model array.
        is_cgs : bool
            If False, scale the coefficients by `self.total_flux`.

        Returns
        -------
        True if `in_place`, otherwise the flattened model array.
        """
        if spectrum_1d is None:
            #modelf = np.array(self.A_psf.sum(axis=1)).flatten()
            #model = model.reshape(self.sh_beam)
            coeffs = np.ones(self.A_psf.shape[1])
            if not is_cgs:
                coeffs *= self.total_flux
        else:
            dx = np.diff(self.lam_psf)[0]
            if dx < 0:
                # Wavelength grid runs backwards: reverse for the
                # flux-conserving interpolation, then un-reverse the result
                coeffs = interp.interp_conserve_c(self.lam_psf[::-1],
                                                  spectrum_1d[0],
                                                  spectrum_1d[1])[::-1]
            else:
                coeffs = interp.interp_conserve_c(self.lam_psf,
                                                  spectrum_1d[0],
                                                  spectrum_1d[1])

            if not is_cgs:
                coeffs *= self.total_flux

        # Dot the PSF design matrix into the sensitivity-weighted coeffs
        modelf = self.A_psf.dot(coeffs*self.psf_sensitivity)
        model = modelf.reshape(self.sh_beam)

        # if hasattr(self, 'ext_psf_data'):
        #     model += self.ext_psf_data*model.sum(axis=0)
        #     modelf = model.flatten()
        #     model = modelf.reshape(self.sh_beam)

        if in_place:
            # Cache the inputs along with the computed model
            self.spectrum_1d = spectrum_1d
            self.is_cgs = is_cgs

            self.modelf = modelf #.flatten()
            self.model = model
            #self.modelf = self.model.flatten()
            return True
        else:
            return modelf #.flatten()
class ImageData(object):
    """Container for image data with WCS, etc."""
    def __init__(self, sci=None, err=None, dq=None,
                 header=None, wcs=None, photflam=1., photplam=1.,
                 origin=[0,0], pad=0,
                 instrument='WFC3', filter='G141', pupil=None, hdulist=None,
                 sci_extn=1):
        """
        Parameters
        ----------
        sci : `~numpy.ndarray`
            Science data

        err, dq : `~numpy.ndarray` or None
            Uncertainty and DQ data.  Defaults to zero if None

        header : `~astropy.io.fits.Header`
            Associated header with `data` that contains WCS information

        wcs : `~astropy.wcs.WCS` or None
            WCS solution to use.  If `None` will derive from the `header`.

        photflam : float
            Multiplicative conversion factor to scale `data` to set units
            to f_lambda flux density.  If data is grism spectra, then use
            photflam=1

        origin : [int, int]
            Origin of lower left pixel in detector coordinates

        hdulist : `~astropy.io.fits.HDUList`, optional
            If specified, read `sci`, `err`, `dq` from the HDU list from a
            FITS file, e.g., WFC3 FLT.

        sci_extn : int
            Science EXTNAME to read from the HDUList, for example,
            `sci` = hdulist['SCI',`sci_extn`].

        Attributes
        ----------
        parent_file : str
            Filename of the parent from which the data were extracted

        data : dict
            Dictionary to store pixel data, with keys 'SCI', 'DQ', and 'ERR'.
            If a reference image has been supplied and processed, will also
            have an entry 'REF'.  The data arrays can also be addressed with
            the `__getitem__` method, i.e.,

            >>> self = ImageData(...)
            >>> print(np.median(self['SCI']))

        pad : int
            Additional padding around the nominal image dimensions

        wcs : `~astropy.wcs.WCS`
            WCS of the data array

        header : `~astropy.io.fits.Header`
            FITS header

        filter, instrument, photflam, photplam, APZP : str, float
            Parameters taken from the header

        ref_file, ref_photlam, ref_photplam, ref_filter : str, float
            Corresponding parameters for the reference image, if necessary.
        """
        import copy

        ### Easy way, get everything from an image HDU list
        if isinstance(hdulist, pyfits.HDUList):
            if ('REF',sci_extn) in hdulist:
                ref_h = hdulist['REF', sci_extn].header
                # Stored REF data are in f-lambda units; convert back to
                # native counts by dividing out the recorded PHOTFLAM
                ref_data = hdulist['REF', sci_extn].data/ref_h['PHOTFLAM']
                ref_data = np.cast[np.float32](ref_data)

                ref_file = ref_h['REF_FILE']
                ref_photflam = 1.
                ref_photplam = ref_h['PHOTPLAM']
                ref_filter = ref_h['FILTER']
            else:
                ref_data = None

            if ('SCI',sci_extn) in hdulist:
                sci = np.cast[np.float32](hdulist['SCI',sci_extn].data)
                err = np.cast[np.float32](hdulist['ERR',sci_extn].data)
                dq = np.cast[np.int16](hdulist['DQ',sci_extn].data)
                base_extn = ('SCI', sci_extn)
            else:
                if ref_data is None:
                    raise KeyError ('No SCI or REF extensions found')

                # Doesn't have SCI, get from ref
                sci = err = ref_data*0.+1
                dq = np.zeros(sci.shape, dtype=np.int16)
                base_extn = ('REF', sci_extn)

            # Origin of a previously-extracted cutout, if available
            if 'ORIGINX' in hdulist[base_extn].header:
                h0 = hdulist[base_extn].header
                origin = [h0['ORIGINY'], h0['ORIGINX']]
            else:
                origin = [0,0]

            self.sci_extn = sci_extn
            header = hdulist[base_extn].header.copy()

            if 'PARENT' in header:
                self.parent_file = header['PARENT']
            else:
                self.parent_file = hdulist.filename()

            # Flag whether distortion is described by a lookup table
            # (CPDIS keywords) rather than SIP polynomials
            if 'CPDIS1' in header:
                if 'Lookup' in header['CPDIS1']:
                    self.wcs_is_lookup = True
                else:
                    self.wcs_is_lookup = False
            else:
                self.wcs_is_lookup = False

            # Find a header (extension or primary) with INSTRUME
            status = False
            for ext in [base_extn, 0]:
                h = hdulist[ext].header
                if 'INSTRUME' in h:
                    status = True
                    break

            if not status:
                msg = ('Couldn\'t find \'INSTRUME\' keyword in the headers' +
                       ' of extensions 0 or (SCI,{0:d})'.format(sci_extn))
                raise KeyError (msg)

            instrument = h['INSTRUME']
            filter = utils.get_hst_filter(h)
            if 'PUPIL' in h:
                pupil = h['PUPIL']

            # Photometric calibration: prefer header keywords, fall back
            # to the module-level lookup tables
            if 'PHOTFLAM' in h:
                photflam = h['PHOTFLAM']
            else:
                photflam = photflam_list[filter]

            if 'PHOTPLAM' in h:
                photplam = h['PHOTPLAM']
            else:
                photplam = photplam_list[filter]

            self.mdrizsky = 0.
            if 'MDRIZSKY' in header:
                #sci -= header['MDRIZSKY']
                self.mdrizsky = header['MDRIZSKY']

            ### ACS bunit
            #self.exptime = 1.
            if 'EXPTIME' in hdulist[0].header:
                self.exptime = hdulist[0].header['EXPTIME']
            else:
                # JWST exposures use EFFEXPTM
                self.exptime = hdulist[0].header['EFFEXPTM']

            # if 'BUNIT' in header:
            #     if header['BUNIT'] == 'ELECTRONS':
            #         self.exptime = hdulist[0].header['EXPTIME']
            #         # sci /= self.exptime
            #         # err /= self.exptime

            # Subtract the drizzle sky value
            sci = (sci-self.mdrizsky)

            # ACS images in ELECTRONS: convert to count rate
            if 'BUNIT' in header:
                if header['BUNIT'] == 'ELECTRONS':
                    sci /= self.exptime
                    err /= self.exptime

            # Grism exposures stay in e-/s rather than f-lambda
            if filter.startswith('G'):
                photflam = 1

            if (instrument == 'NIRCAM') & (pupil is not None):
                # NIRCAM grisms are in the PUPIL wheel
                if pupil.startswith('G'):
                    photflam = 1

            if 'PAD' in header:
                pad = header['PAD']

            self.grow = 1
            if 'GROW' in header:
                self.grow = header['GROW']

        else:
            # Data passed directly as arrays rather than an HDUList
            if sci is None:
                sci = np.zeros((1014,1014))

            self.parent_file = 'Unknown'
            self.sci_extn = None
            self.grow = 1
            ref_data = None

            if 'EXPTIME' in header:
                self.exptime = header['EXPTIME']
            else:
                self.exptime = 1.

            if 'MDRIZSKY' in header:
                self.mdrizsky = header['MDRIZSKY']
            else:
                self.mdrizsky = 0.

            # Same lookup-table distortion check as above
            if 'CPDIS1' in header:
                if 'Lookup' in header['CPDIS1']:
                    self.wcs_is_lookup = True
                else:
                    self.wcs_is_lookup = False
            else:
                self.wcs_is_lookup = False

        self.is_slice = False

        ### Array parameters
        self.pad = pad
        self.origin = origin
        self.fwcpos = None
        self.MW_EBV = 0.

        self.data = OrderedDict()
        # Store science data scaled to f-lambda (photflam=1 for grisms)
        self.data['SCI'] = sci*photflam

        self.sh = np.array(self.data['SCI'].shape)

        ### Header-like parameters
        self.filter = filter
        self.pupil = pupil

        self.instrument = instrument
        self.header = header

        if 'ISCUTOUT' in self.header:
            self.is_slice = self.header['ISCUTOUT']

        self.header['EXPTIME'] = self.exptime

        self.photflam = photflam
        self.photplam = photplam
        # AB zeropoint; NB the photflam term is multiplied by zero here
        self.ABZP = (0*np.log10(self.photflam) - 21.10 -
                     5*np.log10(self.photplam) + 18.6921)
        self.thumb_extension = 'SCI'

        if err is None:
            self.data['ERR'] = np.zeros_like(self.data['SCI'])
        else:
            self.data['ERR'] = err*photflam
            if self.data['ERR'].shape != tuple(self.sh):
                raise ValueError ('err and sci arrays have different shapes!')

        if dq is None:
            self.data['DQ'] = np.zeros_like(self.data['SCI'], dtype=np.int16)
        else:
            self.data['DQ'] = dq
            if self.data['DQ'].shape != tuple(self.sh):
                raise ValueError ('err and dq arrays have different shapes!')

        if ref_data is None:
            self.data['REF'] = None
            self.ref_file = None
            self.ref_photflam = None
            self.ref_photplam = None
            self.ref_filter = None
        else:
            self.data['REF'] = ref_data
            self.ref_file = ref_file
            self.ref_photflam = ref_photflam
            self.ref_photplam = ref_photplam
            self.ref_filter = ref_filter

        self.wcs = None
        # JWST exposures need an approximate SIP header generated first
        if instrument in ['NIRISS','NIRCAM']:
            self.update_jwst_wcsheader(hdulist)

        if self.header is not None:
            if wcs is None:
                self.get_wcs()
            else:
                self.wcs = wcs.copy()
        else:
            self.header = pyfits.Header()

        # Detector chip
        if 'CCDCHIP' in self.header:
            self.ccdchip = self.header['CCDCHIP']
        else:
            self.ccdchip = 1

        # For NIRISS (filter-wheel position angle)
        if 'FWCPOS' in self.header:
            self.fwcpos = self.header['FWCPOS']
        else:
            self.fwcpos = None

        # Galactic extinction
        if 'MW_EBV' in self.header:
            self.MW_EBV = self.header['MW_EBV']
        else:
            self.MW_EBV = 0.
def unset_dq(self):
"""Flip OK data quality bits using utils.unset_dq_bits
OK bits are defined as
>>> okbits_instrument = {'WFC3': 32+64+512, # blob OK
'NIRISS': 0,
'WFIRST': 0,}
"""
okbits_instrument = {'WFC3': 32+64+512, # blob OK
'NIRISS': 0,
'WFIRST': 0,}
if self.instrument not in okbits_instrument:
okbits = 1
else:
okbits = okbits_instrument[self.instrument]
self.data['DQ'] = utils.unset_dq_bits(self.data['DQ'], okbits=okbits)
def flag_negative(self, sigma=-3):
"""Flag negative data values with dq=4
Parameters
----------
sigma : float
Threshold for setting bad data
Returns
-------
n_negative : int
Number of flagged negative pixels
If `self.data['ERR']` is zeros, do nothing.
"""
if self.data['ERR'].max() == 0:
return 0
bad = self.data['SCI'] < sigma*self.data['ERR']
self.data['DQ'][bad] |= 4
return bad.sum()
def update_jwst_wcsheader(self, hdulist):
"""
For now generate an approximate SIP header for NIRISS
"""
from . import jwst as _jwst
datamodel = _jwst.img_with_wcs(hdulist)
sip_header = _jwst.model_wcs_header(datamodel, get_sip=True)
for k in sip_header:
self.header[k] = sip_header[k]
# Remove PC
for i in [1,2]:
for j in [1,2]:
k = 'PC{0}_{1}'.format(i,j)
if k in self.header:
self.header.remove(k)
    def get_wcs(self):
        """Get WCS from header"""
        import numpy.linalg
        import stwcs

        if self.wcs_is_lookup:
            # Lookup-table (CPDIS) distortion, e.g., ACS/WFC: must
            # initialize `HSTWCS` from the full parent FITS file
            if 'CCDCHIP' in self.header:
                # CCDCHIP -> SCI extension mapping for ACS/WFC
                ext = {1:2,2:1}[self.header['CCDCHIP']]
            else:
                ext = self.header['EXTVER']

            if os.path.exists(self.parent_file):
                fobj = pyfits.open(self.parent_file)
                wcs = stwcs.wcsutil.hstwcs.HSTWCS(fobj=fobj, ext=('SCI',ext))
                if self.pad > 0:
                    wcs = self.add_padding_to_wcs(wcs, pad=self.pad)
            else:
                # Get WCS from a stripped wcs.fits file (from self.save_wcs)
                # already padded.
                wcsfile = self.parent_file.replace('.fits', '.{0:02d}.wcs.fits'.format(ext))
                fobj = pyfits.open(wcsfile)
                fh = fobj[0].header
                if fh['NAXIS'] == 0:
                    # Reconstruct the image dimensions from CRPIX
                    fh['NAXIS'] = 2
                    fh['NAXIS1'] = int(fh['CRPIX1']*2)
                    fh['NAXIS2'] = int(fh['CRPIX2']*2)

                wcs = stwcs.wcsutil.hstwcs.HSTWCS(fobj=fobj, ext=0)
                #print('XXX WCS',wcs)

                # Object is a cutout
                if self.is_slice:
                    slx = slice(self.origin[1], self.origin[1]+self.sh[1])
                    sly = slice(self.origin[0], self.origin[0]+self.sh[0])

                    wcs = self.get_slice_wcs(wcs, slx=slx, sly=sly)

        else:
            # SIP distortion can be read directly from the header
            fobj = None
            wcs = pywcs.WCS(self.header, relax=True, fobj=fobj)

        if not hasattr(wcs, 'pscale'):
            wcs.pscale = utils.get_wcs_pscale(wcs)

        self.wcs = wcs
@staticmethod
def add_padding_to_wcs(wcs_in, pad=200):
"""Pad the appropriate WCS keywords"""
wcs = wcs_in.deepcopy()
for attr in ['naxis1', '_naxis1', 'naxis2', '_naxis2']:
if hasattr(wcs, attr):
value = wcs.__getattribute__(attr)
if value is not None:
wcs.__setattr__(attr, value+2*pad)
wcs.naxis1 = wcs._naxis1
wcs.naxis2 = wcs._naxis2
wcs.wcs.crpix[0] += pad
wcs.wcs.crpix[1] += pad
# Pad CRPIX for SIP
for wcs_ext in [wcs.sip]:
if wcs_ext is not None:
wcs_ext.crpix[0] += pad
wcs_ext.crpix[1] += pad
# Pad CRVAL for Lookup Table, if necessary (e.g., ACS)
for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:
if wcs_ext is not None:
wcs_ext.crval[0] += pad
wcs_ext.crval[1] += pad
return wcs
def add_padding(self, pad=200):
"""Pad the data array and update WCS keywords"""
### Update data array
new_sh = self.sh + 2*pad
for key in ['SCI', 'ERR', 'DQ', 'REF']:
if key not in self.data:
continue
else:
if self.data[key] is None:
continue
data = self.data[key]
new_data = np.zeros(new_sh, dtype=data.dtype)
new_data[pad:-pad, pad:-pad] += data
self.data[key] = new_data
self.sh = new_sh
self.pad += pad
### Padded image dimensions
self.header['NAXIS1'] += 2*pad
self.header['NAXIS2'] += 2*pad
self.header['CRPIX1'] += pad
self.header['CRPIX2'] += pad
### Add padding to WCS
self.wcs = self.add_padding_to_wcs(self.wcs, pad=pad)
    def shrink_large_hdu(self, hdu=None, extra=100, verbose=False):
        """Shrink large image mosaic to speed up blotting

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            Input reference HDU

        extra : int
            Extra border to put around `self.data` WCS to ensure the reference
            image is large enough to encompass the distorted image

        Returns
        -------
        new_hdu : `~astropy.io.fits.ImageHDU`
            Image clipped to encompass `self.data['SCI']` + margin of `extra`
            pixels.

        Make a cutout of the larger reference image around the desired FLT
        image to make blotting faster for large reference images.
        """
        ref_wcs = pywcs.WCS(hdu.header)

        ### Borders of the flt frame (four corners, padded by `extra`)
        naxis = [self.header['NAXIS1'], self.header['NAXIS2']]
        xflt = [-extra, naxis[0]+extra, naxis[0]+extra, -extra]
        yflt = [-extra, -extra, naxis[1]+extra, naxis[1]+extra]

        # Corners -> sky -> reference-image pixel coordinates
        raflt, deflt = self.wcs.all_pix2world(xflt, yflt, 0)
        xref, yref = np.cast[int](ref_wcs.all_world2pix(raflt, deflt, 0))
        ref_naxis = [hdu.header['NAXIS1'], hdu.header['NAXIS2']]

        ### Slices of the reference image, clipped to its dimensions
        xmi = np.maximum(0, xref.min())
        xma = np.minimum(ref_naxis[0], xref.max())
        slx = slice(xmi, xma)

        ymi = np.maximum(0, yref.min())
        yma = np.minimum(ref_naxis[1], yref.max())
        sly = slice(ymi, yma)

        if ((xref.min() < 0) | (yref.min() < 0) |
            (xref.max() > ref_naxis[0]) | (yref.max() > ref_naxis[1])):
            # FLT footprint extends beyond the reference image; return
            # the input unchanged rather than a partial cutout
            if verbose:
                print('Image cutout: x={0}, y={1} [Out of range]'.format(slx, sly))
            return hdu
        else:
            if verbose:
                print('Image cutout: x={0}, y={1}'.format(slx, sly))

        ### Sliced subimage
        slice_wcs = ref_wcs.slice((sly, slx))
        slice_header = hdu.header.copy()
        hwcs = slice_wcs.to_header(relax=True)

        # Copy the sliced WCS keywords, skipping PCi_j
        for k in hwcs.keys():
            if not k.startswith('PC'):
                slice_header[k] = hwcs[k]

        # `*1` forces a copy of the sliced data
        slice_data = hdu.data[sly, slx]*1
        new_hdu = pyfits.ImageHDU(data=slice_data, header=slice_header)

        return new_hdu
    def expand_hdu(self, hdu=None, verbose=True):
        """Pad a reference HDU so that it fully contains the FLT footprint

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            Input reference HDU.

        verbose : bool
            Print the computed padding to the terminal.

        Returns
        -------
        new_hdu : `~astropy.io.fits.ImageHDU`
            Zero-padded copy of `hdu` (or `hdu` itself if no padding was
            needed), with NAXIS/CRPIX keywords updated.
        """
        ref_wcs = pywcs.WCS(hdu.header)

        ### Borders of the flt frame (four corners, padded by self.pad)
        naxis = [self.header['NAXIS1'], self.header['NAXIS2']]
        xflt = [-self.pad, naxis[0]+self.pad, naxis[0]+self.pad, -self.pad]
        yflt = [-self.pad, -self.pad, naxis[1]+self.pad, naxis[1]+self.pad]

        raflt, deflt = self.wcs.all_pix2world(xflt, yflt, 0)
        xref, yref = np.cast[int](ref_wcs.all_world2pix(raflt, deflt, 0))
        ref_naxis = [hdu.header['NAXIS1'], hdu.header['NAXIS2']]

        # How far the footprint extends below (pad_min) and above
        # (pad_max) the reference image boundaries
        pad_min = np.minimum(xref.min(), yref.min())
        pad_max = np.maximum((xref-ref_naxis[0]).max(), (yref-ref_naxis[1]).max())

        if (pad_min > 0) & (pad_max < 0):
            # do nothing, footprint already contained
            return hdu

        # Extra 50-pixel margin on top of the required padding
        pad = np.maximum(np.abs(pad_min), pad_max) + 50

        if verbose:
            print('{0} / Pad ref HDU with {1:d} pixels'.format(self.parent_file, pad))

        ### Update data array
        sh = hdu.data.shape
        new_sh = np.array(sh) + 2*pad

        new_data = np.zeros(new_sh, dtype=hdu.data.dtype)
        new_data[pad:-pad, pad:-pad] += hdu.data

        header = hdu.header.copy()

        ### Padded image dimensions
        header['NAXIS1'] += 2*pad
        header['NAXIS2'] += 2*pad

        ### Add padding to WCS
        header['CRPIX1'] += pad
        header['CRPIX2'] += pad

        new_hdu = pyfits.ImageHDU(data=new_data, header=header)
        return new_hdu
    def blot_from_hdu(self, hdu=None, segmentation=False, grow=3,
                      interp='nearest'):
        """Blot a rectified reference image to detector frame

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            HDU of the reference image

        segmentation : bool, False
            If True, treat the reference image as a segmentation image and
            preserve the integer values in the blotting.

        grow : int, default=3
            Number of pixels to dilate the segmentation regions

        interp : str,
            Form of interpolation to use when blotting float image pixels.
            Valid options: {'nearest', 'linear', 'poly3', 'poly5' (default), 'spline3', 'sinc'}

        Returns
        -------
        blotted : `np.ndarray`
            Blotted array with the same shape and WCS as `self.data['SCI']`.
        """
        import astropy.wcs
        from drizzlepac import astrodrizzle

        #ref = pyfits.open(refimage)
        # do_blot requires float32 input
        if hdu.data.dtype.type != np.float32:
            #hdu.data = np.cast[np.float32](hdu.data)
            refdata = np.cast[np.float32](hdu.data)
        else:
            refdata = hdu.data

        if 'ORIENTAT' in hdu.header.keys():
            hdu.header.remove('ORIENTAT')

        if segmentation:
            # Mask of nonzero segmentation pixels, offset by -1 so the
            # +1 below makes segments 1 and background 0
            seg_ones = np.cast[np.float32](refdata > 0)-1

        ref_wcs = pywcs.WCS(hdu.header, relax=True)
        flt_wcs = self.wcs.copy()

        ### Fix some wcs attributes that might not be set correctly
        for wcs in [ref_wcs, flt_wcs]:
            if (not hasattr(wcs.wcs, 'cd')) & hasattr(wcs.wcs, 'pc'):
                wcs.wcs.cd = wcs.wcs.pc

            if hasattr(wcs, 'idcscale'):
                if wcs.idcscale is None:
                    wcs.idcscale = np.mean(np.sqrt(np.sum(wcs.wcs.cd**2, axis=0))*3600.) #np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.
            else:
                #wcs.idcscale = np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.
                wcs.idcscale = np.mean(np.sqrt(np.sum(wcs.wcs.cd**2, axis=0))*3600.) #np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.

            # wcs.pscale = np.sqrt(wcs.wcs.cd[0,0]**2 +
            #                      wcs.wcs.cd[1,0]**2)*3600.
            #
            wcs.pscale = utils.get_wcs_pscale(wcs)

        if segmentation:
            ### Handle segmentation images a bit differently to preserve
            ### integers.
            ### +1 here is a hack for some memory issues
            seg_interp = 'nearest'
            blotted_ones = astrodrizzle.ablot.do_blot(seg_ones+1, ref_wcs,
                                flt_wcs, 1, coeffs=True,
                                interp=seg_interp,
                                sinscl=1.0, stepsize=10, wcsmap=None)

            blotted_seg = astrodrizzle.ablot.do_blot(refdata*1., ref_wcs,
                                flt_wcs, 1, coeffs=True,
                                interp=seg_interp,
                                sinscl=1.0, stepsize=10, wcsmap=None)

            # Avoid division by zero below
            blotted_ones[blotted_ones == 0] = 1

            #pixel_ratio = (flt_wcs.idcscale / ref_wcs.idcscale)**2
            #in_seg = np.abs(blotted_ones - pixel_ratio) < 1.e-2
            # Recover integer segment IDs from the blotted ratio, then
            # dilate segments by `grow` pixels to fill gaps
            ratio = np.round(blotted_seg/blotted_ones)
            seg = nd.maximum_filter(ratio, size=grow, mode='constant', cval=0)
            ratio[ratio == 0] = seg[ratio == 0]
            blotted = ratio

        else:
            ### Floating point data
            blotted = astrodrizzle.ablot.do_blot(refdata, ref_wcs, flt_wcs, 1,
                                coeffs=True, interp=interp, sinscl=1.0,
                                stepsize=10, wcsmap=None)

        return blotted
    @staticmethod
    def get_slice_wcs(wcs, slx=slice(480,520), sly=slice(480,520)):
        """Get slice of a WCS including higher orders like SIP and DET2IM

        The normal `~astropy.wcs.wcs.WCS` `slice` method doesn't apply the
        slice to all of the necessary keywords.  For example, SIP WCS also
        has a `CRPIX` reference pixel that needs to be offset along with
        the main `CRPIX`.

        Parameters
        ----------
        slx, sly : slice
            Slices in x and y dimensions to extract
        """
        NX = slx.stop - slx.start
        NY = sly.stop - sly.start

        slice_wcs = wcs.slice((sly, slx))
        slice_wcs.naxis1 = slice_wcs._naxis1 = NX
        slice_wcs.naxis2 = slice_wcs._naxis2 = NY

        # SIP CRPIX has to follow the (already-sliced) main CRPIX
        if hasattr(slice_wcs, 'sip'):
            if slice_wcs.sip is not None:
                for c in [0,1]:
                    slice_wcs.sip.crpix[c] = slice_wcs.wcs.crpix[c]

        # Offset lookup-table distortions by the shift of the reference
        # pixel relative to the detector center
        ACS_CRPIX = [4096/2,2048/2] # ACS
        dx_crpix = slice_wcs.wcs.crpix[0] - ACS_CRPIX[0]
        dy_crpix = slice_wcs.wcs.crpix[1] - ACS_CRPIX[1]
        for ext in ['cpdis1','cpdis2','det2im1','det2im2']:
            if hasattr(slice_wcs, ext):
                wcs_ext = slice_wcs.__getattribute__(ext)
                if wcs_ext is not None:
                    wcs_ext.crval[0] += dx_crpix
                    wcs_ext.crval[1] += dy_crpix
                    slice_wcs.__setattr__(ext, wcs_ext)

        return slice_wcs
    def get_slice(self, slx=slice(480,520), sly=slice(480,520),
                  get_slice_header=True):
        """Return cutout version of the `ImageData` object

        Parameters
        ----------
        slx, sly : slice
            Slices in x and y dimensions to extract

        get_slice_header : bool
            Compute the full header of the slice.  This takes a bit of time
            and isn't necessary in all cases so can be omitted if only the
            sliced data are of interest and the header isn't needed.

        Returns
        -------
        slice_obj : `ImageData`
            New `ImageData` object of the sliced subregion
        """
        origin = [sly.start, slx.start]
        NX = slx.stop - slx.start
        NY = sly.stop - sly.start

        ### Test dimensions
        if (origin[0] < 0) | (origin[0]+NY > self.sh[0]):
            raise ValueError ('Out of range in y')

        if (origin[1] < 0) | (origin[1]+NX > self.sh[1]):
            raise ValueError ('Out of range in x')

        ### Sliced subimage
        # sly = slice(origin[0], origin[0]+N)
        # slx = slice(origin[1], origin[1]+N)

        # Origin relative to the ultimate parent image
        slice_origin = [self.origin[i] + origin[i] for i in range(2)]

        slice_wcs = self.get_slice_wcs(self.wcs, slx=slx, sly=sly)
        # slice_wcs = self.wcs.slice((sly, slx))
        #slice_wcs.naxis1 = slice_wcs._naxis1 = NX
        #slice_wcs.naxis2 = slice_wcs._naxis2 = NY

        ### Getting the full header can be slow as there appears to
        ### be substantial overhead with header.copy() and wcs.to_header()
        if get_slice_header:
            slice_header = self.header.copy()
            slice_header['NAXIS1'] = NX
            slice_header['NAXIS2'] = NY

            ### Sliced WCS keywords
            hwcs = slice_wcs.to_header(relax=True)
            for k in hwcs:
                if not k.startswith('PC'):
                    slice_header[k] = hwcs[k]
                else:
                    # Store PCi_j as CDi_j
                    cd = k.replace('PC','CD')
                    slice_header[cd] = hwcs[k]
        else:
            slice_header = pyfits.Header()

        ### Generate new object
        # Note: data arrays are divided by photflam here because the
        # constructor will multiply them back
        slice_obj = ImageData(sci=self.data['SCI'][sly, slx]/self.photflam,
                              err=self.data['ERR'][sly, slx]/self.photflam,
                              dq=self.data['DQ'][sly, slx]*1,
                              header=slice_header, wcs=slice_wcs,
                              photflam=self.photflam, photplam=self.photplam,
                              origin=slice_origin, instrument=self.instrument,
                              filter=self.filter, pupil=self.pupil)

        slice_obj.ref_photflam = self.ref_photflam
        slice_obj.ref_photplam = self.ref_photplam
        slice_obj.ref_filter = self.ref_filter

        slice_obj.mdrizsky = self.mdrizsky
        slice_obj.exptime = self.exptime
        slice_obj.ABZP = self.ABZP
        slice_obj.thumb_extension = self.thumb_extension

        if self.data['REF'] is not None:
            # `*1` forces a copy
            slice_obj.data['REF'] = self.data['REF'][sly, slx]*1
        else:
            slice_obj.data['REF'] = None

        slice_obj.grow = self.grow
        slice_obj.pad = self.pad
        slice_obj.parent_file = self.parent_file
        slice_obj.ref_file = self.ref_file
        slice_obj.sci_extn = self.sci_extn
        slice_obj.is_slice = True

        # if hasattr(slice_obj.wcs, 'sip'):
        #     if slice_obj.wcs.sip is not None:
        #         for c in [0,1]:
        #             slice_obj.wcs.sip.crpix[c] = slice_obj.wcs.wcs.crpix[c]
        #
        # ACS_CRPIX = [4096/2,2048/2] # ACS
        # dx_crpix = slice_obj.wcs.wcs.crpix[0] - ACS_CRPIX[0]
        # dy_crpix = slice_obj.wcs.wcs.crpix[1] - ACS_CRPIX[1]
        # for ext in ['cpdis1','cpdis2','det2im1','det2im2']:
        #     if hasattr(slice_obj.wcs, ext):
        #         wcs_ext = slice_obj.wcs.__getattribute__(ext)
        #         if wcs_ext is not None:
        #             wcs_ext.crval[0] += dx_crpix
        #             wcs_ext.crval[1] += dy_crpix
        #             slice_obj.wcs.__setattr__(ext, wcs_ext)

        return slice_obj#, slx, sly
    def get_HDUList(self, extver=1):
        """Convert attributes and data arrays to a `~astropy.io.fits.HDUList`

        Parameters
        ----------
        extver : int, float, str
            value to use for the 'EXTVER' header keyword.  For example, with
            extver=1, the science extension can be addressed with the index
            `HDU['SCI',1]`.

        returns : `~astropy.io.fits.HDUList`
            HDUList with header keywords copied from `self.header` along with
            keywords for additional attributes.  Will have `ImageHDU`
            extensions 'SCI', 'ERR', and 'DQ', as well as 'REF' if a reference
            file had been supplied.
        """
        h = self.header.copy()
        h['EXTVER'] = extver #self.filter #extver
        h['FILTER'] = self.filter, 'element selected from filter wheel'
        h['INSTRUME'] = (self.instrument,
                         'identifier for instrument used to acquire data')

        h['PHOTFLAM'] = (self.photflam,
                         'inverse sensitivity, ergs/cm2/Ang/electron')

        h['PHOTPLAM'] = self.photplam, 'Pivot wavelength (Angstroms)'
        h['PARENT'] = self.parent_file, 'Parent filename'
        h['SCI_EXTN'] = self.sci_extn, 'EXTNAME of the science data'
        h['ISCUTOUT'] = self.is_slice, 'Arrays are sliced from larger image'
        h['ORIGINX'] = self.origin[1], 'Origin from parent image, x'
        h['ORIGINY'] = self.origin[0], 'Origin from parent image, y'
        h['PAD'] = (self.pad, 'Image padding used')

        hdu = []

        # Undo the ELECTRONS -> count-rate conversion done in __init__
        exptime_corr = 1.
        if 'BUNIT' in self.header:
            if self.header['BUNIT'] == 'ELECTRONS':
                exptime_corr = self.exptime

        # Put back into original units
        sci_data = self['SCI']*exptime_corr + self.mdrizsky
        err_data = self['ERR']*exptime_corr

        hdu.append(pyfits.ImageHDU(data=sci_data, header=h,
                                   name='SCI'))
        hdu.append(pyfits.ImageHDU(data=err_data, header=h,
                                   name='ERR'))
        hdu.append(pyfits.ImageHDU(data=self.data['DQ'], header=h, name='DQ'))

        if self.data['REF'] is not None:
            # REF extension keeps its own photometric keywords
            h['PHOTFLAM'] = self.ref_photflam
            h['PHOTPLAM'] = self.ref_photplam
            h['FILTER'] = self.ref_filter
            h['REF_FILE'] = self.ref_file

            hdu.append(pyfits.ImageHDU(data=self.data['REF'], header=h,
                                       name='REF'))

        hdul = pyfits.HDUList(hdu)

        return hdul
def __getitem__(self, ext):
if self.data[ext] is None:
return None
if ext == 'REF':
return self.data['REF']/self.ref_photflam
elif ext == 'DQ':
return self.data['DQ']
else:
return self.data[ext]/self.photflam
class GrismFLT(object):
    """Scripts for modeling of individual grism FLT images"""
    def __init__(self, grism_file='', sci_extn=1, direct_file='',
                 pad=200, ref_file=None, ref_ext=0, seg_file=None,
                 shrink_segimage=True, force_grism='G141', verbose=True):
        """Read FLT files and, optionally, reference/segmentation images.

        Parameters
        ----------
        grism_file : str
            Grism image (optional).
            Empty string or filename of a FITS file that must contain
            extensions ('SCI', `sci_extn`), ('ERR', `sci_extn`), and
            ('DQ', `sci_extn`).  For example, a WFC3/IR "FLT" FITS file.

        sci_extn : int
            EXTNAME of the file to consider.  For WFC3/IR this can only be
            1.  For ACS and WFC3/UVIS, this can be 1 or 2 to specify the two
            chips.

        direct_file : str
            Direct image (optional).
            Empty string or filename of a FITS file that must contain
            extensions ('SCI', `sci_extn`), ('ERR', `sci_extn`), and
            ('DQ', `sci_extn`).  For example, a WFC3/IR "FLT" FITS file.

        pad : int
            Padding to add around the periphery of the images to allow
            modeling of dispersed spectra for objects that could otherwise
            fall off of the direct image itself.  Modeling them requires an
            external reference image (`ref_file`) that covers an area larger
            than the individual direct image itself (e.g., a mosaic of a
            survey field).

            For WFC3/IR spectra, the first order spectra reach 248 and 195
            pixels for G102 and G141, respectively, and `pad` could be set
            accordingly if the reference image is large enough.

        ref_file : str or `~astropy.io.fits.ImageHDU`/`~astropy.io.fits.PrimaryHDU`
            Image mosaic to use as the reference image in place of the direct
            image itself.  For example, this could be the deeper image
            drizzled from all direct images taken within a single visit or it
            could be a much deeper/wider image taken separately in perhaps
            even a different filter.

            .. note::
                Assumes that the WCS are aligned between `grism_file`,
                `direct_file` and `ref_file`!

        ref_ext : int
            FITS extension to use if `ref_file` is a filename string.

        seg_file : str or `~astropy.io.fits.ImageHDU`/`~astropy.io.fits.PrimaryHDU`
            Segmentation image mosaic to associate pixels with discrete
            objects.  This would typically be generated from a rectified
            image like `ref_file`, though here it is not required that
            `ref_file` and `seg_file` have the same image dimensions but
            rather just that the WCS are aligned between them.

        shrink_segimage : bool
            Try to make a smaller cutout of the reference images to speed
            up blotting and array copying.  This is most helpful for very
            large input mosaics.

        force_grism : str
            Use this grism in "simulation mode" where only `direct_file` is
            specified.

        verbose : bool
            Print status messages to the terminal.

        Attributes
        ----------
        grism, direct : `ImageData`
            Grism and direct image data and parameters

        conf : `~grizli.grismconf.aXeConf`
            Grism configuration object.

        seg : array-like
            Segmentation image array.

        model : array-like
            Model of the grism exposure with the same dimensions as the
            full detector array.

        object_dispersers : dict
            Container for storing information about what objects have been
            added to the model of the grism exposure

        catalog : `~astropy.table.Table`
            Associated photometric catalog.  Not required.
        """
        import stwcs.wcsutil

        ### Read files
        self.grism_file = grism_file
        if os.path.exists(grism_file):
            grism_im = pyfits.open(grism_file)

            # ACS needs HSTWCS initialized from the full HDU list
            if grism_im[0].header['INSTRUME'] == 'ACS':
                wcs = stwcs.wcsutil.HSTWCS(grism_im, ext=('SCI',sci_extn))
            else:
                wcs = None

            self.grism = ImageData(hdulist=grism_im, sci_extn=sci_extn,
                                   wcs=wcs)
        else:
            if (grism_file is None) | (grism_file == ''):
                self.grism = None
            else:
                print('\nFile not found: {0}!\n'.format(grism_file))
                raise IOError

        self.direct_file = direct_file
        if os.path.exists(direct_file):
            direct_im = pyfits.open(direct_file)

            if direct_im[0].header['INSTRUME'] == 'ACS':
                wcs = stwcs.wcsutil.HSTWCS(direct_im, ext=('SCI',sci_extn))
            else:
                wcs = None

            self.direct = ImageData(hdulist=direct_im, sci_extn=sci_extn,
                                    wcs=wcs)
        else:
            if (direct_file is None) | (direct_file == ''):
                self.direct = None
            else:
                print('\nFile not found: {0}!\n'.format(direct_file))
                raise IOError

        # ### Simulation mode, no grism exposure
        if self.grism is not None:
            self.pad = self.grism.pad
        else:
            self.pad = pad

        if (self.grism is None) & (self.direct is not None):
            # Make a fake grism exposure from the direct image
            self.grism = ImageData(hdulist=direct_im, sci_extn=sci_extn)
            self.grism_file = self.direct_file
            self.grism.filter = force_grism

        ### Grism exposure only, assumes will get reference from ref_file
        if (self.direct is None) & (self.grism is not None):
            self.direct = ImageData(hdulist=grism_im, sci_extn=sci_extn)
            self.direct_file = self.grism_file

        ### Add padding
        if self.direct is not None:
            if pad > 0:
                self.direct.add_padding(pad)

            self.direct.unset_dq()
            nbad = self.direct.flag_negative(sigma=-3)
            # Zero out flagged pixels
            self.direct.data['SCI'] *= (self.direct.data['DQ'] == 0)

        if self.grism is not None:
            if pad > 0:
                self.grism.add_padding(pad)
                self.pad = self.grism.pad

            self.grism.unset_dq()
            nbad = self.grism.flag_negative(sigma=-3)
            self.grism.data['SCI'] *= (self.grism.data['DQ'] == 0)

        ### Load data from saved model files, if available
        # if os.path.exists('%s_model.fits' %(self.grism_file)):
        #     pass

        ### Holder for the full grism model array
        self.model = np.zeros_like(self.direct.data['SCI'])

        ### Grism configuration
        # Determine the direct-imaging filter paired with the grism
        if 'DFILTER' in self.grism.header:
            direct_filter = self.grism.header['DFILTER']
        elif self.grism.instrument in ['NIRCAM','NIRISS']:
            direct_filter = self.grism.pupil
        else:
            direct_filter = self.direct.filter

        self.conf_file = grismconf.get_config_filename(self.grism.instrument,
                                                       direct_filter,
                                                       self.grism.filter,
                                                       self.grism.ccdchip)

        self.conf = grismconf.load_grism_config(self.conf_file)

        self.object_dispersers = OrderedDict()

        ### Blot reference image
        self.process_ref_file(ref_file, ref_ext=ref_ext,
                              shrink_segimage=shrink_segimage,
                              verbose=verbose)

        ### Blot segmentation image
        self.process_seg_file(seg_file, shrink_segimage=shrink_segimage,
                              verbose=verbose)

        ## End things
        self.get_dispersion_PA()

        self.catalog = None
        self.catalog_file = None

        self.is_rotated = False
        self.has_edge_mask = False
    def process_ref_file(self, ref_file, ref_ext=0, shrink_segimage=True,
                         verbose=True):
        """Read and blot a reference image

        Parameters
        ----------
        ref_file : str or `~astropy.fits.io.ImageHDU` / `~astropy.fits.io.PrimaryHDU`
            Filename or `astropy.io.fits` Image HDU of the reference image.

        shrink_segimage : bool
            Try to make a smaller cutout of the reference image to speed
            up blotting and array copying.  This is most helpful for very
            large input mosaics.

        verbose : bool
            Print some status information to the terminal

        Returns
        -------
        status : bool
            False if `ref_file` is None.  True if completes successfully.

        The blotted reference image is stored in the array attribute
        `self.direct.data['REF']`.

        The `ref_filter` attribute is determined from the image header and the
        `ref_photflam` scaling is taken either from the header if possible, or
        the global `photflam` variable defined at the top of this file.
        """
        if ref_file is None:
            return False

        if (isinstance(ref_file, pyfits.ImageHDU) |
            isinstance(ref_file, pyfits.PrimaryHDU)):
            self.ref_file = ref_file.fileinfo()['file'].name
            ref_str = ''
            ref_hdu = ref_file
            refh = ref_hdu.header
        else:
            self.ref_file = ref_file
            ref_str = '{0}[0]'.format(self.ref_file)
            ref_hdu = pyfits.open(ref_file)[ref_ext]
            refh = ref_hdu.header

        if shrink_segimage:
            ref_hdu = self.direct.shrink_large_hdu(ref_hdu, extra=self.pad,
                                                   verbose=True)

        if verbose:
            print('{0} / blot reference {1}'.format(self.direct_file, ref_str))

        # Blot the rectified reference onto the (distorted) grism frame
        blotted_ref = self.grism.blot_from_hdu(hdu=ref_hdu,
                                               segmentation=False, interp='poly5')

        header_values = {}
        self.direct.ref_filter = utils.get_hst_filter(refh)
        self.direct.ref_file = ref_str

        # Photometric keywords: from the header if present, otherwise
        # from the tabulated per-filter lists
        key_list = {'PHOTFLAM':photflam_list, 'PHOTPLAM':photplam_list}
        for key in ['PHOTFLAM', 'PHOTPLAM']:
            if key in refh:
                try:
                    header_values[key] = ref_hdu.header[key]*1.
                except TypeError:
                    print('Problem processing header keyword {0}: ** {1} **'.format(key, ref_hdu.header[key]))
                    raise TypeError
            else:
                filt = self.direct.ref_filter
                if filt in key_list[key]:
                    header_values[key] = key_list[key][filt]
                else:
                    print('Filter "{0}" not found in {1} tabulated list'.format(filt, key))
                    raise IndexError

        # Found keywords
        self.direct.ref_photflam = header_values['PHOTFLAM']
        self.direct.ref_photplam = header_values['PHOTPLAM']

        # if 'PHOTFLAM' in refh:
        #     try:
        #         self.direct.ref_photflam = ref_hdu.header['PHOTFLAM']*1.
        #     except TypeError:
        #         print 'Problem reading header keyword PHOTFLAM: ** %s **' %(ref_hdu.header['PHOTFLAM'])
        #         raise TypeError
        # else:
        #     key = refh['FILTER'].upper()
        #     if key in photflam_list:
        #         self.direct.ref_photflam = photflam_list[key]
        #     else:
        #         print 'Filter "%s" not found in `photflam_list`' %(key)
        #         raise IndexError
        #
        # if 'PHOTPLAM' in refh:
        #     try:
        #         self.direct.ref_photplam = ref_hdu.header['PHOTPLAM']*1.
        #     except TypeError:
        #         print 'Problem reading header keyword PHOTPLAM: ** %s **' %(ref_hdu.header['PHOTPLAM'])
        #         raise TypeError
        #
        # else:
        #     key = refh['FILTER'].upper()
        #     self.direct.ref_photplam = photplam_list[refh['FILTER'].upper()]

        ## TBD: compute something like a cross-correlation offset
        ## between blotted reference and the direct image itself
        self.direct.data['REF'] = np.cast[np.float32](blotted_ref)
        #print self.direct.data['REF'].shape, self.direct.ref_photflam

        # Scale REF to f-lambda like the SCI array
        self.direct.data['REF'] *= self.direct.ref_photflam

        # Fill empty pixels in the reference image from the SCI image,
        # but don't do it if direct['SCI'] is just a copy from the grism
        if not self.direct.filter.startswith('G'):
            empty = self.direct.data['REF'] == 0
            self.direct.data['REF'][empty] += self.direct['SCI'][empty]

        # self.direct.data['ERR'] *= 0.
        # self.direct.data['DQ'] *= 0
        self.direct.ABZP = (0*np.log10(self.direct.ref_photflam) - 21.10 -
                            5*np.log10(self.direct.ref_photplam) + 18.6921)

        self.direct.thumb_extension = 'REF'

        #refh['FILTER'].upper()
        return True
def process_seg_file(self, seg_file, shrink_segimage=True, verbose=True):
"""Read and blot a rectified segmentation image
Parameters
----------
seg_file : str or `~astropy.fits.io.ImageHDU` / `~astropy.fits.io.PrimaryHDU`
Filename or `astropy.io.fits` Image HDU of the segmentation image.
shrink_segimage : bool
Try to make a smaller cutout of the segmentation image to speed
up blotting and array copying. This is most helpful for very
large input mosaics.
verbose : bool
Print some status information to the terminal
Returns
-------
The blotted segmentation image is stored in the attribute `GrismFLT.seg`.
"""
if seg_file is not None:
if (isinstance(seg_file, pyfits.ImageHDU) |
isinstance(seg_file, pyfits.PrimaryHDU)):
self.seg_file = ''
seg_str = ''
seg_hdu = seg_file
segh = seg_hdu.header
else:
self.seg_file = seg_file
seg_str = '{0}[0]'.format(self.seg_file)
seg_hdu = pyfits.open(seg_file)[0]
segh = seg_hdu.header
if shrink_segimage:
seg_hdu = self.direct.shrink_large_hdu(seg_hdu,
extra=self.pad,
verbose=True)
### Make sure image big enough
seg_hdu = self.direct.expand_hdu(seg_hdu)
if verbose:
print('{0} / blot segmentation {1}'.format(self.direct_file, seg_str))
blotted_seg = self.grism.blot_from_hdu(hdu=seg_hdu,
segmentation=True, grow=3,
interp='poly5')
self.seg = blotted_seg
else:
self.seg = np.zeros(self.direct.sh, dtype=np.float32)
def get_dispersion_PA(self, decimals=0):
"""Compute exact PA of the dispersion axis, including tilt of the
trace and the FLT WCS
Parameters
----------
decimals : int or None
Number of decimal places to round to, passed to `~numpy.round`.
If None, then don't round.
Returns
-------
dispersion_PA : float
PA (angle East of North) of the dispersion axis.
"""
from astropy.coordinates import Angle
import astropy.units as u
### extra tilt of the 1st order grism spectra
x0 = self.conf.conf['BEAMA']
dy_trace, lam_trace = self.conf.get_beam_trace(x=507, y=507, dx=x0,
beam='A')
extra = np.arctan2(dy_trace[1]-dy_trace[0], x0[1]-x0[0])/np.pi*180
### Distorted WCS
crpix = self.direct.wcs.wcs.crpix
xref = [crpix[0], crpix[0]+1]
yref = [crpix[1], crpix[1]]
r, d = self.direct.wcs.all_pix2world(xref, yref, 1)
pa = Angle((extra +
np.arctan2(np.diff(r)*np.cos(d[0]/180*np.pi),
np.diff(d))[0]/np.pi*180)*u.deg)
dispersion_PA = pa.wrap_at(360*u.deg).value
if decimals is not None:
dispersion_PA = np.round(dispersion_PA, decimals=decimals)
self.dispersion_PA = dispersion_PA
return dispersion_PA
    def compute_model_orders(self, id=0, x=None, y=None, size=10, mag=-1,
                             spectrum_1d=None, is_cgs=False,
                             compute_size=False, max_size=None, store=True,
                             in_place=True, add=True, get_beams=None,
                             psf_params=None,
                             verbose=True):
        """Compute dispersed spectrum for a given object id

        Parameters
        ----------
        id : int
            Object ID number to match in the segmentation image

        x, y : float
            Center of the cutout to extract

        size : int
            Radius of the cutout to extract.  The cutout is equivalent to

            >>> xc, yc = int(x), int(y)
            >>> thumb = self.direct.data['SCI'][yc-size:yc+size, xc-size:xc+size]

        mag : float
            Specified object magnitude, which will be compared to the
            "MMAG_EXTRACT_[BEAM]" parameters in `self.conf` to decide if the
            object is bright enough to compute the higher spectral orders.
            Default of -1 means compute all orders listed in `self.conf.beams`

        spectrum_1d : None or [`~numpy.array`, `~numpy.array`]
            Template 1D spectrum to convolve with the grism disperser.  If
            None, assumes trivial spectrum flat in f_lambda flux densities.
            Otherwise, the template is taken to be

            >>> wavelength, flux = spectrum_1d

        is_cgs : bool
            Flux units of `spectrum_1d[1]` are cgs f_lambda flux densities,
            rather than normalized in the detection band.

        compute_size : bool
            Ignore `x`, `y`, and `size` and compute the extent of the
            segmentation polygon directly using
            `utils_c.disperse.compute_segmentation_limits`.

        max_size : int or None
            Enforce a maximum size of the cutout when using `compute_size`.

        store : bool
            If True, then store the computed beams in the OrderedDict
            `self.object_dispersers[id]`.

            If many objects are computed, this can be memory intensive. To
            save memory, set to False and then the function just stores the
            input template spectrum (`spectrum_1d`) and the beams will have
            to be recomputed if necessary.

        in_place : bool
            If True, add the computed spectral orders into `self.model`.
            Otherwise, make a clean array with only the orders of the given
            object.

        add : bool
            NOTE(review): currently unused in the function body — confirm
            whether it can be removed from the signature.

        get_beams : None or list of str
            If specified, skip the model computation and return just the
            requested `GrismDisperser` beams (e.g., ``['A']``) in an
            OrderedDict.

        psf_params : None or list
            If specified, initialize an ePSF model for each beam via
            `GrismDisperser.x_init_epsf` (forces `store=True`).

        verbose : bool
            Print a message when the object is not found in the
            segmentation image.

        Returns
        -------
        output : bool or `numpy.array`
            If `in_place` is True, return status of True if everything goes
            OK. The computed spectral orders are stored in place in
            `self.model`.

            Returns False if the specified `id` is not found in the
            segmentation array independent of `in_place`.

            If `in_place` is False, return a full array including the model
            for the single object.
        """
        # debug
        # x=None; y=None; size=10; mag=-1; spectrum_1d=None; compute_size=True; store=False; in_place=False; add=True; get_beams=['A']; verbose=True

        # Previously-computed dispersers for this object, if any.  The
        # stored spectrum/beams are needed to subtract the old model
        # before adding the new one.
        if id in self.object_dispersers:
            object_in_model = True
            beams = self.object_dispersers[id]

            out = self.object_dispersers[id]

            # Handle pre 0.3.0-7 formats
            if len(out) == 3:
                old_cgs, old_spectrum_1d, beams = out
            else:
                old_cgs, old_spectrum_1d = out
                beams = None
        else:
            object_in_model = False
            beams = None

        # Thumbnail is cut from the reference image when one was loaded
        if self.direct.data['REF'] is None:
            ext = 'SCI'
        else:
            ext = 'REF'

        # set up the beams to extract
        if get_beams is None:
            beam_names = self.conf.beams
        else:
            beam_names = get_beams

        # Did we initialize the PSF model this call?
        INIT_PSF_NOW = False

        ### Do we need to compute the dispersed beams?
        if beams is None:
            ### Use catalog
            xcat = ycat = None
            if self.catalog is not None:
                ix = self.catalog['id'] == id
                if ix.sum() == 0:
                    if verbose:
                        print('ID {0:d} not found in segmentation image'.format(id))
                    return False

                # Catalog x_flt/y_flt are 1-indexed; convert to 0-indexed
                xcat = self.catalog['x_flt'][ix][0]-1
                ycat = self.catalog['y_flt'][ix][0]-1
                #print '!!! X, Y: ', xcat, ycat, self.direct.origin, size

            # use x, y if defined
            if x is not None:
                xcat = x
            if y is not None:
                ycat = y

            if (compute_size) | (x is None) | (y is None) | (size is None):
                ### Get the array indices of the segmentation region
                out = disperse.compute_segmentation_limits(self.seg, id,
                                                           self.direct.data[ext],
                                                           self.direct.sh)

                ymin, ymax, y, xmin, xmax, x, area, segm_flux = out
                if (area == 0) | ~np.isfinite(x) | ~np.isfinite(y):
                    if verbose:
                        print('ID {0:d} not found in segmentation image'.format(id))
                    return False

                ### Object won't disperse spectrum onto the grism image
                if ((ymax < self.pad-5) |
                    (ymin > self.direct.sh[0]-self.pad+5) |
                    (ymin == 0) | (ymax == self.direct.sh[0]) |
                    (xmin == 0) | (xmax == self.direct.sh[1])):
                    return True

                if compute_size:
                    try:
                        # Half-width that encloses the segmentation region
                        size = int(np.ceil(np.max([x-xmin, xmax-x,
                                                   y-ymin, ymax-y])))
                    except ValueError:
                        return False

                    size += 4

                    ## Enforce minimum size
                    size = np.maximum(size, 16)
                    size = np.maximum(size, 26)

                    ## maximum size
                    if max_size is not None:
                        size = np.min([size, max_size])

                    ## Avoid problems at the array edges
                    size = np.min([size, int(x)-2, int(y)-2])

                    if (size < 4):
                        return True

            ### Thumbnails
            #print '!! X, Y: ', x, y, self.direct.origin, size
            # xcenter/ycenter are the sub-pixel offsets of the object from
            # the integer thumbnail center
            if xcat is not None:
                xc, yc = int(np.round(xcat))+1, int(np.round(ycat))+1
                xcenter = -(xcat-(xc-1))
                ycenter = -(ycat-(yc-1))
            else:
                xc, yc = int(np.round(x))+1, int(np.round(y))+1
                xcenter = -(x-(xc-1))
                ycenter = -(y-(yc-1))

            origin = [yc-size + self.direct.origin[0],
                      xc-size + self.direct.origin[1]]

            thumb = self.direct.data[ext][yc-size:yc+size, xc-size:xc+size]
            seg_thumb = self.seg[yc-size:yc+size, xc-size:xc+size]

            ## Test that the id is actually in the thumbnail
            test = disperse.compute_segmentation_limits(seg_thumb, id, thumb,
                                                        np.array(thumb.shape))
            if test[-2] == 0:
                if verbose:
                    print('ID {0:d} not found in segmentation image'.format(id))
                return False

            # # Get precomputed dispersers
            # beams, old_spectrum_1d, old_cgs = None, None, False
            # if object_in_model:
            #     out = self.object_dispersers[id]
            #
            #     # Handle pre 0.3.0-7 formats
            #     if len(out) == 3:
            #         old_cgs, old_spectrum_1d, old_beams = out
            #     else:
            #         old_cgs, old_spectrum_1d = out
            #         old_beams = None
            #
            #     # Pull out just the requested beams
            #     if old_beams is not None:
            #         beams = OrderedDict()
            #         for b in beam_names:
            #             beams[b] = old_beams[b]
            #
            #if beams is None:

            ### Compute spectral orders ("beams")
            beams = OrderedDict()
            for b in beam_names:
                ### Only compute order if bright enough
                if mag > self.conf.conf['MMAG_EXTRACT_{0}'.format(b)]:
                    continue

                try:
                    beam = GrismDisperser(id=id, direct=thumb, segmentation=seg_thumb, xcenter=xcenter, ycenter=ycenter, origin=origin, pad=self.pad, grow=self.grism.grow, beam=b, conf=self.conf, fwcpos=self.grism.fwcpos, MW_EBV=self.grism.MW_EBV)
                except:
                    # NOTE(review): bare except silently skips beams that
                    # fail to initialize — confirm the intended failure modes
                    continue

                # Set PSF model if necessary
                if psf_params is not None:
                    store = True
                    INIT_PSF_NOW = True

                    #print('xxx Init PSF', b)
                    if self.direct.ref_filter is None:
                        psf_filter = self.direct.filter
                    else:
                        psf_filter = self.direct.ref_filter

                    beam.x_init_epsf(flat_sensitivity=False, psf_params=psf_params, psf_filter=psf_filter, yoff=0.)

                beams[b] = beam

            # Compute old model so it can be subtracted from `self.model`
            if object_in_model:
                for b in beams:
                    beam = beams[b]
                    if hasattr(beam, 'psf') & (not INIT_PSF_NOW):
                        store = True
                        #print('xxx OLD PSF')
                        beam.compute_model_psf(spectrum_1d=old_spectrum_1d,
                                               is_cgs=old_cgs)
                    else:
                        beam.compute_model(spectrum_1d=old_spectrum_1d,
                                           is_cgs=old_cgs)

        # Early exit: only the disperser objects were requested
        if get_beams:
            out_beams = OrderedDict()
            for b in beam_names:
                out_beams[b] = beams[b]

            return out_beams

        if in_place:
            ### Update the internal model attribute
            output = self.model

            if store:
                ### Save the computed beams
                self.object_dispersers[id] = is_cgs, spectrum_1d, beams
            else:
                ### Just save the model spectrum (or empty spectrum)
                self.object_dispersers[id] = is_cgs, spectrum_1d, None
        else:
            ### Create a fresh array
            output = np.zeros_like(self.model)

        # if in_place:
        #     ### Update the internal model attribute
        #     output = self.model
        # else:
        #     ### Create a fresh array
        #     output = np.zeros_like(self.model)

        # Set PSF model if necessary
        if psf_params is not None:
            if self.direct.ref_filter is None:
                psf_filter = self.direct.filter
            else:
                psf_filter = self.direct.ref_filter

        ### Loop through orders and add to the full model array, in-place or
        ### a separate image
        for b in beams:
            beam = beams[b]

            ### Subtract previously-added model
            if object_in_model & in_place:
                beam.add_to_full_image(-beam.model, output)

            ### Update PSF params
            # if psf_params is not None:
            #     skip_init_psf = False
            #     if hasattr(beam, 'psf_params'):
            #         skip_init_psf |= np.product(np.isclose(beam.psf_params, psf_params)) > 0
            #
            #     if not skip_init_psf:
            #         beam.x_init_epsf(flat_sensitivity=False, psf_params=psf_params, psf_filter=psf_filter, yoff=0.06)

            ### Compute model
            if hasattr(beam, 'psf'):
                beam.compute_model_psf(spectrum_1d=spectrum_1d, is_cgs=is_cgs)
            else:
                beam.compute_model(spectrum_1d=spectrum_1d, is_cgs=is_cgs)

            ### Add in new model
            beam.add_to_full_image(beam.model, output)

        if in_place:
            return True
        else:
            return beams, output
def compute_full_model(self, ids=None, mags=None, mag_limit=22,
store=True, verbose=False):
"""Compute flat-spectrum model for multiple objects.
Parameters
----------
ids : None, list, or `~numpy.array`
id numbers to compute in the model. If None then take all ids
from unique values in `self.seg`.
mags : None, float, or list / `~numpy.array`
magnitudes corresponding to list if `ids`. If None, then compute
magnitudes based on the flux in segmentation regions and
zeropoints determined from PHOTFLAM and PHOTPLAM.
Returns
-------
Updated model stored in `self.model` attribute.
"""
if ids is None:
ids = np.unique(self.seg)[1:]
### If `mags` array not specified, compute magnitudes within
### segmentation regions.
if mags is None:
if verbose:
print('Compute IDs/mags')
mags = np.zeros(len(ids))
for i, id in enumerate(ids):
out = disperse.compute_segmentation_limits(self.seg, id,
self.direct.data[self.direct.thumb_extension],
self.direct.sh)
ymin, ymax, y, xmin, xmax, x, area, segm_flux = out
mags[i] = self.direct.ABZP - 2.5*np.log10(segm_flux)
ix = mags < mag_limit
ids = ids[ix]
mags = mags[ix]
else:
if np.isscalar(mags):
mags = [mags for i in range(len(ids))]
else:
if len(ids) != len(mags):
raise ValueError ('`ids` and `mags` lists different sizes')
### Now compute the full model
for id_i, mag_i in zip(ids, mags):
if verbose:
print(utils.NO_NEWLINE + 'compute model id={0:d}'.format(id_i))
self.compute_model_orders(id=id_i, compute_size=True, mag=mag_i,
in_place=True, store=store)
def smooth_mask(self, gaussian_width=4, threshold=2.5):
"""Compute a mask where smoothed residuals greater than some value
Perhaps useful for flagging contaminated pixels that aren't in the
model, such as high orders dispersed from objects that fall off of the
direct image, but this hasn't yet been extensively tested.
Parameters
----------
gaussian_width : float
Width of the Gaussian filter used with `~scipy.ndimage.gaussian_filter`.
threshold : float
Threshold, in sigma, above which to flag residuals.
Returns
-------
Nothing, but pixels are masked in `self.grism.data['SCI']`.
"""
import scipy.ndimage as nd
mask = self.grism['SCI'] != 0
resid = (self.grism['SCI'] - self.model)*mask
sm = nd.gaussian_filter(np.abs(resid), gaussian_width)
resid_mask = (np.abs(sm) > threshold*self.grism['ERR'])
self.grism.data['SCI'][resid_mask] = 0
def blot_catalog(self, input_catalog, columns=['id','ra','dec'],
sextractor=False, ds9=None):
"""Compute detector-frame coordinates of sky positions in a catalog.
Parameters
----------
input_catalog : `~astropy.table.Table`
Full catalog with sky coordinates. Can be SExtractor or other.
columns : [str,str,str]
List of columns that specify the object id, R.A. and Decl. For
catalogs created with SExtractor this might be
['NUMBER', 'X_WORLD', 'Y_WORLD'].
Detector coordinates will be computed with
`self.direct.wcs.all_world2pix` with `origin=1`.
ds9 : `~grizli.ds9.DS9`, optional
If provided, load circular regions at the derived detector
coordinates.
Returns
-------
catalog : `~astropy.table.Table`
New catalog with columns 'x_flt' and 'y_flt' of the detector
coordinates. Also will copy the `columns` names to columns with
names 'id','ra', and 'dec' if necessary, e.g., for SExtractor
catalogs.
"""
from astropy.table import Column
if sextractor:
columns = ['NUMBER', 'X_WORLD', 'Y_WORLD']
### Detector coordinates. N.B.: 1 indexed!
xy = self.direct.wcs.all_world2pix(input_catalog[columns[1]],
input_catalog[columns[2]], 1,
tolerance=-4,
quiet=True)
### Objects with positions within the image
sh = self.direct.sh
keep = ((xy[0] > 0) & (xy[0] < sh[1]) &
(xy[1] > (self.pad-5)) & (xy[1] < (sh[0]-self.pad+5)))
catalog = input_catalog[keep]
### Remove columns if they exist
for col in ['x_flt', 'y_flt']:
if col in catalog.colnames:
catalog.remove_column(col)
### Columns with detector coordinates
catalog.add_column(Column(name='x_flt', data=xy[0][keep]))
catalog.add_column(Column(name='y_flt', data=xy[1][keep]))
### Copy standardized column names if necessary
if ('id' not in catalog.colnames):
catalog.add_column(Column(name='id', data=catalog[columns[0]]))
if ('ra' not in catalog.colnames):
catalog.add_column(Column(name='ra', data=catalog[columns[1]]))
if ('dec' not in catalog.colnames):
catalog.add_column(Column(name='dec', data=catalog[columns[2]]))
### Show positions in ds9
if ds9:
for i in range(len(catalog)):
x_flt, y_flt = catalog['x_flt'][i], catalog['y_flt'][i]
reg = 'circle {0:f} {1:f} 5\n'.format(x_flt, y_flt)
ds9.set('regions', reg)
return catalog
    def photutils_detection(self, use_seg=False, data_ext='SCI',
                            detect_thresh=2., grow_seg=5, gauss_fwhm=2.,
                            verbose=True, save_detection=False, ZP=None):
        """Use photutils to detect objects and make segmentation map

        Parameters
        ----------
        use_seg : bool
            Seed the detection with the existing `self.seg` segmentation
            array rather than starting from scratch.

        data_ext : str
            Extension of `self.direct.data` to use as the detection image
            ('SCI' or 'REF').

        detect_thresh : float
            Detection threshold, in sigma

        grow_seg : int
            Number of pixels to grow around the perimeter of detected objects
            witha maximum filter

        gauss_fwhm : float
            FWHM of Gaussian convolution kernel that smoothes the detection
            image.

        verbose : bool
            Print logging information to the terminal

        save_detection : bool
            Save the detection images and catalogs

        ZP : float or None
            AB magnitude zeropoint of the science array.  If `None` then, try
            to compute based on PHOTFLAM and PHOTPLAM values and use zero if
            that fails.

        Returns
        ---------
        status : bool
            True if completed successfully.  False if `data_ext=='REF'` but
            no reference image found.

            Stores an astropy.table.Table object to `self.catalog` and a
            segmentation array to `self.seg`.
        """
        if ZP is None:
            if ((self.direct.filter in photflam_list.keys()) &
                    (self.direct.filter in photplam_list.keys())):
                ### ABMAG_ZEROPOINT from
                ### http://www.stsci.edu/hst/wfc3/phot_zp_lbn
                ZP = (-2.5*np.log10(photflam_list[self.direct.filter]) -
                      21.10 - 5*np.log10(photplam_list[self.direct.filter]) +
                      18.6921)
            else:
                # Zeropoint unknown for this filter
                ZP = 0.

        if use_seg:
            seg = self.seg
        else:
            seg = None

        # Only use the uncertainty array if it has been populated;
        # scaled by 1/photflam to match the detection image below
        if self.direct.data['ERR'].max() != 0.:
            err = self.direct.data['ERR']/self.direct.photflam
        else:
            err = None

        if (data_ext == 'REF'):
            if (self.direct.data['REF'] is not None):
                # The reference image has no matching error array
                err = None
            else:
                print('No reference data found for `self.direct.data[\'REF\']`')
                return False

        go_detect = utils.detect_with_photutils
        cat, seg = go_detect(self.direct.data[data_ext]/self.direct.photflam,
                             err=err, dq=self.direct.data['DQ'], seg=seg,
                             detect_thresh=detect_thresh, npixels=8,
                             grow_seg=grow_seg, gauss_fwhm=gauss_fwhm,
                             gsize=3, wcs=self.direct.wcs,
                             save_detection=save_detection,
                             root=self.direct_file.split('.fits')[0],
                             background=None, gain=None, AB_zeropoint=ZP,
                             clobber=True, verbose=verbose)

        self.catalog = cat
        self.catalog_file = '<photutils>'

        self.seg = seg

        return True
def load_photutils_detection(self, seg_file=None, seg_cat=None,
catalog_format='ascii.commented_header'):
"""
Load segmentation image and catalog, either from photutils
or SExtractor.
If SExtractor, use `catalog_format='ascii.sextractor'`.
"""
root = self.direct_file.split('.fits')[0]
if seg_file is None:
seg_file = root + '.detect_seg.fits'
if not os.path.exists(seg_file):
print('Segmentation image {0} not found'.format(segfile))
return False
self.seg = np.cast[np.float32](pyfits.open(seg_file)[0].data)
if seg_cat is None:
seg_cat = root + '.detect.cat'
if not os.path.exists(seg_cat):
print('Segmentation catalog {0} not found'.format(seg_cat))
return False
self.catalog = Table.read(seg_cat, format=catalog_format)
self.catalog_file = seg_cat
    def save_model(self, clobber=True, verbose=True):
        """Save model properties to FITS file

        Parameters
        ----------
        clobber : bool
            Overwrite an existing '<root>_model.fits' file.

        verbose : bool
            Print the names of the saved files.

        Writes '<root>_model.fits' with MODEL and SEG extensions (plus an
        optional REFERENCE extension) and '<root>_model.pkl' containing
        the pickled `object_dispersers` dictionary.
        """
        try:
            import cPickle as pickle
        except:
            # Python 3
            import pickle

        root = self.grism_file.split('_flt.fits')[0].split('_rate.fits')[0]

        # Primary-header metadata describing how the model was built
        h = pyfits.Header()
        h['GFILE'] = (self.grism_file, 'Grism exposure name')
        h['GFILTER'] = (self.grism.filter, 'Grism spectral element')
        h['INSTRUME'] = (self.grism.instrument, 'Instrument of grism file')
        h['PAD'] = (self.pad, 'Image padding used')
        h['DFILE'] = (self.direct_file, 'Direct exposure name')
        h['DFILTER'] = (self.direct.filter, 'Grism spectral element')
        h['REF_FILE'] = (self.ref_file, 'Reference image')
        h['SEG_FILE'] = (self.seg_file, 'Segmentation image')
        h['CONFFILE'] = (self.conf_file, 'Configuration file')
        h['DISP_PA'] = (self.dispersion_PA, 'Dispersion position angle')

        h0 = pyfits.PrimaryHDU(header=h)
        model = pyfits.ImageHDU(data=self.model, header=self.grism.header,
                                name='MODEL')
        seg = pyfits.ImageHDU(data=self.seg, header=self.grism.header,
                              name='SEG')

        hdu = pyfits.HDUList([h0, model, seg])

        # Also store the blotted reference image, if one was used
        if 'REF' in self.direct.data:
            ref_header = self.grism.header.copy()
            ref_header['FILTER'] = self.direct.ref_filter
            ref_header['PARENT'] = self.ref_file
            ref_header['PHOTFLAM'] = self.direct.ref_photflam
            ref_header['PHOTPLAM'] = self.direct.ref_photplam

            ref = pyfits.ImageHDU(data=self.direct['REF'],
                                  header=ref_header, name='REFERENCE')

            hdu.append(ref)

        hdu.writeto('{0}_model.fits'.format(root), clobber=clobber,
                    output_verify='fix')

        # Pickle the per-object dispersers separately from the FITS data
        fp = open('{0}_model.pkl'.format(root), 'wb')
        pickle.dump(self.object_dispersers, fp)
        fp.close()

        if verbose:
            print('Saved {0}_model.fits and {0}_model.pkl'.format(root))
    def save_full_pickle(self, verbose=True):
        """Save entire `GrismFLT` object to a pickle

        Writes '<root>.<extn>.GrismFLT.fits' containing the direct and
        grism data extensions plus SEG and MODEL, and
        '<root>.<extn>.GrismFLT.pkl' containing the pickled object
        itself.  NOTE: the large arrays (`direct.data`, `grism.data`,
        `seg`, `model`) are set to None before pickling so they are not
        duplicated in the pickle — they are recoverable from the FITS
        file (see `load_from_fits`).
        """
        try:
            import cPickle as pickle
        except:
            # Python 3
            import pickle

        root = self.grism_file.split('_flt.fits')[0].split('_cmb.fits')[0]
        root = root.split('_flc.fits')[0].split('_rate.fits')[0]

        hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
        # Direct-image extensions prefixed 'D', grism extensions 'G'
        for key in self.direct.data.keys():
            hdu.append(pyfits.ImageHDU(data=self.direct.data[key],
                                       header=self.direct.header,
                                       name='D'+key))

        for key in self.grism.data.keys():
            hdu.append(pyfits.ImageHDU(data=self.grism.data[key],
                                       header=self.grism.header,
                                       name='G'+key))

        hdu.append(pyfits.ImageHDU(data=self.seg,
                                   header=self.grism.header,
                                   name='SEG'))

        hdu.append(pyfits.ImageHDU(data=self.model,
                                   header=self.grism.header,
                                   name='MODEL'))

        hdu.writeto('{0}.{1:02d}.GrismFLT.fits'.format(root, self.grism.sci_extn), clobber=True, output_verify='fix')

        ## zero out large data objects (saved above in the FITS file)
        self.direct.data = self.grism.data = self.seg = self.model = None

        fp = open('{0}.{1:02d}.GrismFLT.pkl'.format(root, self.grism.sci_extn), 'wb')
        pickle.dump(self, fp)
        fp.close()

        self.save_wcs(overwrite=True, verbose=False)
def save_wcs(self, overwrite=True, verbose=True):
"""TBD
"""
if self.direct.parent_file == self.grism.parent_file:
base_list = [self.grism]
else:
base_list = [self.direct, self.grism]
for base in base_list:
hwcs = base.wcs.to_fits(relax=True)
hwcs[0].header['PAD'] = base.pad
if 'CCDCHIP' in base.header:
ext = {1:2,2:1}[base.header['CCDCHIP']]
else:
ext = base.header['EXTVER']
wcsfile = base.parent_file.replace('.fits', '.{0:02d}.wcs.fits'.format(ext))
try:
hwcs.writeto(wcsfile, clobber=overwrite)
except:
hwcs.writeto(wcsfile, overwrite=overwrite)
if verbose:
print(wcsfile)
def load_from_fits(self, save_file):
"""Load saved data from a FITS file
Parameters
----------
save_file : str
Filename of the saved output
Returns
-------
True if completed successfully
"""
fits = pyfits.open(save_file)
self.seg = fits['SEG'].data*1
self.model = fits['MODEL'].data*1
self.direct.data = OrderedDict()
self.grism.data = OrderedDict()
for ext in range(1,len(fits)):
key = fits[ext].header['EXTNAME'][1:]
if fits[ext].header['EXTNAME'].startswith('D'):
if fits[ext].data is None:
self.direct.data[key] = None
else:
self.direct.data[key] = fits[ext].data*1
elif fits[ext].header['EXTNAME'].startswith('G'):
if fits[ext].data is None:
self.grism.data[key] = None
else:
self.grism.data[key] = fits[ext].data*1
else:
pass
del(fits)
return True
    def transform_NIRISS(self, verbose=True):
        """
        Rotate data & wcs so that spectra are increasing to +x

        Only applies to NIRISS and NIRCAM (module A, GRISMC) exposures;
        everything else returns immediately.  Calling the method a second
        time rotates everything back (`is_rotated` is toggled each call).

        Parameters
        ----------
        verbose : bool
            Print the flip state after the transform.
        """
        if self.grism.instrument not in ['NIRCAM', 'NIRISS']:
            return True

        # Number of 90-degree rotations, depending on the disperser
        if self.grism.instrument == 'NIRISS':
            if self.grism.filter == 'GR150C':
                rot = 2
            else:
                rot = -1
        elif self.grism.instrument == 'NIRCAM':
            # Only module A
            if self.grism.pupil == 'GRISMC':
                rot = 1
            else:
                return True

        # Rotate in the opposite sense to undo a previous transform
        if self.is_rotated:
            rot *= -1

        self.is_rotated = not self.is_rotated
        if verbose:
            print('Transform NIRISS: flip={0}'.format(self.is_rotated))

        ### Compute new CRPIX coordinates
        center = np.array(self.grism.sh)/2.+0.5
        crpix = self.grism.wcs.wcs.crpix

        # 2x2 rotation matrix for rotating CRPIX about the frame center
        rad = np.deg2rad(-90*rot)
        mat = np.zeros((2,2))
        mat[0,:] = np.array([np.cos(rad),-np.sin(rad)])
        mat[1,:] = np.array([np.sin(rad),np.cos(rad)])

        crpix_new = np.dot(mat, crpix-center)+center

        for obj in [self.grism, self.direct]:
            obj.header['CRPIX1'] = crpix_new[0]
            obj.header['CRPIX2'] = crpix_new[1]

            # Get rotated CD
            out_wcs = utils.transform_wcs(obj.wcs, translation=[0.,0.], rotation=rad, scale=1.)
            new_cd = out_wcs.wcs.cd

            for i in range(2):
                for j in range(2):
                    obj.header['CD{0}_{1}'.format(i+1, j+1)] = new_cd[i,j]

            # Update wcs
            obj.get_wcs()
            # NOTE(review): get_wcs is called a second time when the WCS
            # comes back with a PC matrix — confirm this double call is
            # required rather than a workaround
            if obj.wcs.wcs.has_pc():
                obj.get_wcs()

            # Rotate data
            for k in obj.data.keys():
                if obj.data[k] is not None:
                    obj.data[k] = np.rot90(obj.data[k], rot)

        # Rotate segmentation image
        self.seg = np.rot90(self.seg, rot)
        self.model = np.rot90(self.model, rot)

        #print('xx Rotate images {0}'.format(rot))

        if self.catalog is not None:
            #print('xx Rotate catalog {0}'.format(rot))
            # Recompute detector coordinates using the rotated WCS
            self.catalog = self.blot_catalog(self.catalog,
                          sextractor=('X_WORLD' in self.catalog.colnames))
def make_edge_mask(self, scale=3, force=False):
"""Make a mask for the edge of the grism FoV that isn't covered by the direct image
Parameters
----------
scale : float
Scale factor to multiply to the mask before it's applied to the
`self.grism.data['ERR']` array.
force : bool
Force apply the mask even if `self.has_edge_mask` is set
indicating that the function has already been run.
Returns
-------
Nothing, updates `self.grism.data['ERR']` in place.
Sets `self.has_edge_mask = True`.
"""
import scipy.ndimage as nd
if (self.has_edge_mask) & (force == False):
return True
kern = (np.arange(self.conf.conf['BEAMA'][1]) > self.conf.conf['BEAMA'][0])*1.
kern /= kern.sum()
if self.direct['REF'] is not None:
mask = self.direct['REF'] == 0
else:
mask = self.direct['SCI'] == 0
full_mask = nd.convolve(mask*1., kern.reshape((1,-1)),
origin=(0,-kern.size//2+20))
self.grism.data['ERR'] *= np.exp(full_mask*scale)
self.has_edge_mask = True
class BeamCutout(object):
    def __init__(self, flt=None, beam=None, conf=None,
                 get_slice_header=True, fits_file=None, scale=1.,
                 contam_sn_mask=[10,3], min_mask=0.01, min_sens=0.08):
        """Cutout of a single spectral order for one object.

        Parameters
        ----------
        flt : `GrismFLT`
            Parent FLT frame.

        beam : `GrismDisperser`
            Object and spectral order to consider

        conf : `.grismconf.aXeConf`
            Pre-computed configuration file.  If not specified will
            regenerate based on header parameters, which might be necessary
            for multiprocessing parallelization and pickling.

        get_slice_header : bool
            Get full headers of the sliced data.

        fits_file : None or str
            Optional FITS file containing the beam information, rather than
            reading directly from a `GrismFLT` object with the `flt` and
            `beam` paremters.  Load with `load_fits`.

        scale : float
            Stored in `self.beam.scale`.

        contam_sn_mask : [float, float]
            Mask threshold parameters for contaminated pixels.

        min_mask : float
            Minimum factor relative to the maximum pixel value of the flat
            f-lambda model where the 2D cutout data are considered good.

        min_sens : float
            Minimum sensitivity relative to the maximum for a given grism
            above which pixels are included in the fit.

        Attributes
        ----------
        grism, direct : `ImageData` (sliced)
            Cutouts of the grism and direct images.

        beam : `GrismDisperser`
            High-level tools for computing dispersed models of the object

        mask : array-like (bool)
            Basic mask where `grism` DQ > 0 | ERR == 0 | SCI == 0.

        fit_mask, DoF : array-like, int
            Additional mask; DoF is `fit_mask.sum()` representing the
            effective degrees of freedom for chi-squared.

        ivar : array-like
            Inverse variance array, taken from `grism` 1/ERR^2

        model, modelf : array-like
            2D and flattened versions of the object model array

        contam : array-like
            Contamination model

        scif : array_like
            Flattened version of `grism['SCI'] - contam`.

        flat_flam : array-like
            Flattened version of the flat-flambda object model

        poly_order : int
            Order of the polynomial model
        """
        self.background = 0.

        # Initialize either from a saved FITS file or from the parent
        # FLT frame + disperser objects
        if fits_file is None:
            self.init_from_input(flt, beam, conf, get_slice_header)
        else:
            self.load_fits(fits_file, conf)

        self.beam.scale = scale
        self.contam_sn_mask = contam_sn_mask
        self.min_mask = min_mask
        self.min_sens = min_sens

        # Derive masks, variance and model attributes from the loaded data
        self._parse_from_data(contam_sn_mask=contam_sn_mask,
                              min_mask=min_mask, min_sens=min_sens)
    def _parse_from_data(self, contam_sn_mask=[10,3], min_mask=0.01,
                         min_sens=0.08):
        """
        Build the derived mask / variance / model attributes from the
        already-loaded `grism`, `direct` and `beam` data.

        See parameter description for `~grizli.model.BeamCutout`.
        """
        ### bad pixels or problems with uncertainties
        self.mask = ((self.grism.data['DQ'] > 0) |
                     (self.grism.data['ERR'] == 0) |
                     (self.grism.data['SCI'] == 0))

        self.var = self.grism.data['ERR']**2
        self.ivar = 1/self.grism.data['ERR']**2
        self.ivar[self.mask] = 0

        self.thumbs = {}

        #self.compute_model = self.beam.compute_model
        #self.model = self.beam.model
        # `model` is a 2D view of the flattened `modelf` array
        self.modelf = self.beam.modelf #.flatten()
        self.model = self.beam.modelf.reshape(self.beam.sh_beam)

        # Attributes
        self.size = self.modelf.size
        self.wave = self.beam.lam
        self.sh = self.beam.sh_beam

        ### Initialize for fits
        self.flat_flam = self.compute_model(in_place=False, is_cgs=True) #/self.beam.total_flux

        ### OK data where the 2D model has non-zero flux
        self.fit_mask = (~self.mask.flatten()) & (self.ivar.flatten() != 0)
        self.fit_mask &= (self.flat_flam > min_mask*self.flat_flam.max())
        #self.fit_mask &= (self.flat_flam > 3*self.contam.flatten())

        ### Apply minimum sensitivity mask
        self.sens_mask = 1.
        if min_sens > 0:
            # Spectrum that is nonzero only where the sensitivity is low
            flux_min_sens = (self.beam.sensitivity < min_sens*self.beam.sensitivity.max())*1.

            if flux_min_sens.sum() > 0:
                # Disperse it to find the affected 2D pixels
                flat_sens = self.compute_model(in_place=False, is_cgs=True,
                               spectrum_1d=[self.beam.lam, flux_min_sens])

                # self.sens_mask = flat_sens == 0
                # Make mask along columns
                is_masked = (flat_sens.reshape(self.sh) > 0).sum(axis=0)
                self.sens_mask = (np.dot(np.ones((self.sh[0],1)), is_masked[None,:]) == 0).flatten()
                self.fit_mask &= self.sens_mask

        ### Flat versions of sci/ivar arrays
        self.scif = (self.grism.data['SCI'] - self.contam).flatten()
        self.ivarf = self.ivar.flatten()
        self.wavef = np.dot(np.ones((self.sh[0],1)), self.wave[None,:]).flatten()

        ### Mask large residuals where throughput is low
        resid = np.abs(self.scif - self.flat_flam)*np.sqrt(self.ivarf)
        bad_resid = (self.flat_flam < 0.05*self.flat_flam.max()) & (resid > 5)
        self.fit_mask *= ~bad_resid

        ### Mask very contaminated
        contam_mask = ((self.contam*np.sqrt(self.ivar) > contam_sn_mask[0]) &
                       (self.model*np.sqrt(self.ivar) < contam_sn_mask[1]))
        #self.fit_mask *= ~contam_mask.flatten()
        # Grow the contamination mask with a 5-pixel maximum filter
        self.contam_mask = ~nd.maximum_filter(contam_mask, size=5).flatten()
        self.poly_order = None
        #self.init_poly_coeffs(poly_order=1)
    def init_from_input(self, flt, beam, conf=None, get_slice_header=True):
        """Initialize from data objects

        Parameters
        ----------
        flt : `GrismFLT`
            Parent FLT frame.

        beam : `GrismDisperser`
            Object and spectral order to consider

        conf : `.grismconf.aXeConf`
            Pre-computed configuration file.  If not specified will regenerate
            based on header parameters, which might be necessary for
            multiprocessing parallelization and pickling.

        get_slice_header : bool
            Get full header of the sliced data.  Costs some overhead so can
            be skipped if full header information isn't required.

        Returns
        -------
        Loads attributes to `self`.
        """
        self.id = beam.id
        if conf is None:
            conf = grismconf.load_grism_config(flt.conf_file)

        # Independent copy of the disperser (`*1` copies the arrays)
        self.beam = GrismDisperser(id=beam.id, direct=beam.direct*1,
                           segmentation=beam.seg*1, origin=beam.origin,
                           pad=beam.pad, grow=beam.grow,
                           beam=beam.beam, conf=conf, xcenter=beam.xcenter,
                           ycenter=beam.ycenter, fwcpos=flt.grism.fwcpos,
                           MW_EBV=flt.grism.MW_EBV)

        # Restore the ePSF model parameters, if they were set on the input
        if hasattr(beam, 'psf_params'):
            self.beam.x_init_epsf(psf_params=beam.psf_params, psf_filter=beam.psf_filter, yoff=beam.psf_yoff)

        if beam.spectrum_1d is None:
            self.compute_model()#spectrum_1d=beam.spectrum_1d)
        else:
            self.compute_model(spectrum_1d=beam.spectrum_1d,
                               is_cgs=beam.is_cgs)

        # Direct-image thumbnail around the object
        slx_thumb = slice(self.beam.origin[1],
                          self.beam.origin[1]+self.beam.sh[1])

        sly_thumb = slice(self.beam.origin[0],
                          self.beam.origin[0]+self.beam.sh[0])

        self.direct = flt.direct.get_slice(slx_thumb, sly_thumb,
                                           get_slice_header=get_slice_header)
        self.grism = flt.grism.get_slice(self.beam.slx_parent,
                                         self.beam.sly_parent,
                                         get_slice_header=get_slice_header)

        # Contamination = full-frame model minus this object's own model
        self.contam = flt.model[self.beam.sly_parent, self.beam.slx_parent]*1
        if self.beam.id in flt.object_dispersers:
            self.contam -= self.beam.model
def load_fits(self, file, conf=None, direct_extn=1, grism_extn=2):
"""Initialize from FITS file
Parameters
----------
file : str
FITS file to read (as output from `write_fits`).
Returns
-------
Loads attributes to `self`.
"""
if isinstance(file, str):
hdu = pyfits.open(file)
else:
hdu = file
self.direct = ImageData(hdulist=hdu, sci_extn=direct_extn)
self.grism = ImageData(hdulist=hdu, sci_extn=grism_extn)
self.contam = hdu['CONTAM'].data*1
try:
self.modelf = hdu['MODEL'].data.flatten()*1
except:
self.modelf = self.grism['SCI'].flatten()*0.
if ('REF',1) in hdu:
direct = hdu['REF', 1].data*1
else:
direct = hdu['SCI', 1].data*1
h0 = hdu[0].header
# if 'DFILTER' in self.grism.header:
# direct_filter = self.grism.header['DFILTER']
# else:
# direct_filter = self.direct.filter
# #
if 'DFILTER' in self.grism.header:
direct_filter = self.grism.header['DFILTER']
elif self.grism.instrument in ['NIRCAM','NIRISS']:
direct_filter = self.grism.pupil
else:
direct_filter = self.direct.filter
if conf is None:
conf_file = grismconf.get_config_filename(self.direct.instrument,
direct_filter,
self.grism.filter,
chip=self.grism.ccdchip)
conf = grismconf.load_grism_config(conf_file)
if 'GROW' in self.grism.header:
grow = self.grism.header['GROW']
else:
grow = 1
if 'MW_EBV' in h0:
self.grism.MW_EBV = h0['MW_EBV']
else:
self.grism.MW_EBV = 0
self.grism.fwcpos = h0['FWCPOS']
if (self.grism.fwcpos == 0) | (self.grism.fwcpos == ''):
self.grism.fwcpos = None
if 'TYOFFSET' in h0:
yoffset = h0['TYOFFSET']
else:
yoffset = 0.
self.beam = GrismDisperser(id=h0['ID'], direct=direct,
segmentation=hdu['SEG'].data*1,
origin=self.direct.origin,
pad=h0['PAD'],
grow=grow, beam=h0['BEAM'],
xcenter=h0['XCENTER'],
ycenter=h0['YCENTER'],
conf=conf, fwcpos=self.grism.fwcpos,
MW_EBV=self.grism.MW_EBV,
yoffset=yoffset)
self.grism.parent_file = h0['GPARENT']
self.direct.parent_file = h0['DPARENT']
self.id = h0['ID']
self.modelf = self.beam.modelf
def write_fits(self, root='beam_', clobber=True, strip=False, get_hdu=False):
"""Write attributes and data to FITS file
Parameters
----------
root : str
Output filename will be
'{root}_{self.id}.{self.grism.filter}.{self.beam}.fits'
with `self.id` zero-padded with 5 digits.
clobber : bool
Overwrite existing file.
strip : bool
Strip out extensions that aren't totally necessary for
regenerating the `ImageData` object. That is, strip out the
direct image `SCI`, `ERR`, and `DQ` extensions if `REF` is
defined. Also strip out `MODEL`.
get_hdu : bool
Return `~astropy.io.fits.HDUList` rather than writing a file.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
If `get_hdu` is True
outfile : str
If `get_hdu` is False, return the output filename.
"""
h0 = pyfits.Header()
h0['ID'] = self.beam.id, 'Object ID'
h0['PAD'] = self.beam.pad, 'Padding of input image'
h0['BEAM'] = self.beam.beam, 'Grism order ("beam")'
h0['XCENTER'] = (self.beam.xcenter,
'Offset of centroid wrt thumb center')
h0['YCENTER'] = (self.beam.ycenter,
'Offset of centroid wrt thumb center')
if hasattr(self.beam, 'yoffset'):
h0['TYOFFSET'] = (self.beam.yoffset,
'Cross dispersion offset of the trace')
h0['GPARENT'] = (self.grism.parent_file,
'Parent grism file')
h0['DPARENT'] = (self.direct.parent_file,
'Parent direct file')
h0['FWCPOS'] = (self.grism.fwcpos,
'Filter wheel position (NIRISS)')
h0['MW_EBV'] = (self.grism.MW_EBV,
'Milky Way exctinction E(B-V)')
hdu = pyfits.HDUList([pyfits.PrimaryHDU(header=h0)])
hdu.extend(self.direct.get_HDUList(extver=1))
hdu.append(pyfits.ImageHDU(data=np.cast[np.int32](self.beam.seg),
header=hdu[-1].header, name='SEG'))
hdu.extend(self.grism.get_HDUList(extver=2))
hdu.append(pyfits.ImageHDU(data=self.contam, header=hdu[-1].header,
name='CONTAM'))
hdu.append(pyfits.ImageHDU(data=self.model, header=hdu[-1].header,
name='MODEL'))
if strip:
# Blotted reference is attached, don't need individual direct
# arrays.
if self.direct['REF'] is not None:
for ext in [('SCI',1), ('ERR',1) , ('DQ',1)]:
if ext in hdu:
ix = hdu.index_of(ext)
p = hdu.pop(ix)
# This can be regenerated
ix = hdu.index_of('MODEL')
p = hdu.pop(ix)
# Put Primary keywords in first extension
SKIP_KEYS = ['EXTEND', 'SIMPLE']
for key in h0:
if key not in SKIP_KEYS:
hdu[1].header[key] = (h0[key], h0.comments[key])
hdu['SCI',2].header[key] = (h0[key], h0.comments[key])
if get_hdu:
return hdu
outfile = '{0}_{1:05d}.{2}.{3}.fits'.format(root, self.beam.id,
self.grism.filter.lower(),
self.beam.beam)
hdu.writeto(outfile, clobber=clobber)
return outfile
def compute_model(self, use_psf=True, **kwargs):
"""Link to `self.beam.compute_model`
`self.beam` is a `GrismDisperser` object.
"""
if use_psf & hasattr(self.beam, 'psf'):
result = self.beam.compute_model_psf(**kwargs)
else:
result = self.beam.compute_model(**kwargs)
reset = True
if 'in_place' in kwargs:
reset = kwargs['in_place']
if reset:
self.modelf = self.beam.modelf #.flatten()
self.model = self.beam.modelf.reshape(self.beam.sh_beam)
return result
    def get_wavelength_wcs(self, wavelength=1.3e4):
        """Compute *celestial* WCS of the 2D spectrum array for a specified central wavelength

        This essentially recenters the celestial SIP WCS such that the
        desired wavelength was at the object position as observed in the
        direct image (which has associated geometric distortions etc).

        Parameters
        ----------
        wavelength : float
            Central wavelength to use for derived WCS.

        Returns
        -------
        header : `~astropy.io.fits.Header`
            FITS header

        wcs : `~astropy.wcs.WCS`
            Derived celestial WCS
        """
        wcs = self.grism.wcs.deepcopy()

        xarr = np.arange(self.beam.lam_beam.shape[0])

        ### Trace properties at desired wavelength
        # Pixel offset along the dispersion axis where `wavelength` falls
        dx = np.interp(wavelength, self.beam.lam_beam, xarr)
        # Cross-dispersion offset of the trace at that wavelength
        dy = np.interp(wavelength, self.beam.lam_beam, self.beam.ytrace_beam)

        # Local dispersion (delta wavelength per pixel) at that wavelength
        dl = np.interp(wavelength, self.beam.lam_beam[1:],
                       np.diff(self.beam.lam_beam))

        # Inverse sensitivity at that wavelength (stored in INVSENS below)
        ysens = np.interp(wavelength, self.beam.lam_beam, self.beam.sensitivity_beam)

        ### Update CRPIX
        dc = 0 # python array center to WCS pixel center

        # NOTE: `cr` is a live reference into the WCS object, so the
        # in-place += below modifies the (deep-copied) WCS itself
        # NOTE(review): `sh[0]` feeds the x-axis shift here — confirm the
        # intended ordering of `beam.sh`
        for wcs_ext in [wcs.sip, wcs.wcs]:
            if wcs_ext is None:
                continue
            else:
                cr = wcs_ext.crpix

            cr[0] += dx + self.beam.sh[0]/2 + self.beam.dxfull[0] + dc
            cr[1] += dy + dc

        # Same shift for the lookup-table distortions, which use `crval`
        # rather than `crpix`
        for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:
            if wcs_ext is None:
                continue
            else:
                cr = wcs_ext.crval

            cr[0] += dx + self.beam.sh[0]/2 + self.beam.dxfull[0] + dc
            cr[1] += dy + dc

        ### Make SIP CRPIX match CRPIX
        # if wcs.sip is not None:
        #     for i in [0,1]:
        #         wcs.sip.crpix[i] = wcs.wcs.crpix[i]

        for wcs_ext in [wcs.sip]:
            if wcs_ext is not None:
                for i in [0,1]:
                    wcs_ext.crpix[i] = wcs.wcs.crpix[i]

        ### WCS header
        header = wcs.to_header(relax=True)
        # Rename PC-matrix keywords to the CD convention
        for key in header:
            if key.startswith('PC'):
                header.rename_keyword(key, key.replace('PC', 'CD'))

        header['LONPOLE'] = 180.
        header['RADESYS'] = 'ICRS'
        header['LTV1'] = (0.0, 'offset in X to subsection start')
        header['LTV2'] = (0.0, 'offset in Y to subsection start')
        header['LTM1_1'] = (1.0, 'reciprocal of sampling rate in X')
        header['LTM2_2'] = (1.0, 'reciprocal of sampling rate in X')
        header['INVSENS'] = (ysens, 'inverse sensitivity, 10**-17 erg/s/cm2')
        header['DLDP'] = (dl, 'delta wavelength per pixel')

        return header, wcs
def get_2d_wcs(self, data=None):
"""Get simplified WCS of the 2D spectrum
Parameters
----------
data : array-like
Put this data in the output HDU rather than empty zeros
Returns
-------
hdu : `~astropy.io.fits.ImageHDU`
Image HDU with header and data properties.
wcs : `~astropy.wcs.WCS`
WCS appropriate for the 2D spectrum with spatial (y) and spectral
(x) axes.
.. note::
Assumes linear dispersion and trace functions!
"""
h = pyfits.Header()
h['CRPIX1'] = self.beam.sh_beam[0]/2 - self.beam.xcenter
h['CRPIX2'] = self.beam.sh_beam[0]/2 - self.beam.ycenter
h['CRVAL1'] = self.beam.lam_beam[0]
h['CD1_1'] = self.beam.lam_beam[1] - self.beam.lam_beam[0]
h['CD1_2'] = 0.
h['CRVAL2'] = -1*self.beam.ytrace_beam[0]
h['CD2_2'] = 1.
h['CD2_1'] = -(self.beam.ytrace_beam[1] - self.beam.ytrace_beam[0])
h['CTYPE1'] = 'WAVE'
h['CTYPE2'] = 'LINEAR'
if data is None:
data = np.zeros(self.beam.sh_beam, dtype=np.float32)
hdu = pyfits.ImageHDU(data=data, header=h)
wcs = pywcs.WCS(hdu.header)
#wcs.pscale = np.sqrt(wcs.wcs.cd[0,0]**2 + wcs.wcs.cd[1,0]**2)*3600.
wcs.pscale = utils.get_wcs_pscale(wcs)
return hdu, wcs
    def full_2d_wcs(self, data=None):
        """Get trace WCS of the 2D spectrum

        Parameters
        ----------
        data : array-like
            Put this data in the output HDU rather than empty zeros

        Returns
        -------
        hdu : `~astropy.io.fits.ImageHDU`
            Image HDU with header and data properties.

        wcs : `~astropy.wcs.WCS`
            WCS appropriate for the 2D spectrum with spatial (y) and spectral
            (x) axes.  Quadratic dispersion/trace terms are encoded as SIP
            distortion coefficients on top of the linear solution.

        .. note::
            Assumes linear dispersion and trace functions!
        """
        h = pyfits.Header()
        # Reference pixel at the cutout center, corrected for the sub-pixel
        # centroid offsets
        h['CRPIX1'] = self.beam.sh_beam[0]/2 - self.beam.xcenter
        h['CRPIX2'] = self.beam.sh_beam[0]/2 - self.beam.ycenter
        # Dispersion axis in microns (lam/1e4)
        h['CRVAL1'] = self.beam.lam_beam[0]/1.e4
        h['CD1_1'] = (self.beam.lam_beam[1] - self.beam.lam_beam[0])/1.e4
        h['CD1_2'] = 0.
        # Linear trace terms along y
        h['CRVAL2'] = -1*self.beam.ytrace_beam[0]
        h['CD2_2'] = 1.
        h['CD2_1'] = -(self.beam.ytrace_beam[1] - self.beam.ytrace_beam[0])
        # Celestial-style axis types so that SIP (A_*, B_*) keywords are
        # interpreted by the WCS parser; units here are nominal
        h['CTYPE1'] = 'RA---TAN-SIP'
        h['CUNIT1'] = 'mas'
        h['CTYPE2'] = 'DEC--TAN-SIP'
        h['CUNIT2'] = 'mas'

        #wcs_header = grizli.utils.to_header(self.grism.wcs)

        # Quadratic fits to the wavelength solution and the trace as a
        # function of pixel index
        x = np.arange(len(self.beam.lam_beam))
        c = np.polyfit(x, self.beam.lam_beam/1.e4, 2)
        #c = np.polyfit((self.beam.lam_beam-self.beam.lam_beam[0])/1.e4, x/h['CD1_1'], 2)

        ct = np.polyfit(x, self.beam.ytrace_beam, 2)

        # Second-order SIP coefficients: only the x**2 terms are non-zero
        h['A_ORDER'] = 2
        h['B_ORDER'] = 2
        h['A_0_2'] = 0.
        h['A_1_2'] = 0.
        h['A_2_2'] = 0.
        h['A_2_1'] = 0.
        h['A_2_0'] = c[0]#/c[1]
        # Replace the linear dispersion with the fitted linear term
        h['CD1_1'] = c[1]

        h['B_0_2'] = 0.
        h['B_1_2'] = 0.
        h['B_2_2'] = 0.
        h['B_2_1'] = 0.

        # NOTE(review): guard on ct[1] looks like a leftover from a
        # normalization (ct[0]/ct[1]); as written B_2_0 doesn't use ct[1]
        # except to test it against zero — confirm intent
        if ct[1] != 0:
            h['B_2_0'] = ct[0]#/ct[1]
        else:
            h['B_2_0'] = 0

        #h['B_2_0'] = 0

        if data is None:
            data = np.zeros(self.beam.sh_beam, dtype=np.float32)

        hdu = pyfits.ImageHDU(data=data, header=h)
        wcs = pywcs.WCS(hdu.header)

        # xf = x + h['CRPIX1']-1
        # coo = np.array([xf, xf*0])
        # tr = wcs.all_pix2world(coo.T, 0)

        #wcs.pscale = np.sqrt(wcs.wcs.cd[0,0]**2 + wcs.wcs.cd[1,0]**2)*3600.
        wcs.pscale = utils.get_wcs_pscale(wcs)

        return hdu, wcs
def get_sky_coords(self):
"""Get WCS coordinates of the center of the direct image
Returns
-------
ra, dec : float
Center coordinates of the beam thumbnail in decimal degrees
"""
pix_center = np.array([self.beam.sh][::-1])/2.
pix_center -= np.array([self.beam.xcenter, self.beam.ycenter])
if self.direct.wcs.sip is not None:
for i in range(2):
self.direct.wcs.sip.crpix[i] = self.direct.wcs.wcs.crpix[i]
ra, dec = self.direct.wcs.all_pix2world(pix_center, 1)[0]
return ra, dec
def get_dispersion_PA(self, decimals=0):
"""Compute exact PA of the dispersion axis, including tilt of the
trace and the FLT WCS
Parameters
----------
decimals : int or None
Number of decimal places to round to, passed to `~numpy.round`.
If None, then don't round.
Returns
-------
dispersion_PA : float
PA (angle East of North) of the dispersion axis.
"""
from astropy.coordinates import Angle
import astropy.units as u
### extra tilt of the 1st order grism spectra
x0 = self.beam.conf.conf['BEAMA']
dy_trace, lam_trace = self.beam.conf.get_beam_trace(x=507, y=507,
dx=x0, beam='A')
extra = np.arctan2(dy_trace[1]-dy_trace[0], x0[1]-x0[0])/np.pi*180
### Distorted WCS
crpix = self.direct.wcs.wcs.crpix
xref = [crpix[0], crpix[0]+1]
yref = [crpix[1], crpix[1]]
r, d = self.direct.wcs.all_pix2world(xref, yref, 1)
pa = Angle((extra +
np.arctan2(np.diff(r)*np.cos(d[0]/180*np.pi),
np.diff(d))[0]/np.pi*180)*u.deg)
dispersion_PA = pa.wrap_at(360*u.deg).value
if decimals is not None:
dispersion_PA = np.round(dispersion_PA, decimals=decimals)
return dispersion_PA
def init_epsf(self, center=None, tol=1.e-3, yoff=0., skip=1., flat_sensitivity=False, psf_params=None, N=4, get_extended=False):
"""Initialize ePSF fitting for point sources
TBD
"""
import scipy.sparse
EPSF = utils.EffectivePSF()
ivar = 1/self.direct['ERR']**2
ivar[~np.isfinite(ivar)] = 0
ivar[self.direct['DQ'] > 0] = 0
ivar[self.beam.seg != self.id] = 0
if ivar.max() == 0:
ivar = ivar+1.
origin = np.array(self.direct.origin) - np.array(self.direct.pad)
if psf_params is None:
self.psf_params = EPSF.fit_ePSF(self.direct['SCI'],
ivar=ivar,
center=center, tol=tol,
N=N, origin=origin,
filter=self.direct.filter,
get_extended=get_extended,
only_centering=False)
else:
self.psf_params = psf_params
self.beam.x_init_epsf(flat_sensitivity=False, psf_params=self.psf_params, psf_filter=self.direct.filter, yoff=yoff, skip=skip, get_extended=get_extended)
self._parse_from_data(contam_sn_mask=self.contam_sn_mask,
min_mask=self.min_mask, min_sens=self.min_sens)
return None
# self.psf = EPSF.get_ePSF(self.psf_params, origin=origin, shape=self.beam.sh, filter=self.direct.filter)
#
# self.psf_resid = self.direct['SCI'] - self.psf
#
# y0, x0 = np.array(self.beam.sh)/2.-1
#
# # Center in detector coords
# xd = self.psf_params[1] + self.direct.origin[1] - self.direct.pad + x0
# yd = self.psf_params[2] + self.direct.origin[0] - self.direct.pad + y0
#
# # Get wavelength array
# psf_xy_lam = []
# for i, filter in enumerate(['F105W', 'F125W', 'F160W']):
# psf_xy_lam.append(EPSF.get_at_position(x=xd, y=yd, filter=filter))
#
# filt_ix = np.arange(3)
# filt_lam = np.array([1.0551, 1.2486, 1.5369])*1.e4
#
# yp_beam, xp_beam = np.indices(self.beam.sh_beam)
# #skip = 1
# xarr = np.arange(0,self.beam.lam_beam.shape[0], skip)
# xarr = xarr[xarr <= self.beam.lam_beam.shape[0]-1]
# xbeam = np.arange(self.beam.lam_beam.shape[0])*1.
#
# #yoff = 0 #-0.15
# psf_model = self.model*0.
# A_psf = []
# lam_psf = []
#
# lam_offset = self.beam.sh[1]/2 - self.psf_params[1] - 1
# self.lam_offset = lam_offset
#
# for xi in xarr:
# yi = np.interp(xi, xbeam, self.beam.ytrace_beam)
# li = np.interp(xi, xbeam, self.beam.lam_beam)
# dx = xp_beam-self.psf_params[1]-xi-x0
# dy = yp_beam-self.psf_params[2]-yi+yoff-y0
#
# # wavelength-dependent
# ii = np.interp(li, filt_lam, filt_ix, left=-1, right=10)
# if ii == -1:
# psf_xy_i = psf_xy_lam[0]*1
# elif ii == 10:
# psf_xy_i = psf_xy_lam[2]*1
# else:
# ni = int(ii)
# f = 1-(li-filt_lam[ni])/(filt_lam[ni+1]-filt_lam[ni])
# psf_xy_i = f*psf_xy_lam[ni] + (1-f)*psf_xy_lam[ni+1]
#
# psf = EPSF.eval_ePSF(psf_xy_i, dx, dy)*self.psf_params[0]
#
# A_psf.append(psf.flatten())
# lam_psf.append(li)
#
# # Sensitivity
# self.lam_psf = np.array(lam_psf)
# if flat_sensitivity:
# s_i_scale = np.abs(np.gradient(self.lam_psf))*self.direct.photflam
# else:
# sens = self.beam.conf.sens[self.beam.beam]
# so = np.argsort(self.lam_psf)
# s_i = interp.interp_conserve_c(self.lam_psf[so], sens['WAVELENGTH'], sens['SENSITIVITY'])*np.gradient(self.lam_psf[so])*self.direct.photflam
# s_i_scale = s_i*0.
# s_i_scale[so] = s_i
#
# self.A_psf = scipy.sparse.csr_matrix(np.array(A_psf).T*s_i_scale)
# def xcompute_model_psf(self, id=None, spectrum_1d=None, in_place=True, is_cgs=True):
# if spectrum_1d is None:
# model = np.array(self.A_psf.sum(axis=1))
# model = model.reshape(self.beam.sh_beam)
# else:
# dx = np.diff(self.lam_psf)[0]
# if dx < 0:
# coeffs = interp.interp_conserve_c(self.lam_psf[::-1],
# spectrum_1d[0],
# spectrum_1d[1])[::-1]
# else:
# coeffs = interp.interp_conserve_c(self.lam_psf,
# spectrum_1d[0],
# spectrum_1d[1])
#
#
# model = self.A_psf.dot(coeffs).reshape(self.beam.sh_beam)
#
# if in_place:
# self.model = model
# self.beam.model = self.model
# return True
# else:
# return model.flatten()
####### Below here will be cut out after verifying that the demos
####### can be run with the new fitting tools
def init_poly_coeffs(self, poly_order=1, fit_background=True):
"""Initialize arrays for polynomial fits to the spectrum
Provides capabilities of fitting n-order polynomials to observed
spectra rather than galaxy/stellar templates.
Parameters
----------
poly_order : int
Order of the polynomial
fit_background : bool
Compute additional arrays for allowing the background to be fit
along with the polynomial coefficients.
Returns
-------
Polynomial parameters stored in attributes `y_poly`, `n_poly`, ...
"""
### Already done?
if poly_order == self.poly_order:
return None
self.poly_order = poly_order
##### Model: (a_0 x**0 + ... a_i x**i)*continuum + line
yp, xp = np.indices(self.beam.sh_beam)
NX = self.beam.sh_beam[1]
self.xpf = (xp.flatten() - NX/2.)
self.xpf /= (NX/2.)
### Polynomial continuum arrays
if fit_background:
self.n_bg = 1
self.A_poly = [self.flat_flam*0+1]
self.A_poly.extend([self.xpf**order*self.flat_flam
for order in range(poly_order+1)])
else:
self.n_bg = 0
self.A_poly = [self.xpf**order*self.flat_flam
for order in range(poly_order+1)]
### Array for generating polynomial "template"
x = (np.arange(NX) - NX/2.)/ (NX/2.)
self.y_poly = np.array([x**order for order in range(poly_order+1)])
self.n_poly = self.y_poly.shape[0]
self.n_simp = self.n_poly + self.n_bg
self.DoF = self.fit_mask.sum()
# def load_templates(self, fwhm=400, line_complexes=True):
# """TBD
#
# ***
# These below will probably be cut since they're all now implemented
# in more detail in multifit.py. Need to update demos before
# taking them out completely.
# ***
#
# """
# # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed1_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed2_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed4_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed5_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed6_nolines.dat',
# # 'templates/cvd12_t11_solar_Chabrier.extend.dat',
# # 'templates/dobos11/bc03_pr_ch_z02_ltau07.0_age09.2_av2.5.dat']
#
# templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat',
# 'templates/cvd12_t11_solar_Chabrier.extend.dat']
#
# temp_list = OrderedDict()
# for temp in templates:
# data = np.loadtxt(GRIZLI_PATH + '/' + temp, unpack=True)
# scl = np.interp(5500., data[0], data[1])
# name = os.path.basename(temp)
# temp_list[name] = utils.SpectrumTemplate(wave=data[0],
# flux=data[1]/scl)
# #plt.plot(temp_list[-1].wave, temp_list[-1].flux, label=temp, alpha=0.5)
#
# line_wavelengths = {} ; line_ratios = {}
# line_wavelengths['Ha'] = [6564.61]; line_ratios['Ha'] = [1.]
# line_wavelengths['Hb'] = [4862.68]; line_ratios['Hb'] = [1.]
# line_wavelengths['Hg'] = [4341.68]; line_ratios['Hg'] = [1.]
# line_wavelengths['Hd'] = [4102.892]; line_ratios['Hd'] = [1.]
# line_wavelengths['OIIIx'] = [4364.436]; line_ratios['OIIIx'] = [1.]
# line_wavelengths['OIII'] = [5008.240, 4960.295]; line_ratios['OIII'] = [2.98, 1]
# line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]; line_ratios['OIII+Hb'] = [2.98, 1, 3.98/8.]
#
# line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]; line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]
#
# line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]
# line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]
#
# line_wavelengths['OII'] = [3729.875]; line_ratios['OII'] = [1]
# line_wavelengths['OI'] = [6302.046]; line_ratios['OI'] = [1]
#
# line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]; line_ratios['Ha+SII'] = [1., 1./10, 1./10]
# line_wavelengths['SII'] = [6718.29, 6732.67]; line_ratios['SII'] = [1., 1.]
#
# if line_complexes:
# #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']
# line_list = ['Ha+SII', 'OIII+Hb', 'OII']
# else:
# line_list = ['Ha', 'SII', 'OIII', 'Hb', 'OII']
# #line_list = ['Ha', 'SII']
#
# for line in line_list:
# scl = line_ratios[line]/np.sum(line_ratios[line])
# for i in range(len(scl)):
# line_i = utils.SpectrumTemplate(wave=line_wavelengths[line][i],
# flux=None, fwhm=fwhm, velocity=True)
#
# if i == 0:
# line_temp = line_i*scl[i]
# else:
# line_temp = line_temp + line_i*scl[i]
#
# temp_list['line {0}'.format(line)] = line_temp
#
# return temp_list
#
# def fit_at_z(self, z=0., templates={}, fitter='lstsq', poly_order=3):
# """TBD
# """
# import copy
#
# import sklearn.linear_model
# import numpy.linalg
#
# self.init_poly_coeffs(poly_order=poly_order)
#
# NTEMP = len(self.A_poly)
# A_list = copy.copy(self.A_poly)
# ok_temp = np.ones(NTEMP+len(templates), dtype=bool)
#
# for i, key in enumerate(templates.keys()):
# NTEMP += 1
# temp = templates[key].zscale(z, 1.)
# spectrum_1d = [temp.wave, temp.flux]
#
# if ((temp.wave[0] > self.beam.lam_beam[-1]) |
# (temp.wave[-1] < self.beam.lam_beam[0])):
#
# A_list.append(self.flat_flam*1)
# ok_temp[NTEMP-1] = False
# #print 'skip TEMP: %d, %s' %(i, key)
# continue
# else:
# pass
# #print 'TEMP: %d' %(i)
#
# temp_model = self.compute_model(spectrum_1d=spectrum_1d,
# in_place=False)
#
# ### Test that model spectrum has non-zero pixel values
# #print 'TEMP: %d, %.3f' %(i, temp_model[self.fit_mask].max()/temp_model.max())
# if temp_model[self.fit_mask].max()/temp_model.max() < 0.2:
# #print 'skipx TEMP: %d, %s' %(i, key)
# ok_temp[NTEMP-1] = False
#
# A_list.append(temp_model)
#
# A = np.vstack(A_list).T
# out_coeffs = np.zeros(NTEMP)
#
# ### LSTSQ coefficients
# if fitter == 'lstsq':
# out = numpy.linalg.lstsq(A[self.fit_mask, :][:, ok_temp],
# self.scif[self.fit_mask])
# lstsq_coeff, residuals, rank, s = out
# coeffs = lstsq_coeff
# else:
# clf = sklearn.linear_model.LinearRegression()
# status = clf.fit(A[self.fit_mask, :][:, ok_temp],
# self.scif[self.fit_mask])
# coeffs = clf.coef_
#
# out_coeffs[ok_temp] = coeffs
# model = np.dot(A, out_coeffs)
# model_2d = model.reshape(self.beam.sh_beam)
#
# chi2 = np.sum(((self.scif - model)**2*self.ivarf)[self.fit_mask])
#
# return A, out_coeffs, chi2, model_2d
#
# def fit_redshift(self, prior=None, poly_order=1, fwhm=500,
# make_figure=True, zr=None, dz=None, verbose=True):
# """TBD
# """
# # if False:
# # reload(grizlidev.utils); utils = grizlidev.utils
# # reload(grizlidev.utils_c); reload(grizlidev.model);
# # reload(grizlidev.grismconf); reload(grizlidev.utils); reload(grizlidev.multifit); reload(grizlidev); reload(grizli)
# #
# # beams = []
# # if id in flt.object_dispersers:
# # b = flt.object_dispersers[id]['A']
# # beam = grizli.model.BeamCutout(flt, b, conf=flt.conf)
# # #print beam.grism.pad, beam.beam.grow
# # beams.append(beam)
# # else:
# # print flt.grism.parent_file, 'ID %d not found' %(id)
# #
# # #plt.imshow(beam.beam.direct*(beam.beam.seg == id), interpolation='Nearest', origin='lower', cmap='viridis_r')
# # self = beam
# #
# # #poly_order = 3
#
# if self.grism.filter == 'G102':
# if zr is None:
# zr = [0.78e4/6563.-1, 1.2e4/5007.-1]
# if dz is None:
# dz = [0.001, 0.0005]
#
# if self.grism.filter == 'G141':
# if zr is None:
# zr = [1.1e4/6563.-1, 1.65e4/5007.-1]
# if dz is None:
# dz = [0.003, 0.0005]
#
# zgrid = utils.log_zgrid(zr, dz=dz[0])
# NZ = len(zgrid)
#
# templates = self.load_templates(fwhm=fwhm)
# NTEMP = len(templates)
#
# out = self.fit_at_z(z=0., templates=templates, fitter='lstsq',
# poly_order=poly_order)
#
# A, coeffs, chi2, model_2d = out
#
# chi2 = np.zeros(NZ)
# coeffs = np.zeros((NZ, coeffs.shape[0]))
#
# for i in range(NZ):
# out = self.fit_at_z(z=zgrid[i], templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs[i,:], chi2[i], model_2d = out
# if verbose:
# print(utils.NO_NEWLINE + '{0:.4f} {1:9.1f}'.format(zgrid[i], chi2[i]))
#
# # peaks
# import peakutils
# chi2nu = (chi2.min()-chi2)/self.DoF
# indexes = peakutils.indexes((chi2nu+0.01)*(chi2nu > -0.004), thres=0.003, min_dist=20)
# num_peaks = len(indexes)
# # plt.plot(zgrid, (chi2-chi2.min())/ self.DoF)
# # plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes]/ self.DoF, color='r')
#
#
# ### zoom
# if ((chi2.max()-chi2.min())/self.DoF > 0.01) & (num_peaks < 5):
# threshold = 0.01
# else:
# threshold = 0.001
#
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF, threshold=threshold, factor=10)
# NZOOM = len(zgrid_zoom)
#
# chi2_zoom = np.zeros(NZOOM)
# coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
#
# for i in range(NZOOM):
# out = self.fit_at_z(z=zgrid_zoom[i], templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
# if verbose:
# print(utils.NO_NEWLINE + '- {0:.4f} {1:9.1f}'.format(zgrid_zoom[i], chi2_zoom[i]))
#
# zgrid = np.append(zgrid, zgrid_zoom)
# chi2 = np.append(chi2, chi2_zoom)
# coeffs = np.append(coeffs, coeffs_zoom, axis=0)
#
# so = np.argsort(zgrid)
# zgrid = zgrid[so]
# chi2 = chi2[so]
# coeffs=coeffs[so,:]
#
# ### Best redshift
# templates = self.load_templates(line_complexes=False, fwhm=fwhm)
# zbest = zgrid[np.argmin(chi2)]
# out = self.fit_at_z(z=zbest, templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs_full, chi2_best, model_full = out
#
# ## Continuum fit
# mask = np.isfinite(coeffs_full)
# for i, key in enumerate(templates.keys()):
# if key.startswith('line'):
# mask[self.n_simp+i] = False
#
# model_continuum = np.dot(A, coeffs_full*mask)
# model_continuum = model_continuum.reshape(self.beam.sh_beam)
#
# ### 1D spectrum
# model1d = utils.SpectrumTemplate(wave=self.beam.lam,
# flux=np.dot(self.y_poly.T,
# coeffs_full[self.n_bg:self.n_poly+self.n_bg]))
#
# cont1d = model1d*1
#
# line_flux = OrderedDict()
# for i, key in enumerate(templates.keys()):
# temp_i = templates[key].zscale(zbest, coeffs_full[self.n_simp+i])
# model1d += temp_i
# if not key.startswith('line'):
# cont1d += temp_i
# else:
# line_flux[key.split()[1]] = (coeffs_full[self.n_simp+i] * 1.)
# #self.beam.total_flux/1.e-17)
#
#
# fit_data = OrderedDict()
# fit_data['poly_order'] = poly_order
# fit_data['fwhm'] = fwhm
# fit_data['zbest'] = zbest
# fit_data['zgrid'] = zgrid
# fit_data['A'] = A
# fit_data['coeffs'] = coeffs
# fit_data['chi2'] = chi2
# fit_data['model_full'] = model_full
# fit_data['coeffs_full'] = coeffs_full
# fit_data['line_flux'] = line_flux
# #fit_data['templates_full'] = templates
# fit_data['model_cont'] = model_continuum
# fit_data['model1d'] = model1d
# fit_data['cont1d'] = cont1d
#
# fig = None
# if make_figure:
# fig = self.show_redshift_fit(fit_data)
# #fig.savefig('fit.pdf')
#
# return fit_data, fig
    def show_redshift_fit(self, fit_data):
        """Make a diagnostic plot from redshift-fit results.

        Parameters
        ----------
        fit_data : dict
            Fit products with (at least) keys 'zgrid', 'chi2' and
            'model_full', as produced by the redshift-fitting routines.

        Returns
        -------
        fig : `~matplotlib.figure.Figure`
            Figure object that can be optionally written to a hardcopy file.
        """
        import matplotlib.gridspec
        #zgrid, A, coeffs, chi2, model_best, model_continuum, model1d = fit_outputs

        ### Full figure
        fig = plt.figure(figsize=(12,5))
        #fig = plt.Figure(figsize=(8,4))

        ## 1D plots
        gsb = matplotlib.gridspec.GridSpec(3,1)

        # Optimally-extracted 1D spectrum of the contamination-subtracted data
        xspec, yspec, yerr = self.beam.optimal_extract(self.grism.data['SCI']
                                                       - self.contam,
                                                       ivar = self.ivar)

        # Flat-spectrum model for reference
        flat_model = self.flat_flam.reshape(self.beam.sh_beam)
        xspecm, yspecm, yerrm = self.beam.optimal_extract(flat_model)

        # Best-fit template model
        out = self.beam.optimal_extract(fit_data['model_full'])
        xspecl, yspecl, yerrl = out

        ax = fig.add_subplot(gsb[-2:,:])
        ax.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o',
                    markersize=3, color='black', alpha=0.5,
                    label='Data (id={0:d})'.format(self.beam.id))

        ax.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
                label=r'Flat $f_\lambda$ ({0})'.format(self.direct.filter))

        # Redshift at the chi-squared minimum
        zbest = fit_data['zgrid'][np.argmin(fit_data['chi2'])]
        ax.plot(xspecl/1.e4, yspecl, color='orange', linewidth=2, alpha=0.8,
                label='Template (z={0:.4f})'.format(zbest))

        ax.legend(fontsize=8, loc='lower center', scatterpoints=1)

        ax.set_xlabel(r'$\lambda$'); ax.set_ylabel('flux (e-/s)')

        # Wavelength limits (microns) appropriate for the grism element
        if self.grism.filter == 'G102':
            xlim = [0.7, 1.25]

        if self.grism.filter == 'G141':
            xlim = [1., 1.8]

        xt = np.arange(xlim[0],xlim[1],0.1)
        ax.set_xlim(xlim[0], xlim[1])
        ax.set_xticks(xt)

        # Chi-squared vs. redshift panel
        ax = fig.add_subplot(gsb[-3,:])
        ax.plot(fit_data['zgrid'], fit_data['chi2']/self.DoF)
        # Reference curves at delta-chi2 = 1, 4, 9 above the minimum,
        # drawn in progressively lighter grays
        for d in [1,4,9]:
            ax.plot(fit_data['zgrid'],
                    fit_data['chi2']*0+(fit_data['chi2'].min()+d)/self.DoF,
                    color='{0:.1f}'.format(d/20.))

        #ax.set_xticklabels([])
        ax.set_ylabel(r'$\chi^2/(\nu={0:d})$'.format(self.DoF))
        ax.set_xlabel('z')
        ax.set_xlim(fit_data['zgrid'][0], fit_data['zgrid'][-1])

        # axt = ax.twiny()
        # axt.set_xlim(np.array(ax.get_xlim())*1.e4/6563.-1)
        # axt.set_xlabel(r'$z_\mathrm{H\alpha}$')

        ## 2D spectra
        gst = matplotlib.gridspec.GridSpec(4,1)
        # Fall back for matplotlib versions without viridis
        if 'viridis_r' in plt.colormaps():
            cmap = 'viridis_r'
        else:
            cmap = 'cubehelix_r'

        # Observed 2D spectrum
        ax = fig.add_subplot(gst[0,:])
        ax.imshow(self.grism.data['SCI'], vmin=-0.05, vmax=0.2, cmap=cmap,
                  interpolation='Nearest', origin='lower', aspect='auto')
        ax.set_ylabel('Observed')

        # Contamination-subtracted data restricted to the fit mask
        ax = fig.add_subplot(gst[1,:])
        mask2d = self.fit_mask.reshape(self.beam.sh_beam)
        ax.imshow((self.grism.data['SCI'] - self.contam)*mask2d,
                  vmin=-0.05, vmax=0.2, cmap=cmap,
                  interpolation='Nearest', origin='lower', aspect='auto')
        ax.set_ylabel('Masked')

        # Best-fit model plus contamination
        ax = fig.add_subplot(gst[2,:])
        ax.imshow(fit_data['model_full']+self.contam, vmin=-0.05, vmax=0.2,
                  cmap=cmap, interpolation='Nearest', origin='lower',
                  aspect='auto')
        ax.set_ylabel('Model')

        # Residuals
        ax = fig.add_subplot(gst[3,:])
        ax.imshow(self.grism.data['SCI']-fit_data['model_full']-self.contam,
                  vmin=-0.05, vmax=0.2, cmap=cmap, interpolation='Nearest',
                  origin='lower', aspect='auto')
        ax.set_ylabel('Resid.')

        # Shared wavelength-axis labels for the four 2D panels
        for ax in fig.axes[-4:]:
            self.beam.twod_axis_labels(wscale=1.e4,
                                       limits=[xlim[0], xlim[1], 0.1],
                                       mpl_axis=ax)
            self.beam.twod_xlim(xlim, wscale=1.e4, mpl_axis=ax)
            ax.set_yticklabels([])

        # Only the bottom 2D panel keeps its x labels
        ax.set_xlabel(r'$\lambda$')

        for ax in fig.axes[-4:-1]:
            ax.set_xticklabels([])

        gsb.tight_layout(fig, pad=0.1,h_pad=0.01, rect=(0,0,0.5,1))
        gst.tight_layout(fig, pad=0.1,h_pad=0.01, rect=(0.5,0.01,1,0.98))

        return fig
def simple_line_fit(self, fwhm=48., grid=[1.12e4, 1.65e4, 1, 4],
fitter='lstsq', poly_order=3):
"""Function to fit a Gaussian emission line and a polynomial continuum
Parameters
----------
fwhm : float
FWHM of the emission line
grid : list `[l0, l1, dl, skip]`
The base wavelength array will be generated like
>>> wave = np.arange(l0, l1, dl)
and lines will be generated every `skip` wavelength grid points:
>>> line_centers = wave[::skip]
fitter : str, 'lstsq' or 'sklearn'
Least-squares fitting function for determining template
normalization coefficients.
order : int (>= 0)
Polynomial order to use for the continuum
Returns
-------
line_centers : length N `~numpy.array`
emission line center positions
coeffs : (N, M) `~numpy.ndarray` where `M = (poly_order+1+1)`
Normalization coefficients for the continuum and emission line
templates.
chi2 : `~numpy.array`
Chi-squared evaluated at each line_centers[i]
ok_data : `~numpy.ndarray`
Boolean mask of pixels used for the Chi-squared calculation.
Consists of non-masked DQ pixels, non-zero ERR pixels and pixels
where `self.model > 0.03*self.model.max()` for the flat-spectrum
model.
best_model : `~numpy.ndarray`
2D array with best-fit continuum + line model
best_model_cont : `~numpy.ndarray`
2D array with Best-fit continuum-only model.
best_line_center : float
wavelength where chi2 is minimized.
best_line_flux : float
Emission line flux where chi2 is minimized
"""
### Test fit
import sklearn.linear_model
import numpy.linalg
clf = sklearn.linear_model.LinearRegression()
### Continuum
self.compute_model()
self.model = self.modelf.reshape(self.beam.sh_beam)
### OK data where the 2D model has non-zero flux
ok_data = (~self.mask.flatten()) & (self.ivar.flatten() != 0)
ok_data &= (self.modelf > 0.03*self.modelf.max())
### Flat versions of sci/ivar arrays
scif = (self.grism.data['SCI'] - self.contam).flatten()
ivarf = self.ivar.flatten()
##### Model: (a_0 x**0 + ... a_i x**i)*continuum + line
yp, xp = np.indices(self.beam.sh_beam)
xpf = (xp.flatten() - self.beam.sh_beam[1]/2.)
xpf /= (self.beam.sh_beam[1]/2)
### Polynomial continuum arrays
A_list = [xpf**order*self.modelf for order in range(poly_order+1)]
# Extra element for the computed line model
A_list.append(self.modelf*1)
A = np.vstack(A_list).T
### Normalized Gaussians on a grid
waves = np.arange(grid[0], grid[1], grid[2])
line_centers = waves[grid[3] // 2::grid[3]]
rms = fwhm/2.35
gaussian_lines = np.exp(-(line_centers[:,None]-waves)**2/2/rms**2)
gaussian_lines /= np.sqrt(2*np.pi*rms**2)
N = len(line_centers)
coeffs = np.zeros((N, A.shape[1]))
chi2 = np.zeros(N)
chi2min = 1e30
### Loop through line models and fit for template coefficients
### Compute chi-squared.
for i in range(N):
self.compute_model(spectrum_1d=[waves, gaussian_lines[i,:]])
A[:,-1] = self.model.flatten()
if fitter == 'lstsq':
out = numpy.linalg.lstsq(A[ok_data,:], scif[ok_data])
lstsq_coeff, residuals, rank, s = out
coeffs[i,:] += lstsq_coeff
model = np.dot(A, lstsq_coeff)
else:
status = clf.fit(A[ok_data,:], scif[ok_data])
coeffs[i,:] = clf.coef_
model = np.dot(A, clf.coef_)
chi2[i] = np.sum(((scif-model)**2*ivarf)[ok_data])
if chi2[i] < chi2min:
chi2min = chi2[i]
#print chi2
ix = np.argmin(chi2)
self.compute_model(spectrum_1d=[waves, gaussian_lines[ix,:]])
A[:,-1] = self.model.flatten()
best_coeffs = coeffs[ix,:]*1
best_model = np.dot(A, best_coeffs).reshape(self.beam.sh_beam)
### Continuum
best_coeffs_cont = best_coeffs*1
best_coeffs_cont[-1] = 0.
best_model_cont = np.dot(A, best_coeffs_cont)
best_model_cont = best_model_cont.reshape(self.beam.sh_beam)
best_line_center = line_centers[ix]
best_line_flux = coeffs[ix,-1]*self.beam.total_flux/1.e-17
return (line_centers, coeffs, chi2, ok_data,
best_model, best_model_cont,
best_line_center, best_line_flux)
def show_simple_fit_results(self, fit_outputs):
    """Make a plot based on results from `simple_line_fit`.

    Parameters
    ----------
    fit_outputs : tuple
        returned data from `simple_line_fit`. I.e.,

        >>> fit_outputs = BeamCutout.simple_line_fit()
        >>> fig = BeamCutout.show_simple_fit_results(fit_outputs)

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Figure object that can be optionally written to a hardcopy file.
    """
    import matplotlib.gridspec

    # Unpack everything returned by simple_line_fit()
    line_centers, coeffs, chi2, ok_data, best_model, best_model_cont, best_line_center, best_line_flux = fit_outputs

    ### Full figure
    fig = plt.figure(figsize=(10,5))
    #fig = plt.Figure(figsize=(8,4))

    ## 1D plots
    gsb = matplotlib.gridspec.GridSpec(3,1)

    # Optimally-extracted 1D spectrum of the contamination-subtracted data
    xspec, yspec, yerr = self.beam.optimal_extract(self.grism.data['SCI']
                                                    - self.contam,
                                                    ivar = self.ivar)

    # 1D extraction of the flat-continuum model and of the best-fit model
    flat_model = self.compute_model(in_place=False)
    flat_model = flat_model.reshape(self.beam.sh_beam)
    xspecm, yspecm, yerrm = self.beam.optimal_extract(flat_model)

    xspecl, yspecl, yerrl = self.beam.optimal_extract(best_model)

    # Bottom two-thirds of the left column: extracted spectra
    ax = fig.add_subplot(gsb[-2:,:])
    ax.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o',
                markersize=3, color='black', alpha=0.5,
                label='Data (id={0:d})'.format(self.beam.id))
    ax.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
            label=r'Flat $f_\lambda$ ({0})'.format(self.direct.filter))
    ax.plot(xspecl/1.e4, yspecl, color='orange', linewidth=2, alpha=0.8,
            label='Cont+line ({0:.4f}, {1:.2e})'.format(best_line_center/1.e4, best_line_flux*1.e-17))

    ax.legend(fontsize=8, loc='lower center', scatterpoints=1)

    ax.set_xlabel(r'$\lambda$'); ax.set_ylabel('flux (e-/s)')

    # Top third: chi-squared per degree of freedom vs. trial line center
    ax = fig.add_subplot(gsb[-3,:])
    ax.plot(line_centers/1.e4, chi2/ok_data.sum())
    ax.set_xticklabels([])
    ax.set_ylabel(r'$\chi^2/(\nu={0:d})$'.format(ok_data.sum()))

    # Wavelength plot range per grism.
    # NOTE(review): if the grism filter is neither G102 nor G141, `xlim`
    # is never assigned and the code below raises NameError -- confirm
    # whether other filters can reach this method.
    if self.grism.filter == 'G102':
        xlim = [0.7, 1.25]

    if self.grism.filter == 'G141':
        xlim = [1., 1.8]

    xt = np.arange(xlim[0],xlim[1],0.1)
    for ax in fig.axes:
        ax.set_xlim(xlim[0], xlim[1])
        ax.set_xticks(xt)

    # Secondary top axis on the last panel: redshift assuming the line is
    # H-alpha (rest 6563 Angstroms).
    axt = ax.twiny()
    axt.set_xlim(np.array(ax.get_xlim())*1.e4/6563.-1)
    axt.set_xlabel(r'$z_\mathrm{H\alpha}$')

    ## 2D spectra
    gst = matplotlib.gridspec.GridSpec(3,1)
    # Prefer a perceptually-uniform colormap when the installed matplotlib
    # provides it; fall back for older releases.
    if 'viridis_r' in plt.colormaps():
        cmap = 'viridis_r'
    else:
        cmap = 'cubehelix_r'

    # Observed 2D spectrum
    ax = fig.add_subplot(gst[0,:])
    ax.imshow(self.grism.data['SCI'], vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Observed')

    # Best-fit model (continuum + line) with contamination added back in
    ax = fig.add_subplot(gst[1,:])
    ax.imshow(best_model+self.contam, vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Model')

    # Residual: data minus model minus contamination
    ax = fig.add_subplot(gst[2,:])
    ax.imshow(self.grism.data['SCI']-best_model-self.contam, vmin=-0.05,
              vmax=0.2, cmap=cmap, interpolation='Nearest',
              origin='lower', aspect='auto')
    ax.set_ylabel('Resid.')

    # Apply consistent wavelength axes to the three 2D panels
    for ax in fig.axes[-3:]:
        self.beam.twod_axis_labels(wscale=1.e4,
                                   limits=[xlim[0], xlim[1], 0.1],
                                   mpl_axis=ax)
        self.beam.twod_xlim(xlim, wscale=1.e4, mpl_axis=ax)
        ax.set_yticklabels([])

    ax.set_xlabel(r'$\lambda$')

    # Only the bottom 2D panel keeps its x tick labels
    for ax in fig.axes[-3:-1]:
        ax.set_xticklabels([])

    # 1D plots occupy the left half of the figure, 2D panels the right half
    gsb.tight_layout(fig, pad=0.1,h_pad=0.01, rect=(0,0,0.5,1))
    gst.tight_layout(fig, pad=0.1,h_pad=0.01, rect=(0.5,0.1,1,0.9))

    return fig
|
albertfxwang/grizli
|
grizli/model.py
|
Python
|
mit
| 186,462
|
[
"Galaxy",
"Gaussian",
"VisIt"
] |
9de6b570ae3e0c6979ffec586d079132ce7d53f2c66876d39d549efbae974c3a
|
"""
Base model of MindboggleDB
Base is a generic set of classes that model the vertices (nodes) and
edges (arcs) in the Mindboggle graph database implementation.
Domain Objects
Database -
Project - a collection of subjects
Person - an individual with a role
Subject - a participant in a project -
Sulcus - a surface (mesh) that is a fold of the brain.
Ribbon - a medial surface (mesh) within a fold, extending from a fundus.
Fundus - a curve (polyline) runs through the pits (via a minimum spanning tree algorithm).
Pit - a point (vertex) of maximal depth or curvature within a neighborhood on a brain surface.
Not Implemented (could follow the XCEDE data model):
Subject_Group
Visit
Study
Episode
Acquisition
"""
from bulbs.model import Node, Relationship
from bulbs.property import Property, String, Integer, Float
# Base Node and Relationship class for MBDB
class NodeMB(Node):
    """
    NodeMB is the root node for all vertices in MBDB
    """
    element_type = "node"

    def __unicode__(self):
        # Vertices without a more specific display name show their type.
        return self.element_type
class RelationshipMB(Relationship):
    """
    RelationshipMB is the root class for all edges (relationships) in MBDB
    """
    element_type = "relationship"

    def __unicode__(self):
        return self.element_type
# Vertices
class Database(NodeMB):
    """
    Database is the root node of mbdb domain model
    """
    element_type = "database"

    name = Property(String, nullable=False)

    # Index hooks left disabled by the original author; kept for reference.
    #def after_initialized(self):
    #    self.create_index(index_keys = self.name)

    #def after_created(self):
    #    self.index.put_unique(self.eid, key = self.element_type, value = self.name)

    def __unicode__(self):
        return self.name
class Project(Database):
    """
    Project is the concept of a collection of participants in a study - potentially with a set of
    overlapping metadata attributes

    Relationships
        contained_in Database
    """
    element_type = "project"

    name = Property(String, nullable=False)

    def after_created(self):
        # Index projects by name once the vertex exists in the graph.
        self.create_index(index_keys=self.name)

    def __unicode__(self):
        return self.name
class Person(NodeMB):
    """
    Person is an individual associated with a study (e.g. a participant).

    Relationships
        contained_in Project
    """
    # Fixed copy-paste bug: element_type was "project", which duplicated the
    # Project vertex type label; a Person vertex needs its own type.
    element_type = "person"

    name = Property(String, nullable=False)

    def __unicode__(self):
        return self.name
class Subject(Project, Person):
    """
    Subject is the concept of a participant in a Project with a set of data collected about them

    Relationships
        contained_in Project
        is-a Person
    """
    element_type = "subject"

    name = Property(String, nullable=False)
    # Optional integer age; units presumably years -- TODO confirm.
    age = Property(Integer)

    def __unicode__(self):
        return self.name
class Sulcus(Subject):
    """
    Sulcus is an anatomical entity with a set of image features

    Relationships
        contained_in Subject
    """
    element_type = "sulcus"

    name = Property(String, nullable=False)

    def __unicode__(self):
        return self.name
class Ribbon(Sulcus):
    """
    Ribbon is an anatomical entity with a set of image features
    (the docstring previously copied Sulcus's text verbatim).

    Relationships
        contained_in Subject
    """
    element_type = "ribbon"

    name = Property(String, nullable=False)

    def __unicode__(self):
        return self.name
class Fundus(Sulcus):
    """
    Fundus is an anatomical entity with a set of image features
    (the docstring previously copied Sulcus's text verbatim).

    Relationships
        contained_in Subject
    """
    element_type = "fundus"

    name = Property(String, nullable=False)
    # Per-fundus image-derived measurements (floats; units not specified
    # here -- TODO confirm against the feature-extraction pipeline).
    curvature = Property(Float)
    convexity = Property(Float)
    depth = Property(Float)
    thickness = Property(Float)
    length = Property(Float)

    def __unicode__(self):
        return self.name
class Pit(Fundus):
    """
    Pit is anatomical entity with a set of image features

    Relationships
        contained_in Fundus
    """
    element_type = "pit"

    name = Property(String, nullable=False)

    def __unicode__(self):
        return self.name
# Relationship types
# NOTE(review): the class name is misspelled ("ContatinedIn" rather than
# "ContainedIn"); it is left unchanged here because renaming it would break
# any existing importers.
class ContatinedIn(RelationshipMB):
    """
    ContainedIn is a relationship type

    Usage:
        The first vertex is contained within the second vertex
    Example:
        subject = Subject(name = "Nolan Nichols")
        project = Project(name = "Awesome Project")
        relationship = ContainedIn(Subject,Project)
    """
    label = "contained_in"
    name = "contained_in"

    # incoming node
    @property
    def outVObject(self):
        # NOTE(review): the getter returns the relationship label rather
        # than the out-vertex object -- looks like a placeholder; confirm.
        return self.label

    @outVObject.setter
    def outVObject(self, value):
        # Resolve the out-vertex via the class of the supplied object.
        outVertex = value.__class__
        outVertex.get(self.outV)

    # outgoing node
    @property
    def inVObject(self):
        # NOTE(review): same placeholder pattern as outVObject above.
        return self.label

    @inVObject.setter
    def inVObject(self, value):
        inVertex = value.__class__
        inVertex.get(self.inV)

    def __unicode__(self):
        return self.label
class IsA(Relationship):
    """
    IsA is a relationship type expressing that the first vertex is an
    instance/kind of the second vertex.
    """
    label = "is_a"
    name = "is_a"

    # incoming node
    @property
    def outVObject(self):
        # NOTE(review): returns the label, not the out-vertex -- mirrors the
        # placeholder pattern in ContatinedIn; confirm intended behavior.
        return self.label

    @outVObject.setter
    def outVObject(self, value):
        outVertex = value.__class__
        outVertex.get(self.outV)

    # outgoing node
    @property
    def inVObject(self):
        return self.label

    @inVObject.setter
    def inVObject(self, value):
        inVertex = value.__class__
        inVertex.get(self.inV)

    def __unicode__(self):
        return self.label
|
binarybottle/mindboggle_sidelined
|
database/base.py
|
Python
|
apache-2.0
| 5,624
|
[
"VisIt"
] |
5d286a8cc70c37c5c25036937feb9358d84a013bc3712ff0ebbe7d08407d030a
|
# coding=utf-8
import gettext
import gtk
import misc, ui
# Translator credits displayed in the About dialog.
# Format: "<locale> - <name> <email>" one entry per line.
translators = '''\
ar - Ahmad Farghal <ahmad.farghal@gmail.com>
be@latin - Ihar Hrachyshka <ihar.hrachyshka@gmail.com>
ca - Franc Rodriguez <franc.rodriguez@tecob.com>
cs - Jakub Adler <jakubadler@gmail.com>
da - Martin Dybdal <dybber@dybber.dk>
de - Paul Johnson <thrillerator@googlemail.com>
el_GR - Lazaros Koromilas <koromilaz@gmail.com>
es - Xoan Sampaiño <xoansampainho@gmail.com>
et - Mihkel <turakas@gmail.com>
fi - Ilkka Tuohela <hile@hack.fi>
fr - Floreal M <florealm@gmail.com>
ja - Masato Hashimoto <cabezon.hashimoto@gmail.com>
it - Gianni Vialetto <forgottencrow@gmail.com>
nl - Olivier Keun <litemotiv@gmail.com>
pl - Tomasz Dominikowski <dominikowski@gmail.com>
pt_BR - Alex Tercete Matos <alextercete@gmail.com>
ru - Ivan <bkb.box@bk.ru>
sk - Robert Hartl <hartl.robert@gmail.com>
sl - Alan Pepelko <alan.pepelko@gmail.com>
sv - Daniel Nylander <po@danielnylander.se>
tr - Gökmen Görgen <gkmngrgn@gmail.com>
uk - Господарисько Тарас <dogmaton@gmail.com>
zh_CN - Desmond Chang <dochang@gmail.com>
zh_TW - Ian-Xue Li <da.mi.spirit@gmail>
'''
class About(object):
    """GTK About dialog for Sonata.

    Shows program/version/credit information, an optional MPD library
    statistics summary in the copyright field, and a secondary modal window
    listing all keyboard shortcuts.
    """

    def __init__(self, parent_window, config, version, licensetext, icon_file):
        self.parent_window = parent_window
        self.config = config
        self.version = version
        self.license = licensetext
        self.icon_file = icon_file
        # Created lazily by about_load(); hidden (not destroyed) on close.
        self.about_dialog = None

    def about_close(self, _event, _data=None):
        # Hide rather than destroy so the dialog can be re-shown quickly.
        self.about_dialog.hide()
        return True

    def about_shortcuts(self, _button):
        """Display a modal window listing all keyboard shortcuts."""
        # define the shortcuts and their descriptions
        # these are all gettextable
        mainshortcuts = \
            [[ "F1", _("About Sonata") ],
             [ "F5", _("Preferences") ],
             [ "F11", _("Fullscreen Artwork Mode") ],
             [ "Alt-[1-5]", _("Switch to [1st-5th] tab") ],
             [ "Alt-C", _("Connect to MPD") ],
             [ "Alt-D", _("Disconnect from MPD") ],
             [ "Alt-R", _("Randomize current playlist") ],
             [ "Alt-Down", _("Expand player") ],
             [ "Alt-Left", _("Switch to previous tab") ],
             [ "Alt-Right", _("Switch to next tab") ],
             [ "Alt-Up", _("Collapse player") ],
             [ "Ctrl-H", _("Search library") ],
             [ "Ctrl-Q", _("Quit") ],
             [ "Ctrl-Shift-U", _("Update entire library") ],
             [ "Menu", _("Display popup menu") ],
             [ "Escape", _("Minimize to system tray (if enabled)") ]]
        playbackshortcuts = \
            [[ "Ctrl-Left", _("Previous track") ],
             [ "Ctrl-Right", _("Next track") ],
             [ "Ctrl-P", _("Play/Pause") ],
             [ "Ctrl-S", _("Stop") ],
             [ "Ctrl-Minus", _("Lower the volume") ],
             [ "Ctrl-Plus", _("Raise the volume") ]]
        currentshortcuts = \
            [[ "Enter/Space", _("Play selected song") ],
             [ "Delete", _("Remove selected song(s)") ],
             [ "Ctrl-I", _("Center currently playing song") ],
             [ "Ctrl-T", _("Edit selected song's tags") ],
             [ "Ctrl-Shift-S", _("Save to new playlist") ],
             [ "Ctrl-Delete", _("Clear list") ],
             [ "Alt-R", _("Randomize list") ]]
        libraryshortcuts = \
            [[ "Enter/Space", _("Add selected song(s) or enter directory") ],
             [ "Backspace", _("Go to parent directory") ],
             [ "Ctrl-D", _("Add selected item(s)") ],
             [ "Ctrl-R", _("Replace with selected item(s)") ],
             [ "Ctrl-T", _("Edit selected song's tags") ],
             [ "Ctrl-Shift-D", _("Add selected item(s) and play") ],
             [ "Ctrl-Shift-R", _("Replace with selected item(s) and play") ],
             [ "Ctrl-U", _("Update selected item(s)/path(s)") ]]
        playlistshortcuts = \
            [[ "Enter/Space", _("Add selected playlist(s)") ],
             [ "Delete", _("Remove selected playlist(s)") ],
             [ "Ctrl-D", _("Add selected playlist(s)") ],
             [ "Ctrl-R", _("Replace with selected playlist(s)") ],
             [ "Ctrl-Shift-D", _("Add selected playlist(s) and play") ],
             [ "Ctrl-Shift-R", _("Replace with selected playlist(s) and play") ]]
        streamshortcuts = \
            [[ "Enter/Space", _("Add selected stream(s)") ],
             [ "Delete", _("Remove selected stream(s)") ],
             [ "Ctrl-D", _("Add selected stream(s)") ],
             [ "Ctrl-R", _("Replace with selected stream(s)") ],
             [ "Ctrl-Shift-D", _("Add selected stream(s) and play") ],
             [ "Ctrl-Shift-R", _("Replace with selected stream(s) and play") ]]
        infoshortcuts = \
            [[ "Ctrl-T", _("Edit playing song's tags") ]]
        # define the main array- this adds headings to each section of
        # shortcuts that will be displayed
        shortcuts = [[ _("Main Shortcuts"), mainshortcuts ],
                     [ _("Playback Shortcuts"), playbackshortcuts ],
                     [ _("Current Shortcuts"), currentshortcuts ],
                     [ _("Library Shortcuts"), libraryshortcuts ],
                     [ _("Playlist Shortcuts"), playlistshortcuts ],
                     [ _("Stream Shortcuts"), streamshortcuts ],
                     [ _("Info Shortcuts"), infoshortcuts ]]
        dialog = ui.dialog(title=_("Shortcuts"), parent=self.about_dialog, flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE), role='shortcuts', default=gtk.RESPONSE_CLOSE, h=320)
        # each pair is a [ heading, shortcutlist ]
        vbox = gtk.VBox()
        for pair in shortcuts:
            titlelabel = ui.label(markup="<b>%s</b>" % pair[0])
            vbox.pack_start(titlelabel, False, False, 2)
            # print the items of [ shortcut, desc ]
            for item in pair[1]:
                tmphbox = gtk.HBox()
                tmplabel = ui.label(markup="<b>%s:</b>" % item[0], y=0)
                tmpdesc = ui.label(text=item[1], wrap=True, y=0)
                tmphbox.pack_start(tmplabel, False, False, 2)
                tmphbox.pack_start(tmpdesc, True, True, 2)
                vbox.pack_start(tmphbox, False, False, 2)
            vbox.pack_start(ui.label(text=" "), False, False, 2)
        scrollbox = ui.scrollwindow(policy_x=gtk.POLICY_NEVER, addvp=vbox)
        dialog.vbox.pack_start(scrollbox, True, True, 2)
        dialog.show_all()
        dialog.run()
        dialog.destroy()

    def statstext(self, stats):
        """Build the library-statistics text shown in the copyright field."""
        # XXX translate expressions, not words
        statslabel = stats['songs'] + ' ' + gettext.ngettext('song', 'songs', int(stats['songs'])) + '.\n'
        statslabel = statslabel + stats['albums'] + ' ' + gettext.ngettext('album', 'albums', int(stats['albums'])) + '.\n'
        statslabel = statslabel + stats['artists'] + ' ' + gettext.ngettext('artist', 'artists', int(stats['artists'])) + '.\n'
        try:
            # db_playtime is in seconds; take the hours field of the
            # "[days:]hours:minutes:seconds"-style string.
            hours_of_playtime = misc.convert_time(float(stats['db_playtime'])).split(':')[-3]
        except:
            hours_of_playtime = '0'
        if int(hours_of_playtime) >= 24:
            # NOTE(review): this relies on Python 2 integer division;
            # on Python 3 the '/' would produce a float string.
            days_of_playtime = str(int(hours_of_playtime)/24)
            statslabel = statslabel + days_of_playtime + ' ' + gettext.ngettext('day of bliss', 'days of bliss', int(days_of_playtime)) + '.'
        else:
            statslabel = statslabel + hours_of_playtime + ' ' + gettext.ngettext('hour of bliss', 'hours of bliss', int(hours_of_playtime)) + '.'
        return statslabel

    def about_load(self, stats):
        """Create and show the About dialog; `stats` may be None."""
        self.about_dialog = gtk.AboutDialog()
        try:
            self.about_dialog.set_transient_for(self.parent_window)
            self.about_dialog.set_modal(True)
        except:
            pass
        self.about_dialog.set_name('Sonata')
        self.about_dialog.set_role('about')
        self.about_dialog.set_version(self.version)
        commentlabel = _('An elegant music client for MPD.')
        self.about_dialog.set_comments(commentlabel)
        if stats:
            # Reuse the copyright field for MPD library statistics.
            self.about_dialog.set_copyright(self.statstext(stats))
        self.about_dialog.set_license(self.license)
        self.about_dialog.set_authors(['Scott Horowitz <stonecrest@gmail.com>', 'Tuukka Hastrup <Tuukka.Hastrup@iki.fi>', 'Stephen Boyd <bebarino@gmail.com>'])
        self.about_dialog.set_artists(['Adrian Chromenko <adrian@rest0re.org>\nhttp://oss.rest0re.org/'])
        self.about_dialog.set_translator_credits(translators)
        gtk.about_dialog_set_url_hook(self.show_website)
        self.about_dialog.set_website("http://sonata.berlios.de/")
        large_icon = gtk.gdk.pixbuf_new_from_file(self.icon_file)
        self.about_dialog.set_logo(large_icon)
        # Add button to show keybindings:
        shortcut_button = ui.button(text=_("_Shortcuts"))
        self.about_dialog.action_area.pack_start(shortcut_button)
        self.about_dialog.action_area.reorder_child(self.about_dialog.action_area.get_children()[-1], -2)
        # Connect to callbacks
        self.about_dialog.connect('response', self.about_close)
        self.about_dialog.connect('delete_event', self.about_close)
        shortcut_button.connect('clicked', self.about_shortcuts)
        self.about_dialog.show_all()

    def show_website(self, _dialog, link):
        """URL-hook callback: open the website link in a browser."""
        if not misc.browser_load(link, self.config.url_browser, self.parent_window):
            ui.show_msg(self.about_dialog, _('Unable to launch a suitable browser.'), _('Launch Browser'), 'browserLoadError', gtk.BUTTONS_CLOSE)
|
tuukka/sonata-svn-test
|
sonata/about.py
|
Python
|
gpl-3.0
| 8,442
|
[
"Desmond"
] |
a622ae81af95e797117593ff006b484c053619c39a922a127c627cd6721f5697
|
class Languages:
    # Supported UI languages; the values are the identifiers used to select
    # a translation dictionary in translate().
    English = 'english'
    Spanish = 'spanish'
# Module-wide active language; Spanish is the shipped default.
CurrentLanguage = Languages.Spanish


def set_language(lang):
    """Set the module-wide active language (one of the Languages values)."""
    global CurrentLanguage
    CurrentLanguage = lang
# Reference translation table.  English keys map to themselves; every other
# language dictionary must provide the same key set (see
# report_missing_keys()).
EnglishDictionary = {
    'About...': 'About...',
    'About this program:': 'About this program:',
    'ABOUT_DIALOG': 'This program was created by NREL for the United States Department of Energy.',
    'Cancel': 'Cancel',
    'Cancelled!': 'Cancelled!',
    'Choose Input File..': 'Choose Input File..',
    'Choose Weather File..': 'Choose Weather File..',
    'Close': 'Close',
    'Could not open run directory': 'Could not open run directory',
    'Could not open input file, set default application by opening the file separately first.':
        'Could not open input file, set default application by opening the file separately first.',
    'Edit Input File..': 'Edit Input File..',
    'E+ Version': 'E+ Version',
    'EnergyPlus Failed': 'EnergyPlus Failed',
    'EnergyPlus Failed!': 'EnergyPlus Failed!',
    'EnergyPlus Simulation Output:': 'EnergyPlus Simulation Output:',
    'EPW files': 'EPW files',
    'Error file is the best place to start. Would you like to open the Run Folder?':
        'Error file is the best place to start. Would you like to open the Run Folder?',
    'Error performing prior action:': 'Error performing prior action:',
    'Exit': 'Exit',
    'File': 'File',
    'IDF files': 'IDF files',
    'Input and/or Weather file paths are invalid': 'Input and/or Weather file paths are invalid',
    'Message': 'Message',
    'Open Run Directory': 'Open Run Directory',
    'Ready for launch': 'Ready for launch',
    'You must restart the app to make the language change take effect. Would you like to restart now?':
        'You must restart the app to make the language change take effect. Would you like to restart now?',
    'Select input file': 'Select input file',
    'Select weather file': 'Select weather file',
    'Simulate': 'Simulate',
    'Simulation cancelled': 'Simulation cancelled',
    'Simulation Output': 'Simulation Output',
    'Simulation completed': 'Simulation completed',
    'Simulation failed': 'Simulation failed',
    'Simulation started': 'Simulation started',
    'Switch language': 'Switch language'
}
# Spanish translations; must mirror the key set of EnglishDictionary.
SpanishDictionary = {
    'About...': 'Acerca de...',
    'About this program:': 'Acerca de este programa',
    'ABOUT_DIALOG': 'Este programa fue creado por el NREL para el Departamento de Energia de los Estados Unidos.',
    'Cancel': 'Cancelar',
    'Cancelled!': 'Cancelado!',
    'Choose Input File..': 'Elija el archivo de entrada..',
    'Choose Weather File..': 'Elija Tiempo Archivo..',
    'Close': 'Cerca',
    'Could not open run directory': 'No se pudo abrir directorio de ejecucion',
    'Could not open input file, set default application by opening the file separately first.':
        'No se pudo abrir el archivo de entrada, ajuste aplicacion ' +
        'por defecto al abrir el archivo por separado en primer lugar.',
    'Edit Input File..': 'Editar el archivo..',
    'E+ Version': 'E+ Version',
    'EnergyPlus Failed': 'EnergyPlus fallado',
    'EnergyPlus Failed!': 'EnergyPlus fallado!',
    'EnergyPlus Simulation Output:': 'EnergyPlus salida de la simulacion:',
    'EPW files': 'EPW archivos',
    'Error file is the best place to start. Would you like to open the Run Folder?':
        'Archivo de errores es el mejor lugar para empezar. Le gustaria abrir la carpeta Run?',
    'Error performing prior action:': 'Error al realizar la accion previa:',
    'Exit': 'Salida',
    'File': 'Archivo',
    'IDF files': 'IDF archivos',
    'Input and/or Weather file paths are invalid': 'Las rutas de entrada y/o archivos de tiempo no son validos',
    'Message': 'Mensaje',
    'Open Run Directory': 'Directorio de ejecucion abierta',
    'Ready for launch': 'Listo para su lanzamiento',
    'You must restart the app to make the language change take effect. Would you like to restart now?':
        'Debe reiniciar la aplicacion para que el cambio de idioma tenga efecto. Le gustaria reiniciar ahora?',
    'Select input file': 'Seleccionar archivo de entrada',
    'Select weather file': 'Seleccionar archivo de tiempo',
    'Simulate': 'Simular',
    'Simulation cancelled': 'Simulacion cancelado',
    'Simulation Output': 'Salida de la simulacion',
    'Simulation completed': 'Simulacion completado',
    'Simulation failed': 'Simulacion fallo',
    'Simulation started': 'Simulacion comenzo',
    'Switch language': 'Cambiar de idioma'
}
def report_missing_keys():
    """Print every key of EnglishDictionary that is missing from each of the
    translated dictionaries (register new languages in the mapping below)."""
    base_keys = EnglishDictionary.keys()
    # dict.items() works on both Python 2 and 3; the previous
    # dict.iteritems() call was removed in Python 3 and crashed there.
    for dict_name, dictionary in {'Spanish': SpanishDictionary}.items():  # add more here
        print("Processing missing keys from dictionary: " + dict_name)
        for key in base_keys:
            if key not in dictionary:
                print("Could not find key: \"%s\"" % key)
def translate(key):
    """Return the translation of *key* for the currently active language.

    A blank or None key translates to the empty string; a key missing from
    the active dictionary produces a loud placeholder so it gets noticed.
    """
    # Blank or missing keys translate to nothing at all.
    if key is None or key == "":
        return ""
    # Select the dictionary for the active language; English is the default.
    if CurrentLanguage == Languages.Spanish:
        dictionary = SpanishDictionary
    else:
        dictionary = EnglishDictionary
    # Look the key up, flagging any gap in the translation table loudly.
    try:
        return dictionary[key]
    except KeyError:
        print("Could not find this key in the dictionary: \"%s\"" % key)
        return "TRANSLATION MISSING"
|
Myoldmopar/EPLaunchLight
|
EPLaunchLite/International.py
|
Python
|
bsd-3-clause
| 5,461
|
[
"EPW"
] |
975a78ba31ec8e6c74379cfb925e0031a87d6dd4d6bbcb06a7d4b790566426d7
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send CLI commands to Lenovo Switches
# Lenovo Networking
#
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_command
author: "Dave Kasberg (@dkasberg)"
short_description: Execute a single command on devices running Lenovo CNOS
description:
- This module allows you to modify the switch running configuration. It provides a way to
execute a single CNOS command on a switch by evaluating the current running configuration
and executing the command only if the specific setting has not been already configured.
The CNOS command is passed as an argument of the method.
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_command.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
clicommand:
description:
- This specifies the CLI command as an attribute to this method. The command is
passed using double quotes. The variables can be placed directly on to the CLI
commands or can be invoked from the vars directory.
required: true
default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_command. These are written in the main.yml file of the tasks directory.
---
- name: Test Command
cnos_command:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
outputfile: "./results/test_command_{{ inventory_hostname }}_output.txt"
clicommand: "display users"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Command Applied"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point for the cnos_command Ansible module.

    Opens an interactive SSH shell to a Lenovo CNOS switch, enters enable
    and configuration mode, runs the user-supplied CLI command, appends all
    captured device output to `outputfile`, then reports success or failure
    back to Ansible via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            clicommand=dict(required=True),
            outputfile=dict(required=True),
            host=dict(required=True),
            deviceType=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    cliCommand = module.params['clicommand']
    # deviceType is not used below; kept for interface parity with the
    # other cnos_* modules that share this argument spec.
    deviceType = module.params['deviceType']
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    output = ""

    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()

    # Automatically add untrusted hosts (make sure okay for security policy in your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)

    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)

    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)

    output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)

    # Make terminal length = 0
    output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)

    # Go to config mode
    output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)

    # Send the CLi command
    output = output + cnos.waitForDeviceResponse(cliCommand + "\n", "(config)#", 2, remote_conn)

    # Append the captured output to the results file.  A context manager
    # guarantees the handle is closed even if the write raises, and avoids
    # shadowing the (Python 2) builtin name 'file'.
    with open(outputfile, "a") as results_file:
        results_file.write(output)

    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if(errorMsg is None):
        module.exit_json(changed=True, msg="CLI command executed and results saved in file ")
    else:
        module.fail_json(msg=errorMsg)
# Standard Ansible module entry-point guard.
if __name__ == '__main__':
    main()
|
andreaso/ansible
|
lib/ansible/modules/network/lenovo/cnos_command.py
|
Python
|
gpl-3.0
| 5,666
|
[
"VisIt"
] |
6721eb5e50637baf9a66e7e03e4d39d04050a52857d322cbb66c6fa93dacfc48
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffxparser(RPackage):
    """Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).

    It provides methods for fast and memory efficient parsing of
    Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
    and binary-based files are supported. Currently, there are methods
    for reading chip definition file (CDF) and a cell intensity file (CEL).
    These files can be read either in full or in part. For example,
    probe signals from a few probesets can be extracted very quickly
    from a set of CEL files into a convenient list structure."""

    homepage = "https://www.bioconductor.org/packages/affxparser/"
    # Bioconductor packages are fetched from their git mirror.
    url = "https://git.bioconductor.org/packages/affxparser"
    list_url = homepage

    # Pinned to the exact Bioconductor release commit for reproducibility.
    version('1.48.0', git='https://git.bioconductor.org/packages/affxparser', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')

    # affxparser 1.48.0 targets the R 3.4 series only.
    depends_on('r@3.4.0:3.4.9', when='@1.48.0')
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-affxparser/package.py
|
Python
|
lgpl-2.1
| 2,177
|
[
"Bioconductor"
] |
2bec788c94c4e746f70af595c9e6bb1be357b25a18beee6de86cf976f5425bcf
|
"""
Utilities for scripts
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import functools
import os
import shlex
import subprocess
import sys
import time
import humanize
import requests
import yaml
import pysam
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
def ga4ghImportGlue():
    """
    Call this method before importing a ga4gh module in the scripts dir.
    Otherwise, you will be using the installed package instead of
    the development package.
    Assumes a certain directory structure.
    """
    # The repository root is two levels above this file; appending it to
    # sys.path makes the in-repo ga4gh package importable.
    thisFile = os.path.abspath(__file__)
    repoRoot = os.path.dirname(os.path.dirname(thisFile))
    sys.path.append(repoRoot)
def log(message):
    """Write *message* to standard output, followed by a newline."""
    sys.stdout.write("%s\n" % (message,))
class Timed(object):
    """
    Decorator that times a method, reporting runtime at finish
    """
    def __call__(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Record wall-clock time immediately around the wrapped call.
            self.start = time.time()
            result = func(*args, **kwargs)
            self.end = time.time()
            self._report()
            return result
        return wrapper

    def _report(self):
        # Log a human-friendly duration plus the raw elapsed seconds.
        delta = self.end - self.start
        timeString = humanize.time.naturaldelta(delta)
        log("Finished in {} ({:.2f} seconds)".format(timeString, delta))
class FileDownloader(object):
    """
    Base class for file downloaders of different protocols.

    Subclasses perform the actual transfer and are expected to update
    `bytesReceived` (and `fileSize` when known) while calling
    `_updateDisplay()` to refresh the progress line on `stream`.
    """
    defaultStream = sys.stdout

    def __init__(self, url, path, stream=defaultStream):
        self.url = url
        self.path = path
        self.basename = path
        self.basenameLength = len(self.basename)
        self.stream = stream
        # Progress state updated by subclasses during the transfer.
        self.bytesReceived = 0
        self.displayIndex = 0
        self.displayWindowSize = 20
        # None until a Content-Length (or equivalent) is known.
        self.fileSize = None
        self.displayCounter = 0

    def _printStartDownloadMessage(self):
        self.stream.write("Downloading '{}' to '{}'\n".format(
            self.url, self.path))

    def _cleanUp(self):
        # Terminate the carriage-return progress line with a real newline.
        self.stream.write("\n")
        self.stream.flush()

    def _getFileNameDisplayString(self):
        if self.basenameLength <= self.displayWindowSize:
            return self.basename
        else:
            return self.basename  # TODO scrolling window here

    def _updateDisplay(self, modulo=1):
        """Redraw the progress line every `modulo` calls."""
        self.displayCounter += 1
        if self.displayCounter % modulo != 0:
            return

        fileName = self._getFileNameDisplayString()
        if self.fileSize is None:
            # Total size unknown: show only the byte count so far.
            displayString = "{} bytes received: {}\r"
            bytesReceived = humanize.filesize.naturalsize(
                self.bytesReceived)
            self.stream.write(displayString.format(
                fileName, bytesReceived))
        else:
            # TODO contentlength seems to slightly under-report how many
            # bytes we have to download... hence the min functions
            percentage = min(self.bytesReceived / self.fileSize, 1)
            numerator = humanize.filesize.naturalsize(
                min(self.bytesReceived, self.fileSize))
            denominator = humanize.filesize.naturalsize(
                self.fileSize)
            displayString = "{} {:<6.2%} ({:>9} / {:<9})\r"
            self.stream.write(displayString.format(
                fileName, percentage, numerator, denominator))
        self.stream.flush()
class HttpFileDownloader(FileDownloader):
    """
    Provides a wget-like file download and terminal display for HTTP.
    """
    defaultChunkSize = 1048576  # 1MB

    def __init__(self, url, path, chunkSize=defaultChunkSize,
                 stream=FileDownloader.defaultStream):
        super(HttpFileDownloader, self).__init__(
            url, path, stream)
        # Number of bytes requested per iter_content() read.
        self.chunkSize = chunkSize

    def download(self):
        """Stream the URL to self.path, updating the progress display.

        Raises requests.HTTPError for non-2xx responses.
        """
        self._printStartDownloadMessage()
        response = requests.get(self.url, stream=True)
        response.raise_for_status()
        try:
            contentLength = int(response.headers['content-length'])
            self.fileSize = contentLength
        except KeyError:
            # chunked transfer encoding: total size is unknown,
            # fileSize stays None and the display shows a byte count
            pass
        with open(self.path, 'wb') as outputFile:
            for chunk in response.iter_content(chunk_size=self.chunkSize):
                # BUG FIX: count the bytes actually received. The final
                # chunk is usually smaller than chunkSize, so adding
                # chunkSize on every iteration over-reported progress.
                self.bytesReceived += len(chunk)
                self._updateDisplay()
                outputFile.write(chunk)
        self._cleanUp()
def runCommandSplits(splits, silent=False):
    """
    Run a shell command given the command's parsed command line.

    :param splits: list of argv tokens, e.g. ["ls", "-l"]
    :param silent: when True, discard the command's stdout and stderr
    :raises Exception: if the executable cannot be found (ENOENT)
    :raises subprocess.CalledProcessError: if the command exits non-zero
    """
    try:
        if silent:
            # Route both output streams to the null device.
            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(splits, stdout=devnull, stderr=devnull)
        else:
            subprocess.check_call(splits)
    # BUG FIX: "except OSError, e" is Python-2-only syntax; "as" works
    # on Python 2.6+ and Python 3.
    except OSError as e:
        if e.errno == 2:  # ENOENT: command not found
            raise Exception(
                "Can't find command while trying to run {}".format(splits))
        else:
            raise
def runCommand(command, silent=False):
    """
    Run a shell command given as a single string.

    :param command: the command line to execute, e.g. "ls -l"
    :param silent: when True, discard the command's stdout and stderr
    """
    tokens = shlex.split(command)
    runCommandSplits(tokens, silent=silent)
def getAuthValues(filePath='scripts/auth.yml'):
    """
    Return the script authentication file as a dictionary.

    :param filePath: location of the auth YAML file
    :return: parsed YAML contents
    """
    authValues = getYamlDocument(filePath)
    return authValues
def getYamlDocument(filePath):
    """
    Return a yaml file's contents as a dictionary.

    :param filePath: path of the YAML file to parse
    :return: the parsed top-level YAML document
    """
    with open(filePath) as stream:
        # SECURITY FIX: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from tagged YAML (and is deprecated in
        # PyYAML >= 5.1). safe_load restricts parsing to plain YAML types,
        # which is all a config/auth file needs.
        doc = yaml.safe_load(stream)
    return doc
class AlignmentFileConstants(object):
    """
    A container class for constants dealing with alignment files.
    """
    # File-format identifiers used to select pysam open modes.
    SAM = "SAM"
    BAM = "BAM"
    # Extension of the index file that accompanies a BAM file.
    BAI = "BAI"
class AlignmentFileTool(object):
    """
    Helps with operations on BAM and SAM files.
    """

    def __init__(self, inputFileFormat, outputFileFormat):
        # Expected values are AlignmentFileConstants.SAM / .BAM.
        self.inputFileFormat = inputFileFormat
        self.outputFileFormat = outputFileFormat
        self.args = None  # populated by parseArgs()

    def parseArgs(self):
        """Parse command-line arguments into self.args."""
        description = "{} to {} conversion tool".format(
            self.inputFileFormat, self.outputFileFormat)
        parser = argparse.ArgumentParser(
            description=description)
        inputHelpText = "the name of the {} file to read".format(
            self.inputFileFormat)
        parser.add_argument(
            "inputFile", help=inputHelpText)
        outputHelpText = "the name of the {} file to write".format(
            self.outputFileFormat)
        defaultOutputFilePath = "out.{}".format(
            self.outputFileFormat.lower())
        parser.add_argument(
            "--outputFile", "-o", default=defaultOutputFilePath,
            help=outputHelpText)
        # BUG FIX: without type=int a command-line value arrived as a
        # string and broke the range() call in convert().
        parser.add_argument(
            "--numLines", "-n", default=10, type=int,
            help="the number of lines to write")
        parser.add_argument(
            "--skipIndexing", default=False, action='store_true',
            help="don't create an index file")
        self.args = parser.parse_args()

    def convert(self):
        """Copy the first numLines records to the output file, then
        optionally create a BAM index."""
        # Select pysam open modes; fail fast with a clear error on an
        # unsupported format instead of a NameError further down.
        if self.inputFileFormat == AlignmentFileConstants.SAM:
            inputFlags = "r"
        elif self.inputFileFormat == AlignmentFileConstants.BAM:
            inputFlags = "rb"
        else:
            raise ValueError(
                "unsupported input format: {}".format(self.inputFileFormat))
        if self.outputFileFormat == AlignmentFileConstants.SAM:
            outputFlags = "wh"
        elif self.outputFileFormat == AlignmentFileConstants.BAM:
            outputFlags = "wb"
        else:
            raise ValueError(
                "unsupported output format: {}".format(self.outputFileFormat))
        # open files
        inputFile = pysam.AlignmentFile(
            self.args.inputFile, inputFlags)
        outputFile = pysam.AlignmentFile(
            self.args.outputFile, outputFlags, header=inputFile.header)
        outputFilePath = outputFile.filename
        log("Creating alignment file '{}'".format(outputFilePath))
        # write new file; next(it) works on both Python 2 and 3 iterators,
        # unlike the previous Py2-only inputFile.next() call (likewise
        # range replaces xrange).
        for _ in range(self.args.numLines):
            alignedSegment = next(inputFile)
            outputFile.write(alignedSegment)
        # clean up
        inputFile.close()
        outputFile.close()
        # create index file
        if (not self.args.skipIndexing and
                self.outputFileFormat == AlignmentFileConstants.BAM):
            indexFilePath = "{}.{}".format(
                outputFilePath, AlignmentFileConstants.BAI.lower())
            log("Creating index file '{}'".format(indexFilePath))
            pysam.index(outputFilePath)
|
macieksmuga/server
|
scripts/utils.py
|
Python
|
apache-2.0
| 8,309
|
[
"pysam"
] |
81182b43118056b9b41413080a606a34ecc7ba53ddcedfa636abdf8d23cb7357
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
import shlex
import sys
from optparse import OptionError
from optparse import OptionGroup
from optparse import OptionParser
from optparse import SUPPRESS_HELP
from lib.core.common import checkDeprecatedOptions
from lib.core.common import checkSystemEncoding
from lib.core.common import dataToStdout
from lib.core.common import expandMnemonics
from lib.core.common import getUnicode
from lib.core.data import cmdLineOptions
from lib.core.data import conf
from lib.core.data import logger
from lib.core.defaults import defaults
from lib.core.enums import AUTOCOMPLETE_TYPE
from lib.core.exception import SqlmapShellQuitException
from lib.core.exception import SqlmapSyntaxException
from lib.core.settings import BASIC_HELP_ITEMS
from lib.core.settings import DUMMY_URL
from lib.core.settings import IS_WIN
from lib.core.settings import MAX_HELP_OPTION_LENGTH
from lib.core.settings import VERSION_STRING
from lib.core.shell import autoCompletion
from lib.core.shell import clearHistory
from lib.core.shell import loadHistory
from lib.core.shell import saveHistory
def cmdLineParser(argv=None):
"""
This function parses the command line parameters and arguments
"""
if not argv:
argv = sys.argv
checkSystemEncoding()
# Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING")
_ = getUnicode(os.path.basename(argv[0]), encoding=sys.stdin.encoding)
usage = "%s%s [options]" % ("python " if not IS_WIN else "", \
"\"%s\"" % _ if " " in _ else _)
parser = OptionParser(usage=usage)
try:
parser.add_option("--hh", dest="advancedHelp",
action="store_true",
help="Show advanced help message and exit")
parser.add_option("--version", dest="showVersion",
action="store_true",
help="Show program's version number and exit")
parser.add_option("-v", dest="verbose", type="int",
help="Verbosity level: 0-6 (default %d)" % defaults.verbose)
# Target options
target = OptionGroup(parser, "Target", "At least one of these "
"options has to be provided to define the target(s)")
target.add_option("-d", dest="direct", help="Connection string "
"for direct database connection")
target.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/vuln.php?id=1\")")
target.add_option("-l", dest="logFile", help="Parse target(s) from Burp "
"or WebScarab proxy log file")
target.add_option("-x", dest="sitemapUrl", help="Parse target(s) from remote sitemap(.xml) file")
target.add_option("-m", dest="bulkFile", help="Scan multiple targets given "
"in a textual file ")
target.add_option("-r", dest="requestFile",
help="Load HTTP request from a file")
target.add_option("-g", dest="googleDork",
help="Process Google dork results as target URLs")
target.add_option("-c", dest="configFile",
help="Load options from a configuration INI file")
# Request options
request = OptionGroup(parser, "Request", "These options can be used "
"to specify how to connect to the target URL")
request.add_option("--method", dest="method",
help="Force usage of given HTTP method (e.g. PUT)")
request.add_option("--data", dest="data",
help="Data string to be sent through POST")
request.add_option("--param-del", dest="paramDel",
help="Character used for splitting parameter values")
request.add_option("--cookie", dest="cookie",
help="HTTP Cookie header value")
request.add_option("--cookie-del", dest="cookieDel",
help="Character used for splitting cookie values")
request.add_option("--load-cookies", dest="loadCookies",
help="File containing cookies in Netscape/wget format")
request.add_option("--drop-set-cookie", dest="dropSetCookie",
action="store_true",
help="Ignore Set-Cookie header from response")
request.add_option("--user-agent", dest="agent",
help="HTTP User-Agent header value")
request.add_option("--random-agent", dest="randomAgent",
action="store_true",
help="Use randomly selected HTTP User-Agent header value")
request.add_option("--host", dest="host",
help="HTTP Host header value")
request.add_option("--referer", dest="referer",
help="HTTP Referer header value")
request.add_option("-H", "--header", dest="header",
help="Extra header (e.g. \"X-Forwarded-For: 127.0.0.1\")")
request.add_option("--headers", dest="headers",
help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")")
request.add_option("--auth-type", dest="authType",
help="HTTP authentication type "
"(Basic, Digest, NTLM or PKI)")
request.add_option("--auth-cred", dest="authCred",
help="HTTP authentication credentials "
"(name:password)")
request.add_option("--auth-file", dest="authFile",
help="HTTP authentication PEM cert/private key file")
request.add_option("--ignore-401", dest="ignore401", action="store_true",
help="Ignore HTTP Error 401 (Unauthorized)")
request.add_option("--ignore-proxy", dest="ignoreProxy", action="store_true",
help="Ignore system default proxy settings")
request.add_option("--ignore-redirects", dest="ignoreRedirects", action="store_true",
help="Ignore redirection attempts")
request.add_option("--ignore-timeouts", dest="ignoreTimeouts", action="store_true",
help="Ignore connection timeouts")
request.add_option("--proxy", dest="proxy",
help="Use a proxy to connect to the target URL")
request.add_option("--proxy-cred", dest="proxyCred",
help="Proxy authentication credentials "
"(name:password)")
request.add_option("--proxy-file", dest="proxyFile",
help="Load proxy list from a file")
request.add_option("--tor", dest="tor",
action="store_true",
help="Use Tor anonymity network")
request.add_option("--tor-port", dest="torPort",
help="Set Tor proxy port other than default")
request.add_option("--tor-type", dest="torType",
help="Set Tor proxy type (HTTP, SOCKS4 or SOCKS5 (default))")
request.add_option("--check-tor", dest="checkTor",
action="store_true",
help="Check to see if Tor is used properly")
request.add_option("--delay", dest="delay", type="float",
help="Delay in seconds between each HTTP request")
request.add_option("--timeout", dest="timeout", type="float",
help="Seconds to wait before timeout connection "
"(default %d)" % defaults.timeout)
request.add_option("--retries", dest="retries", type="int",
help="Retries when the connection timeouts "
"(default %d)" % defaults.retries)
request.add_option("--randomize", dest="rParam",
help="Randomly change value for given parameter(s)")
request.add_option("--safe-url", dest="safeUrl",
help="URL address to visit frequently during testing")
request.add_option("--safe-post", dest="safePost",
help="POST data to send to a safe URL")
request.add_option("--safe-req", dest="safeReqFile",
help="Load safe HTTP request from a file")
request.add_option("--safe-freq", dest="safeFreq", type="int",
help="Test requests between two visits to a given safe URL")
request.add_option("--skip-urlencode", dest="skipUrlEncode",
action="store_true",
help="Skip URL encoding of payload data")
request.add_option("--csrf-token", dest="csrfToken",
help="Parameter used to hold anti-CSRF token")
request.add_option("--csrf-url", dest="csrfUrl",
help="URL address to visit to extract anti-CSRF token")
request.add_option("--force-ssl", dest="forceSSL",
action="store_true",
help="Force usage of SSL/HTTPS")
request.add_option("--hpp", dest="hpp",
action="store_true",
help="Use HTTP parameter pollution method")
request.add_option("--eval", dest="evalCode",
help="Evaluate provided Python code before the request (e.g. \"import hashlib;id2=hashlib.md5(id).hexdigest()\")")
# Optimization options
optimization = OptionGroup(parser, "Optimization", "These "
"options can be used to optimize the "
"performance of sqlmap")
optimization.add_option("-o", dest="optimize",
action="store_true",
help="Turn on all optimization switches")
optimization.add_option("--predict-output", dest="predictOutput", action="store_true",
help="Predict common queries output")
optimization.add_option("--keep-alive", dest="keepAlive", action="store_true",
help="Use persistent HTTP(s) connections")
optimization.add_option("--null-connection", dest="nullConnection", action="store_true",
help="Retrieve page length without actual HTTP response body")
optimization.add_option("--threads", dest="threads", type="int",
help="Max number of concurrent HTTP(s) "
"requests (default %d)" % defaults.threads)
# Injection options
injection = OptionGroup(parser, "Injection", "These options can be "
"used to specify which parameters to test "
"for, provide custom injection payloads and "
"optional tampering scripts")
injection.add_option("-p", dest="testParameter",
help="Testable parameter(s)")
injection.add_option("--skip", dest="skip",
help="Skip testing for given parameter(s)")
injection.add_option("--skip-static", dest="skipStatic", action="store_true",
help="Skip testing parameters that not appear to be dynamic")
injection.add_option("--param-exclude", dest="paramExclude",
help="Regexp to exclude parameters from testing (e.g. \"ses\")")
injection.add_option("--dbms", dest="dbms",
help="Force back-end DBMS to this value")
injection.add_option("--dbms-cred", dest="dbmsCred",
help="DBMS authentication credentials (user:password)")
injection.add_option("--os", dest="os",
help="Force back-end DBMS operating system "
"to this value")
injection.add_option("--invalid-bignum", dest="invalidBignum",
action="store_true",
help="Use big numbers for invalidating values")
injection.add_option("--invalid-logical", dest="invalidLogical",
action="store_true",
help="Use logical operations for invalidating values")
injection.add_option("--invalid-string", dest="invalidString",
action="store_true",
help="Use random strings for invalidating values")
injection.add_option("--no-cast", dest="noCast",
action="store_true",
help="Turn off payload casting mechanism")
injection.add_option("--no-escape", dest="noEscape",
action="store_true",
help="Turn off string escaping mechanism")
injection.add_option("--prefix", dest="prefix",
help="Injection payload prefix string")
injection.add_option("--suffix", dest="suffix",
help="Injection payload suffix string")
injection.add_option("--tamper", dest="tamper",
help="Use given script(s) for tampering injection data")
# Detection options
detection = OptionGroup(parser, "Detection", "These options can be "
"used to customize the detection phase")
detection.add_option("--level", dest="level", type="int",
help="Level of tests to perform (1-5, "
"default %d)" % defaults.level)
detection.add_option("--risk", dest="risk", type="int",
help="Risk of tests to perform (1-3, "
"default %d)" % defaults.risk)
detection.add_option("--string", dest="string",
help="String to match when "
"query is evaluated to True")
detection.add_option("--not-string", dest="notString",
help="String to match when "
"query is evaluated to False")
detection.add_option("--regexp", dest="regexp",
help="Regexp to match when "
"query is evaluated to True")
detection.add_option("--code", dest="code", type="int",
help="HTTP code to match when "
"query is evaluated to True")
detection.add_option("--text-only", dest="textOnly",
action="store_true",
help="Compare pages based only on the textual content")
detection.add_option("--titles", dest="titles",
action="store_true",
help="Compare pages based only on their titles")
# Techniques options
techniques = OptionGroup(parser, "Techniques", "These options can be "
"used to tweak testing of specific SQL "
"injection techniques")
techniques.add_option("--technique", dest="tech",
help="SQL injection techniques to use "
"(default \"%s\")" % defaults.tech)
techniques.add_option("--time-sec", dest="timeSec",
type="int",
help="Seconds to delay the DBMS response "
"(default %d)" % defaults.timeSec)
techniques.add_option("--union-cols", dest="uCols",
help="Range of columns to test for UNION query SQL injection")
techniques.add_option("--union-char", dest="uChar",
help="Character to use for bruteforcing number of columns")
techniques.add_option("--union-from", dest="uFrom",
help="Table to use in FROM part of UNION query SQL injection")
techniques.add_option("--dns-domain", dest="dnsDomain",
help="Domain name used for DNS exfiltration attack")
techniques.add_option("--second-order", dest="secondOrder",
help="Resulting page URL searched for second-order "
"response")
# Fingerprint options
fingerprint = OptionGroup(parser, "Fingerprint")
fingerprint.add_option("-f", "--fingerprint", dest="extensiveFp",
action="store_true",
help="Perform an extensive DBMS version fingerprint")
# Enumeration options
enumeration = OptionGroup(parser, "Enumeration", "These options can "
"be used to enumerate the back-end database "
"management system information, structure "
"and data contained in the tables. Moreover "
"you can run your own SQL statements")
enumeration.add_option("-a", "--all", dest="getAll",
action="store_true", help="Retrieve everything")
enumeration.add_option("-b", "--banner", dest="getBanner",
action="store_true", help="Retrieve DBMS banner")
enumeration.add_option("--current-user", dest="getCurrentUser",
action="store_true",
help="Retrieve DBMS current user")
enumeration.add_option("--current-db", dest="getCurrentDb",
action="store_true",
help="Retrieve DBMS current database")
enumeration.add_option("--hostname", dest="getHostname",
action="store_true",
help="Retrieve DBMS server hostname")
enumeration.add_option("--is-dba", dest="isDba",
action="store_true",
help="Detect if the DBMS current user is DBA")
enumeration.add_option("--users", dest="getUsers", action="store_true",
help="Enumerate DBMS users")
enumeration.add_option("--passwords", dest="getPasswordHashes",
action="store_true",
help="Enumerate DBMS users password hashes")
enumeration.add_option("--privileges", dest="getPrivileges",
action="store_true",
help="Enumerate DBMS users privileges")
enumeration.add_option("--roles", dest="getRoles",
action="store_true",
help="Enumerate DBMS users roles")
enumeration.add_option("--dbs", dest="getDbs", action="store_true",
help="Enumerate DBMS databases")
enumeration.add_option("--tables", dest="getTables", action="store_true",
help="Enumerate DBMS database tables")
enumeration.add_option("--columns", dest="getColumns", action="store_true",
help="Enumerate DBMS database table columns")
enumeration.add_option("--schema", dest="getSchema", action="store_true",
help="Enumerate DBMS schema")
enumeration.add_option("--count", dest="getCount", action="store_true",
help="Retrieve number of entries for table(s)")
enumeration.add_option("--dump", dest="dumpTable", action="store_true",
help="Dump DBMS database table entries")
enumeration.add_option("--dump-all", dest="dumpAll", action="store_true",
help="Dump all DBMS databases tables entries")
enumeration.add_option("--search", dest="search", action="store_true",
help="Search column(s), table(s) and/or database name(s)")
enumeration.add_option("--comments", dest="getComments", action="store_true",
help="Retrieve DBMS comments")
enumeration.add_option("-D", dest="db",
help="DBMS database to enumerate")
enumeration.add_option("-T", dest="tbl",
help="DBMS database table(s) to enumerate")
enumeration.add_option("-C", dest="col",
help="DBMS database table column(s) to enumerate")
enumeration.add_option("-X", dest="excludeCol",
help="DBMS database table column(s) to not enumerate")
enumeration.add_option("-U", dest="user",
help="DBMS user to enumerate")
enumeration.add_option("--exclude-sysdbs", dest="excludeSysDbs",
action="store_true",
help="Exclude DBMS system databases when "
"enumerating tables")
enumeration.add_option("--pivot-column", dest="pivotColumn",
help="Pivot column name")
enumeration.add_option("--where", dest="dumpWhere",
help="Use WHERE condition while table dumping")
enumeration.add_option("--start", dest="limitStart", type="int",
help="First dump table entry to retrieve")
enumeration.add_option("--stop", dest="limitStop", type="int",
help="Last dump table entry to retrieve")
enumeration.add_option("--first", dest="firstChar", type="int",
help="First query output word character to retrieve")
enumeration.add_option("--last", dest="lastChar", type="int",
help="Last query output word character to retrieve")
enumeration.add_option("--sql-query", dest="query",
help="SQL statement to be executed")
enumeration.add_option("--sql-shell", dest="sqlShell",
action="store_true",
help="Prompt for an interactive SQL shell")
enumeration.add_option("--sql-file", dest="sqlFile",
help="Execute SQL statements from given file(s)")
# Brute force options
brute = OptionGroup(parser, "Brute force", "These "
"options can be used to run brute force "
"checks")
brute.add_option("--common-tables", dest="commonTables", action="store_true",
help="Check existence of common tables")
brute.add_option("--common-columns", dest="commonColumns", action="store_true",
help="Check existence of common columns")
# User-defined function options
udf = OptionGroup(parser, "User-defined function injection", "These "
"options can be used to create custom user-defined "
"functions")
udf.add_option("--udf-inject", dest="udfInject", action="store_true",
help="Inject custom user-defined functions")
udf.add_option("--shared-lib", dest="shLib",
help="Local path of the shared library")
# File system options
filesystem = OptionGroup(parser, "File system access", "These options "
"can be used to access the back-end database "
"management system underlying file system")
filesystem.add_option("--file-read", dest="rFile",
help="Read a file from the back-end DBMS "
"file system")
filesystem.add_option("--file-write", dest="wFile",
help="Write a local file on the back-end "
"DBMS file system")
filesystem.add_option("--file-dest", dest="dFile",
help="Back-end DBMS absolute filepath to "
"write to")
# Takeover options
takeover = OptionGroup(parser, "Operating system access", "These "
"options can be used to access the back-end "
"database management system underlying "
"operating system")
takeover.add_option("--os-cmd", dest="osCmd",
help="Execute an operating system command")
takeover.add_option("--os-shell", dest="osShell",
action="store_true",
help="Prompt for an interactive operating "
"system shell")
takeover.add_option("--os-pwn", dest="osPwn",
action="store_true",
help="Prompt for an OOB shell, "
"Meterpreter or VNC")
takeover.add_option("--os-smbrelay", dest="osSmb",
action="store_true",
help="One click prompt for an OOB shell, "
"Meterpreter or VNC")
takeover.add_option("--os-bof", dest="osBof",
action="store_true",
help="Stored procedure buffer overflow "
"exploitation")
takeover.add_option("--priv-esc", dest="privEsc",
action="store_true",
help="Database process user privilege escalation")
takeover.add_option("--msf-path", dest="msfPath",
help="Local path where Metasploit Framework "
"is installed")
takeover.add_option("--tmp-path", dest="tmpPath",
help="Remote absolute path of temporary files "
"directory")
# Windows registry options
windows = OptionGroup(parser, "Windows registry access", "These "
"options can be used to access the back-end "
"database management system Windows "
"registry")
windows.add_option("--reg-read", dest="regRead",
action="store_true",
help="Read a Windows registry key value")
windows.add_option("--reg-add", dest="regAdd",
action="store_true",
help="Write a Windows registry key value data")
windows.add_option("--reg-del", dest="regDel",
action="store_true",
help="Delete a Windows registry key value")
windows.add_option("--reg-key", dest="regKey",
help="Windows registry key")
windows.add_option("--reg-value", dest="regVal",
help="Windows registry key value")
windows.add_option("--reg-data", dest="regData",
help="Windows registry key value data")
windows.add_option("--reg-type", dest="regType",
help="Windows registry key value type")
# General options
general = OptionGroup(parser, "General", "These options can be used "
"to set some general working parameters")
general.add_option("-s", dest="sessionFile",
help="Load session from a stored (.sqlite) file")
general.add_option("-t", dest="trafficFile",
help="Log all HTTP traffic into a "
"textual file")
general.add_option("--batch", dest="batch",
action="store_true",
help="Never ask for user input, use the default behaviour")
general.add_option("--binary-fields", dest="binaryFields",
help="Result fields having binary values (e.g. \"digest\")")
general.add_option("--charset", dest="charset",
help="Force character encoding used for data retrieval")
general.add_option("--check-internet", dest="checkInternet",
action="store_true",
help="Check Internet connection before assessing the target")
general.add_option("--crawl", dest="crawlDepth", type="int",
help="Crawl the website starting from the target URL")
general.add_option("--crawl-exclude", dest="crawlExclude",
help="Regexp to exclude pages from crawling (e.g. \"logout\")")
general.add_option("--csv-del", dest="csvDel",
help="Delimiting character used in CSV output "
"(default \"%s\")" % defaults.csvDel)
general.add_option("--dump-format", dest="dumpFormat",
help="Format of dumped data (CSV (default), HTML or SQLITE)")
general.add_option("--eta", dest="eta",
action="store_true",
help="Display for each output the estimated time of arrival")
general.add_option("--flush-session", dest="flushSession",
action="store_true",
help="Flush session files for current target")
general.add_option("--forms", dest="forms",
action="store_true",
help="Parse and test forms on target URL")
general.add_option("--fresh-queries", dest="freshQueries",
action="store_true",
help="Ignore query results stored in session file")
general.add_option("--har", dest="harFile",
help="Log all HTTP traffic into a HAR file")
general.add_option("--hex", dest="hexConvert",
action="store_true",
help="Use DBMS hex function(s) for data retrieval")
general.add_option("--output-dir", dest="outputDir",
action="store",
help="Custom output directory path")
general.add_option("--parse-errors", dest="parseErrors",
action="store_true",
help="Parse and display DBMS error messages from responses")
general.add_option("--save", dest="saveConfig",
help="Save options to a configuration INI file")
general.add_option("--scope", dest="scope",
help="Regexp to filter targets from provided proxy log")
general.add_option("--test-filter", dest="testFilter",
help="Select tests by payloads and/or titles (e.g. ROW)")
general.add_option("--test-skip", dest="testSkip",
help="Skip tests by payloads and/or titles (e.g. BENCHMARK)")
general.add_option("--update", dest="updateAll",
action="store_true",
help="Update sqlmap")
# Miscellaneous options
miscellaneous = OptionGroup(parser, "Miscellaneous")
miscellaneous.add_option("-z", dest="mnemonics",
help="Use short mnemonics (e.g. \"flu,bat,ban,tec=EU\")")
miscellaneous.add_option("--alert", dest="alert",
help="Run host OS command(s) when SQL injection is found")
miscellaneous.add_option("--answers", dest="answers",
help="Set question answers (e.g. \"quit=N,follow=N\")")
miscellaneous.add_option("--beep", dest="beep", action="store_true",
help="Beep on question and/or when SQL injection is found")
miscellaneous.add_option("--cleanup", dest="cleanup",
action="store_true",
help="Clean up the DBMS from sqlmap specific "
"UDF and tables")
miscellaneous.add_option("--dependencies", dest="dependencies",
action="store_true",
help="Check for missing (non-core) sqlmap dependencies")
miscellaneous.add_option("--disable-coloring", dest="disableColoring",
action="store_true",
help="Disable console output coloring")
miscellaneous.add_option("--gpage", dest="googlePage", type="int",
help="Use Google dork results from specified page number")
miscellaneous.add_option("--identify-waf", dest="identifyWaf",
action="store_true",
help="Make a thorough testing for a WAF/IPS/IDS protection")
miscellaneous.add_option("--mobile", dest="mobile",
action="store_true",
help="Imitate smartphone through HTTP User-Agent header")
miscellaneous.add_option("--offline", dest="offline",
action="store_true",
help="Work in offline mode (only use session data)")
miscellaneous.add_option("--purge-output", dest="purgeOutput",
action="store_true",
help="Safely remove all content from output directory")
miscellaneous.add_option("--skip-waf", dest="skipWaf",
action="store_true",
help="Skip heuristic detection of WAF/IPS/IDS protection")
miscellaneous.add_option("--smart", dest="smart",
action="store_true",
help="Conduct thorough tests only if positive heuristic(s)")
miscellaneous.add_option("--sqlmap-shell", dest="sqlmapShell", action="store_true",
help="Prompt for an interactive sqlmap shell")
miscellaneous.add_option("--tmp-dir", dest="tmpDir",
help="Local directory for storing temporary files")
miscellaneous.add_option("--web-root", dest="webRoot",
help="Web server document root directory (e.g. \"/var/www\")")
miscellaneous.add_option("--wizard", dest="wizard",
action="store_true",
help="Simple wizard interface for beginner users")
# Hidden and/or experimental options
parser.add_option("--dummy", dest="dummy", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--murphy-rate", dest="murphyRate", type="int",
help=SUPPRESS_HELP)
parser.add_option("--disable-precon", dest="disablePrecon", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--disable-stats", dest="disableStats", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--profile", dest="profile", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--force-dns", dest="forceDns", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--force-threads", dest="forceThreads", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--smoke-test", dest="smokeTest", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--live-test", dest="liveTest", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--stop-fail", dest="stopFail", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--run-case", dest="runCase", help=SUPPRESS_HELP)
# API options
parser.add_option("--api", dest="api", action="store_true",
help=SUPPRESS_HELP)
parser.add_option("--taskid", dest="taskid", help=SUPPRESS_HELP)
parser.add_option("--database", dest="database", help=SUPPRESS_HELP)
parser.add_option_group(target)
parser.add_option_group(request)
parser.add_option_group(optimization)
parser.add_option_group(injection)
parser.add_option_group(detection)
parser.add_option_group(techniques)
parser.add_option_group(fingerprint)
parser.add_option_group(enumeration)
parser.add_option_group(brute)
parser.add_option_group(udf)
parser.add_option_group(filesystem)
parser.add_option_group(takeover)
parser.add_option_group(windows)
parser.add_option_group(general)
parser.add_option_group(miscellaneous)
# Dirty hack to display longer options without breaking into two lines
def _(self, *args):
retVal = parser.formatter._format_option_strings(*args)
if len(retVal) > MAX_HELP_OPTION_LENGTH:
retVal = ("%%.%ds.." % (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % retVal
return retVal
parser.formatter._format_option_strings = parser.formatter.format_option_strings
parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser, type(parser))
# Dirty hack for making a short option '-hh'
option = parser.get_option("--hh")
option._short_opts = ["-hh"]
option._long_opts = []
# Dirty hack for inherent help message of switch '-h'
option = parser.get_option("-h")
option.help = option.help.capitalize().replace("this help", "basic help")
_ = []
prompt = False
advancedHelp = True
extraHeaders = []
# Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING")
for arg in argv:
_.append(getUnicode(arg, encoding=sys.stdin.encoding))
argv = _
checkDeprecatedOptions(argv)
prompt = "--sqlmap-shell" in argv
if prompt:
parser.usage = ""
cmdLineOptions.sqlmapShell = True
_ = ["x", "q", "exit", "quit", "clear"]
for option in parser.option_list:
_.extend(option._long_opts)
_.extend(option._short_opts)
for group in parser.option_groups:
for option in group.option_list:
_.extend(option._long_opts)
_.extend(option._short_opts)
autoCompletion(AUTOCOMPLETE_TYPE.SQLMAP, commands=_)
while True:
command = None
try:
command = raw_input("sqlmap-shell> ").strip()
command = getUnicode(command, encoding=sys.stdin.encoding)
except (KeyboardInterrupt, EOFError):
print
raise SqlmapShellQuitException
if not command:
continue
elif command.lower() == "clear":
clearHistory()
dataToStdout("[i] history cleared\n")
saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
elif command.lower() in ("x", "q", "exit", "quit"):
raise SqlmapShellQuitException
elif command[0] != '-':
dataToStdout("[!] invalid option(s) provided\n")
dataToStdout("[i] proper example: '-u http://www.site.com/vuln.php?id=1 --banner'\n")
else:
saveHistory(AUTOCOMPLETE_TYPE.SQLMAP)
loadHistory(AUTOCOMPLETE_TYPE.SQLMAP)
break
try:
for arg in shlex.split(command):
argv.append(getUnicode(arg, encoding=sys.stdin.encoding))
except ValueError, ex:
raise SqlmapSyntaxException, "something went wrong during command line parsing ('%s')" % ex.message
for i in xrange(len(argv)):
if argv[i] == "-hh":
argv[i] = "-h"
elif len(argv[i]) > 1 and all(ord(_) in xrange(0x2018, 0x2020) for _ in ((argv[i].split('=', 1)[-1].strip() or ' ')[0], argv[i][-1])):
dataToStdout("[!] copy-pasting illegal (non-console) quote characters from Internet is, well, illegal (%s)\n" % argv[i])
raise SystemExit
elif len(argv[i]) > 1 and u"\uff0c" in argv[i].split('=', 1)[-1]:
dataToStdout("[!] copy-pasting illegal (non-console) comma characters from Internet is, well, illegal (%s)\n" % argv[i])
raise SystemExit
elif re.search(r"\A-\w=.+", argv[i]):
dataToStdout("[!] potentially miswritten (illegal '=') short option detected ('%s')\n" % argv[i])
raise SystemExit
elif argv[i] == "-H":
if i + 1 < len(argv):
extraHeaders.append(argv[i + 1])
elif re.match(r"\A\d+!\Z", argv[i]) and argv[max(0, i - 1)] == "--threads" or re.match(r"\A--threads.+\d+!\Z", argv[i]):
argv[i] = argv[i][:-1]
conf.skipThreadCheck = True
elif argv[i] == "--version":
print VERSION_STRING.split('/')[-1]
raise SystemExit
elif argv[i] in ("-h", "--help"):
advancedHelp = False
for group in parser.option_groups[:]:
found = False
for option in group.option_list:
if option.dest not in BASIC_HELP_ITEMS:
option.help = SUPPRESS_HELP
else:
found = True
if not found:
parser.option_groups.remove(group)
for verbosity in (_ for _ in argv if re.search(r"\A\-v+\Z", _)):
try:
if argv.index(verbosity) == len(argv) - 1 or not argv[argv.index(verbosity) + 1].isdigit():
conf.verbose = verbosity.count('v') + 1
del argv[argv.index(verbosity)]
except (IndexError, ValueError):
pass
try:
(args, _) = parser.parse_args(argv)
except UnicodeEncodeError, ex:
dataToStdout("\n[!] %s\n" % ex.object.encode("unicode-escape"))
raise SystemExit
except SystemExit:
if "-h" in argv and not advancedHelp:
dataToStdout("\n[!] to see full list of options run with '-hh'\n")
raise
if extraHeaders:
if not args.headers:
args.headers = ""
delimiter = "\\n" if "\\n" in args.headers else "\n"
args.headers += delimiter + delimiter.join(extraHeaders)
# Expand given mnemonic options (e.g. -z "ign,flu,bat")
for i in xrange(len(argv) - 1):
if argv[i] == "-z":
expandMnemonics(argv[i + 1], parser, args)
if args.dummy:
args.url = args.url or DUMMY_URL
if not any((args.direct, args.url, args.logFile, args.bulkFile, args.googleDork, args.configFile, \
args.requestFile, args.updateAll, args.smokeTest, args.liveTest, args.wizard, args.dependencies, \
args.purgeOutput, args.sitemapUrl)):
errMsg = "missing a mandatory option (-d, -u, -l, -m, -r, -g, -c, -x, --wizard, --update, --purge-output or --dependencies), "
errMsg += "use -h for basic or -hh for advanced help\n"
parser.error(errMsg)
return args
except (OptionError, TypeError), e:
parser.error(e)
except SystemExit:
# Protection against Windows dummy double clicking
if IS_WIN:
dataToStdout("\nPress Enter to continue...")
raw_input()
raise
debugMsg = "parsing command line"
logger.debug(debugMsg)
|
zhinaonet/sqlmap-z
|
lib/parse/cmdline.py
|
Python
|
gpl-3.0
| 45,290
|
[
"VisIt"
] |
9fed3d5e70d19a48754cf5517fb8f281f51f2d9477ac2f42cc233e57f267b357
|
#!/usr/bin/env python
import vtk
import numpy as np
from vmtk import vmtkscripts
import argparse
import copy
# creates lines normal to surface for evaluation in the probe image with surface
def warp_surface(args):
    """Randomly perturb ("fuzz") a surface mesh along its point normals.

    Reads the surface from ``args.surface``, displaces every point along
    its normal by a random amount in [-0.5, 0.5] scaled by
    ``args.fuzz_scale``, and writes the result to ``args.file_out``.
    """
    print("warp the surface ")

    surface_reader = vmtkscripts.vmtkSurfaceReader()
    surface_reader.InputFileName = args.surface
    surface_reader.Execute()
    polydata = surface_reader.Surface

    # Reuse point normals when the input already carries a "Normals" array.
    point_data = polydata.GetPointData()
    array_names = [point_data.GetArrayName(i)
                   for i in range(point_data.GetNumberOfArrays())]
    if "Normals" in array_names:
        with_normals = polydata
        print("already have")
    else:
        normal_filter = vtk.vtkPolyDataNormals()
        normal_filter.SetInputData(polydata)
        normal_filter.SetFeatureAngle(30.0)  # default
        normal_filter.SetSplitting(True)
        normal_filter.Update()
        normal_filter.GetOutput().GetPointData().SetActiveVectors("Normals")
        with_normals = normal_filter.GetOutput()
        print("normals generated")

    # One random scalar per point, uniform in [-0.5, 0.5].
    rng = vtk.vtkRandomAttributeGenerator()
    rng.SetInputData(with_normals)
    rng.SetDataTypeToDouble()
    rng.GeneratePointScalarsOn()
    rng.SetComponentRange(-0.5, 0.5)
    rng.Update()

    # Scale each normal by its random scalar to build displacement vectors.
    calculator = vtk.vtkArrayCalculator()
    calculator.SetInputConnection(rng.GetOutputPort())
    calculator.AddScalarArrayName("RandomPointScalars", 0)
    calculator.AddVectorArrayName("Normals", 0, 1, 2)
    calculator.SetFunction("Normals * RandomPointScalars")
    calculator.SetResultArrayName("RandomLengthNormalVectors")
    calculator.Update()

    # Displace the points by the random-length normal vectors.
    warper = vtk.vtkWarpVector()
    warper.SetInputConnection(calculator.GetOutputPort())
    warper.SetInputArrayToProcess(0, 0, 0,
                                  vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
                                  "RandomLengthNormalVectors")
    warper.SetScaleFactor(args.fuzz_scale)
    warper.Update()

    surface_writer = vmtkscripts.vmtkSurfaceWriter()
    surface_writer.OutputFileName = args.file_out
    surface_writer.Input = warper.GetOutput()
    surface_writer.Execute()
if __name__ == '__main__':
    # Command-line entry point: parse arguments and fuzz the surface.
    arg_parser = argparse.ArgumentParser(
        description='estimate vertices for uniform point distribution')
    arg_parser.add_argument("-i", dest="surface", required=True,
                            help="input surface file", metavar="FILE")
    arg_parser.add_argument("-o", dest="file_out", required=True,
                            help="output surface file", metavar="FILE")
    arg_parser.add_argument("-s", '--scale', dest="fuzz_scale", type=float,
                            help='how much to fuzz surface ', default=0.08)
    warp_surface(arg_parser.parse_args())
|
kayarre/Tools
|
vmtk/fuzzypsurface.py
|
Python
|
bsd-2-clause
| 2,767
|
[
"VTK"
] |
6c406b6dfae85a106e44bbd1b02d0cba7fe1168899caa77b4bff8cce5028b4c0
|
#
# The Python Imaging Library.
# $Id$
#
# the Image class wrapper
#
# partial release history:
# 1995-09-09 fl Created
# 1996-03-11 fl PIL release 0.0 (proof of concept)
# 1996-04-30 fl PIL release 0.1b1
# 1999-07-28 fl PIL release 1.0 final
# 2000-06-07 fl PIL release 1.1
# 2000-10-20 fl PIL release 1.1.1
# 2001-05-07 fl PIL release 1.1.2
# 2002-03-15 fl PIL release 1.1.3
# 2003-05-10 fl PIL release 1.1.4
# 2005-03-28 fl PIL release 1.1.5
# 2006-12-02 fl PIL release 1.1.6
# 2009-11-15 fl PIL release 1.1.7
#
# Copyright (c) 1997-2009 by Secret Labs AB. All rights reserved.
# Copyright (c) 1995-2009 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from PIL import VERSION, PILLOW_VERSION, _plugins
import logging
import warnings
import math
logger = logging.getLogger(__name__)
class DecompressionBombWarning(RuntimeWarning):
    # NOTE(review): raised for suspiciously large images — presumably tied
    # to MAX_IMAGE_PIXELS below; confirm at the emitting call site.
    pass
class _imaging_not_installed(object):
    # module placeholder
    # Stands in for the _imaging C extension when it could not be imported:
    # any attribute access fails loudly with ImportError instead of a
    # confusing AttributeError/NameError later on.
    def __getattr__(self, id):
        raise ImportError("The _imaging C module is not installed")
# Limit to around a quarter gigabyte for a 24 bit (3 bpp) image
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 / 4 / 3)
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, Pillow will not load.
# Note that other modules should not refer to _imaging directly;
# import Image and use the Image.core variable instead.
# Also note that Image.core is not a publicly documented interface,
# and should be considered private and subject to change.
from PIL import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
" version of Pillow or PIL")
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_" in str(v):
# should match _PyUnicodeUCS2_FromString and
# _PyUnicodeUCS2_AsLatin1String
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile Pillow or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_" in str(v):
# should match _PyUnicodeUCS4_FromString and
# _PyUnicodeUCS4_AsLatin1String
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile Pillow or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
# see docs/porting.rst
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from PIL import ImageMode
from PIL._binary import i8
from PIL._util import isPath
from PIL._util import isStringType
from PIL._util import deferred_error
import os
import sys
import io
import struct
# type stuff
import collections
import numbers
# works everywhere, win for pypy, not cpython
USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info')
try:
import cffi
HAS_CFFI = True
except ImportError:
HAS_CFFI = False
def isImageType(t):
    """Return True if *t* quacks like an image (exposes an ``im`` attribute).

    .. warning:: This function is for internal use only.

    :param t: object to check
    :returns: True if the object is an image
    """
    return hasattr(t, "im")
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
BOX = 4
BILINEAR = LINEAR = 2
HAMMING = 5
BICUBIC = CUBIC = 3
LANCZOS = ANTIALIAS = 1
# dithers
NEAREST = NONE = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # Bits need to be extended to bytes
"L": ('|u1', None),
"LA": ('|u1', 2),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
"HSV": ('|u1', 3),
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
    # Map an image to the (shape, typestr) pair used by the numpy array
    # interface; multi-channel modes gain a trailing axis.
    typ, extra = _MODE_CONV[im.mode]
    rows, cols = im.size[1], im.size[0]
    if extra is None:
        return (rows, cols), typ
    return (rows, cols, extra), typ
MODES = sorted(_MODEINFO.keys())
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
    """Return the "base" mode for the given mode: "L" for modes holding
    grayscale data, "RGB" for modes holding color data.

    :param mode: Input mode.
    :returns: "L" or "RGB".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return descriptor.basemode
def getmodetype(mode):
    """Return the storage type mode: a single-layer mode suitable for
    storing individual bands of the given mode.

    :param mode: Input mode.
    :returns: "L", "I", or "F".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode.getmode(mode)
    return descriptor.basetype
def getmodebandnames(mode):
    """
    Gets a list of individual band names.  Given a mode, this function
    returns a tuple containing the names of individual bands (use
    :py:func:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band).

    :param mode: Input mode.
    :returns: A tuple containing band names.  The length of the tuple
        gives the number of bands in an image of the given mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    return ImageMode.getmode(mode).bands
def getmodebands(mode):
    """Return how many individual bands the given mode has.

    :param mode: Input mode.
    :returns: The number of bands in this mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    bands = ImageMode.getmode(mode).bands
    return len(bands)
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
    """Explicitly load the basic, commonly-needed file format drivers."""
    global _initialized
    if _initialized >= 1:
        return
    # Import each basic plugin; a missing one is simply skipped.
    for plugin in ("BmpImagePlugin", "GifImagePlugin", "JpegImagePlugin",
                   "PpmImagePlugin", "PngImagePlugin"):
        try:
            __import__("PIL.%s" % plugin, globals(), locals(), [])
        except ImportError:
            pass
    _initialized = 1
def init():
    """
    Explicitly initializes the Python Imaging Library. This function
    loads all available file format drivers.
    """
    global _initialized
    if _initialized >= 2:
        return 0
    # Import every registered plugin; failures are logged, not fatal.
    for plugin_name in _plugins:
        try:
            logger.debug("Importing %s", plugin_name)
            __import__("PIL.%s" % plugin_name, globals(), locals(), [])
        except ImportError as exc:
            logger.debug("Image: failed to import %s: %s", plugin_name, exc)
    if OPEN or SAVE:
        _initialized = 2
        return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
    """Look up ``<decoder_name>_decoder`` on the core module and build it.

    :raises IOError: if no such decoder exists in the core module.
    """
    # Normalize args into a tuple.
    if args is None:
        args = ()
    elif not isinstance(args, tuple):
        args = (args,)
    try:
        decoder_factory = getattr(core, decoder_name + "_decoder")
        # print(decoder_factory, mode, args + extra)
        return decoder_factory(mode, *args + extra)
    except AttributeError:
        raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
    """Look up ``<encoder_name>_encoder`` on the core module and build it.

    :raises IOError: if no such encoder exists in the core module.
    """
    # Normalize args into a tuple.
    if args is None:
        args = ()
    elif not isinstance(args, tuple):
        args = (args,)
    try:
        encoder_factory = getattr(core, encoder_name + "_encoder")
        # print(encoder_factory, mode, args + extra)
        return encoder_factory(mode, *args + extra)
    except AttributeError:
        raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
    # Wrap plain values in _E so the symbolic-expression operators
    # (see _E below) work uniformly.
    if isinstance(value, _E):
        return value
    return _E(value)
class _E(object):
    # Tiny symbolic-expression node: records the operand tree of "+"
    # and "*" applications instead of evaluating them.  Consumed by
    # _getscaleoffset to recover (scale, offset) from a lambda.
    def __init__(self, data):
        self.data = data

    def __add__(self, other):
        return _E((self.data, "__add__", coerce_e(other).data))

    def __mul__(self, other):
        return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
    # Evaluate *expr* symbolically (feeding it an _E node) and
    # pattern-match the recorded operand tree to extract (scale, offset)
    # for expressions of the form a*x, x+b, or a*x+b.
    stub = ["stub"]
    data = expr(_E(stub)).data
    try:
        (a, b, c) = data  # simplified syntax
        if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
            return c, 0.0
        if a is stub and b == "__add__" and isinstance(c, numbers.Number):
            return 1.0, c
    except TypeError:
        pass
    try:
        ((a, b, c), d, e) = data  # full syntax
        if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
                d == "__add__" and isinstance(e, numbers.Number)):
            return c, e
    except TypeError:
        pass
    raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
class Image(object):
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
def __init__(self):
    # FIXME: take "new" parameters / other image?
    # FIXME: turn mode and size into delegating properties?
    self.im = None           # core image object (set by loaders/factories)
    self.mode = ""           # mode string, e.g. "RGB" (see _MODEINFO)
    self.size = (0, 0)       # (width, height) in pixels
    self.palette = None      # ImagePalette for "P" images (see _new)
    self.info = {}           # miscellaneous per-image metadata
    self.category = NORMAL   # NORMAL / SEQUENCE / CONTAINER
    self.readonly = 0
    self.pyaccess = None     # cffi-based pixel access object (see load)
@property
def width(self):
return self.size[0]
@property
def height(self):
return self.size[1]
def _new(self, im):
    # Wrap a core image *im* in a fresh Image object, carrying over
    # palette and info from self (the source of the operation).
    new = Image()
    new.im = im
    new.mode = im.mode
    new.size = im.size
    if self.palette:
        new.palette = self.palette.copy()
    # A "P" result always needs some palette, even an empty default one.
    if im.mode == "P" and not new.palette:
        from PIL import ImagePalette
        new.palette = ImagePalette.ImagePalette()
    new.info = self.info.copy()
    return new

_makeself = _new  # compatibility
# Context Manager Support
def __enter__(self):
    return self

def __exit__(self, *args):
    # Close the underlying file pointer when leaving the with-block.
    self.close()
def close(self):
    """
    Closes the file pointer, if possible.

    This operation will destroy the image core and release its memory.
    The image data will be unusable afterward.

    This function is only required to close images that have not
    had their file read and closed by the
    :py:meth:`~PIL.Image.Image.load` method.
    """
    try:
        self.fp.close()
    except Exception as msg:
        # Best effort: fp may be absent or already closed.
        logger.debug("Error closing: %s", msg)
    # Instead of simply setting to None, we're setting up a
    # deferred error that will better explain that the core image
    # object is gone.
    self.im = deferred_error(ValueError("Operation on closed image"))
def _copy(self):
    # Replace self.im with a private copy of the core object and drop
    # cached pixel access.  NOTE(review): presumably called before
    # in-place mutation — confirm at call sites.
    self.load()
    self.im = self.im.copy()
    self.pyaccess = None
    self.readonly = 0
def _dump(self, file=None, format=None):
    """Dump the image to *file* (a fresh temp file by default) and
    return the path written.  PPM (or no format) uses the fast core
    save; anything else goes through the normal save machinery."""
    import tempfile
    suffix = '.' + format if format else ''
    if not file:
        fd, file = tempfile.mkstemp(suffix)
        os.close(fd)
    self.load()
    if not format or format == "PPM":
        self.im.save_ppm(file)
    else:
        if not file.endswith(format):
            file = "%s.%s" % (file, format)
        self.save(file, format)
    return file
def __eq__(self, other):
return (self.__class__.__name__ == other.__class__.__name__ and
self.mode == other.mode and
self.size == other.size and
self.info == other.info and
self.category == other.category and
self.readonly == other.readonly and
self.getpalette() == other.getpalette() and
self.tobytes() == other.tobytes())
def __ne__(self, other):
eq = (self == other)
return not eq
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def _repr_png_(self):
    """ iPython display hook support

    :returns: png version of the image as bytes
    """
    # Encode through the normal save path into an in-memory buffer.
    from io import BytesIO
    b = BytesIO()
    self.save(b, 'PNG')
    return b.getvalue()
@property
def __array_interface__(self):
    # numpy array interface support
    new = {}
    shape, typestr = _conv_type_shape(self)
    new['shape'] = shape
    new['typestr'] = typestr
    new['version'] = 3
    if self.mode == '1':
        # Binary images need to be extended from bits to bytes
        # See: https://github.com/python-pillow/Pillow/issues/350
        new['data'] = self.tobytes('raw', 'L')
    else:
        new['data'] = self.tobytes()
    return new
def __getstate__(self):
    # Pickle support: capture metadata, palette and raw pixel data
    # as a plain list (see __setstate__ for the inverse).
    return [self.info, self.mode, self.size,
            self.getpalette(), self.tobytes()]
def __setstate__(self, state):
    # Rebuild the image from the [info, mode, size, palette, data]
    # list produced by __getstate__.
    Image.__init__(self)
    self.tile = []
    info, mode, size, palette, data = state
    self.info = info
    self.mode = mode
    self.size = size
    self.im = core.new(mode, size)
    if mode in ("L", "P") and palette:
        self.putpalette(palette)
    self.frombytes(data)
def tobytes(self, encoder_name="raw", *args):
    """
    Return image as a bytes object.

    .. warning::
        This method returns the raw image data from the internal
        storage.  For compressed image data (e.g. PNG, JPEG) use
        :meth:`~.save`, with a BytesIO parameter for in-memory
        data.

    :param encoder_name: What encoder to use.  The default is to
        use the standard "raw" encoder.
    :param args: Extra arguments to the encoder.
    :rtype: A bytes object.
    """
    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]
    # "raw" with no explicit args defaults to the image's own mode
    if encoder_name == "raw" and args == ():
        args = self.mode
    self.load()
    # unpack data
    e = _getencoder(self.mode, encoder_name, args)
    e.setimage(self.im)
    bufsize = max(65536, self.size[0] * 4)  # see RawEncode.c
    data = []
    while True:
        # NOTE(review): encode() appears to return
        # (length, status, chunk) — confirm against the core encoders.
        l, s, d = e.encode(bufsize)
        data.append(d)
        if s:
            break
    if s < 0:
        raise RuntimeError("encoder error %d in tobytes" % s)
    return b"".join(data)
def tostring(self, *args, **kw):
raise NotImplementedError("tostring() has been removed. " +
"Please call tobytes() instead.")
def tobitmap(self, name="image"):
    """
    Returns the image converted to an X11 bitmap.

    .. note:: This method only works for mode "1" images.

    :param name: The name prefix to use for the bitmap variables.
    :returns: A string containing an X11 bitmap.
    :raises ValueError: If the mode is not "1"
    """
    self.load()
    if self.mode != "1":
        raise ValueError("not a bitmap")
    # XBM payload plus the surrounding C-style declaration.
    data = self.tobytes("xbm")
    return b"".join([
        ("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
        ("#define %s_height %d\n" % (name, self.size[1])).encode('ascii'),
        ("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"
    ])
def frombytes(self, data, decoder_name="raw", *args):
    """
    Loads this image with pixel data from a bytes object.

    This method is similar to the :py:func:`~PIL.Image.frombytes` function,
    but loads data into this image instead of creating a new image object.
    """
    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]
    # default format
    if decoder_name == "raw" and args == ():
        args = self.mode
    # unpack data
    d = _getdecoder(self.mode, decoder_name, args)
    d.setimage(self.im)
    s = d.decode(data)
    # NOTE(review): decode() appears to return (consumed, errcode) —
    # confirm against the core decoders.
    if s[0] >= 0:
        raise ValueError("not enough image data")
    if s[1] != 0:
        raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
raise NotImplementedError("fromstring() has been removed. " +
"Please call frombytes() instead.")
def load(self):
    """
    Allocates storage for the image and loads the pixel data.  In
    normal cases, you don't need to call this method, since the
    Image class automatically loads an opened image when it is
    accessed for the first time.  This method will close the file
    associated with the image.

    :returns: An image access object.
    :rtype: :ref:`PixelAccess` or :py:class:`PIL.PyAccess`
    """
    if self.im and self.palette and self.palette.dirty:
        # realize palette
        self.im.putpalette(*self.palette.getdata())
        self.palette.dirty = 0
        self.palette.mode = "RGB"
        self.palette.rawmode = None
        if "transparency" in self.info:
            # Push palette transparency into the core image as alpha.
            if isinstance(self.info["transparency"], int):
                self.im.putpalettealpha(self.info["transparency"], 0)
            else:
                self.im.putpalettealphas(self.info["transparency"])
            self.palette.mode = "RGBA"
    if self.im:
        # Prefer the cffi-based access layer when available.
        if HAS_CFFI and USE_CFFI_ACCESS:
            if self.pyaccess:
                return self.pyaccess
            from PIL import PyAccess
            self.pyaccess = PyAccess.new(self, self.readonly)
            if self.pyaccess:
                return self.pyaccess
        return self.im.pixel_access(self.readonly)
def verify(self):
    """
    Verifies the contents of a file. For data read from a file, this
    method attempts to determine if the file is broken, without
    actually decoding the image data.  If this method finds any
    problems, it raises suitable exceptions.  If you need to load
    the image after using this method, you must reopen the image
    file.
    """
    # Base implementation does nothing; NOTE(review): presumably
    # overridden by file-format plugins — confirm in subclasses.
    pass
def convert(self, mode=None, matrix=None, dither=None,
            palette=WEB, colors=256):
    """
    Returns a converted copy of this image. For the "P" mode, this
    method translates pixels through the palette.  If mode is
    omitted, a mode is chosen so that all information in the image
    and the palette can be represented without a palette.

    The current version supports all possible conversions between
    "L", "RGB" and "CMYK." The **matrix** argument only supports "L"
    and "RGB".

    When translating a color image to black and white (mode "L"),
    the library uses the ITU-R 601-2 luma transform::

        L = R * 299/1000 + G * 587/1000 + B * 114/1000

    The default method of converting a greyscale ("L") or "RGB"
    image into a bilevel (mode "1") image uses Floyd-Steinberg
    dither to approximate the original image luminosity levels. If
    dither is NONE, all non-zero values are set to 255 (white). To
    use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
    method.

    :param mode: The requested mode. See: :ref:`concept-modes`.
    :param matrix: An optional conversion matrix.  If given, this
       should be 4- or 12-tuple containing floating point values.
    :param dither: Dithering method, used when converting from
       mode "RGB" to "P" or from "RGB" or "L" to "1".
       Available methods are NONE or FLOYDSTEINBERG (default).
    :param palette: Palette to use when converting from mode "RGB"
       to "P".  Available palettes are WEB or ADAPTIVE.
    :param colors: Number of colors to use for the ADAPTIVE palette.
       Defaults to 256.
    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if not mode:
        # determine default mode
        if self.mode == "P":
            self.load()
            if self.palette:
                mode = self.palette.mode
            else:
                mode = "RGB"
        else:
            return self.copy()

    self.load()

    if matrix:
        # matrix conversion
        if mode not in ("L", "RGB"):
            raise ValueError("illegal conversion")
        im = self.im.convert_matrix(mode, matrix)
        return self._new(im)

    # RGBA -> P needs quantization (alpha can't survive a plain convert).
    if mode == "P" and self.mode == "RGBA":
        return self.quantize(colors)

    trns = None
    delete_trns = False
    # transparency handling
    if "transparency" in self.info and \
            self.info['transparency'] is not None:
        if self.mode in ('L', 'RGB') and mode == 'RGBA':
            # Use transparent conversion to promote from transparent
            # color to an alpha channel.
            return self._new(self.im.convert_transparent(
                mode, self.info['transparency']))
        elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'):
            t = self.info['transparency']
            if isinstance(t, bytes):
                # Dragons. This can't be represented by a single color
                warnings.warn('Palette images with Transparency ' +
                              ' expressed in bytes should be converted ' +
                              'to RGBA images')
                delete_trns = True
            else:
                # get the new transparency color.
                # use existing conversions
                trns_im = Image()._new(core.new(self.mode, (1, 1)))
                if self.mode == 'P':
                    trns_im.putpalette(self.palette)
                    if type(t) == tuple:
                        try:
                            t = trns_im.palette.getcolor(t)
                        except:
                            raise ValueError("Couldn't allocate a palette " +
                                             "color for transparency")
                trns_im.putpixel((0, 0), t)

                if mode in ('L', 'RGB'):
                    trns_im = trns_im.convert(mode)
                else:
                    # can't just retrieve the palette number, got to do it
                    # after quantization.
                    trns_im = trns_im.convert('RGB')
                trns = trns_im.getpixel((0, 0))

        elif self.mode == 'P' and mode == 'RGBA':
            t = self.info['transparency']
            delete_trns = True

            if isinstance(t, bytes):
                self.im.putpalettealphas(t)
            elif isinstance(t, int):
                self.im.putpalettealpha(t, 0)
            else:
                raise ValueError("Transparency for P mode should" +
                                 " be bytes or int")

    if mode == "P" and palette == ADAPTIVE:
        im = self.im.quantize(colors)
        new = self._new(im)
        from PIL import ImagePalette
        new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
        if delete_trns:
            # This could possibly happen if we requantize to fewer colors.
            # The transparency would be totally off in that case.
            del(new.info['transparency'])
        if trns is not None:
            try:
                new.info['transparency'] = new.palette.getcolor(trns)
            except:
                # if we can't make a transparent color, don't leave the old
                # transparency hanging around to mess us up.
                del(new.info['transparency'])
                warnings.warn("Couldn't allocate palette entry " +
                              "for transparency")
        return new

    # colorspace conversion
    if dither is None:
        dither = FLOYDSTEINBERG

    try:
        im = self.im.convert(mode, dither)
    except ValueError:
        try:
            # normalize source image and try again
            im = self.im.convert(getmodebase(self.mode))
            im = im.convert(mode, dither)
        except KeyError:
            raise ValueError("illegal conversion")

    new_im = self._new(im)
    if delete_trns:
        # crash fail if we leave a bytes transparency in an rgb/l mode.
        del(new_im.info['transparency'])
    if trns is not None:
        if new_im.mode == 'P':
            try:
                new_im.info['transparency'] = new_im.palette.getcolor(trns)
            except:
                del(new_im.info['transparency'])
                warnings.warn("Couldn't allocate palette entry " +
                              "for transparency")
        else:
            new_im.info['transparency'] = trns
    return new_im
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
    """
    Convert the image to 'P' mode with the specified number
    of colors.

    :param colors: The desired number of colors, <= 256
    :param method: 0 = median cut
                   1 = maximum coverage
                   2 = fast octree
                   3 = libimagequant
    :param kmeans: Integer
    :param palette: Quantize to the :py:class:`PIL.ImagingPalette` palette.
    :returns: A new image
    """
    self.load()

    if method is None:
        # defaults:
        method = 0
        if self.mode == 'RGBA':
            # median cut can't handle alpha; use fast octree instead
            method = 2

    if self.mode == 'RGBA' and method not in (2, 3):
        # Caller specified an invalid mode.
        raise ValueError(
            'Fast Octree (method == 2) and libimagequant (method == 3) ' +
            'are the only valid methods for quantizing RGBA images')

    if palette:
        # use palette from reference image
        palette.load()
        if palette.mode != "P":
            raise ValueError("bad mode for palette image")
        if self.mode != "RGB" and self.mode != "L":
            raise ValueError(
                "only RGB or L mode images can be quantized to a palette"
            )
        im = self.im.convert("P", 1, palette.im)
        return self._makeself(im)

    return self._new(self.im.quantize(colors, method, kmeans))
def copy(self):
    """
    Copies this image. Use this method if you wish to paste things
    into an image, but still retain the original.

    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Force pixel data into memory, then duplicate the core image.
    self.load()
    return self._new(self.im.copy())

# Support the copy module's copy.copy() protocol.
__copy__ = copy
def crop(self, box=None):
    """
    Returns a rectangular region from this image. The box is a 4-tuple
    defining the left, upper, right, and lower pixel coordinate.

    Note: Prior to Pillow 3.4.0, this was a lazy operation.

    :param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
    :rtype: :py:class:`~PIL.Image.Image`
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    self.load()
    if box is None:
        return self.copy()

    # Round coordinates to ints and clamp so right >= left, lower >= upper.
    x0, y0, x1, y1 = (int(round(v)) for v in box)
    x1 = max(x1, x0)
    y1 = max(y1, y0)
    return self._new(self.im.crop((x0, y0, x1, y1)))
def draft(self, mode, size):
    """
    Configures the image file loader so it returns a version of the
    image that as closely as possible matches the given mode and size.
    For example, you can use this method to convert a color JPEG to
    greyscale while loading it, or to extract a 128x192 version from a
    PCD file.

    Note that this method modifies the :py:class:`~PIL.Image.Image`
    object in place. If the image has already been loaded, this method
    has no effect.

    :param mode: The requested mode.
    :param size: The requested size.
    """
    # Base class does nothing; format plugins override this hook.
    pass
def _expand(self, xmargin, ymargin=None):
    # Internal helper: pad the image by the given margins, filling with 0.
    # A missing ymargin defaults to the same value as xmargin.
    if ymargin is None:
        ymargin = xmargin
    self.load()
    return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
    """
    Filters this image using the given filter. For a list of available
    filters, see the :py:mod:`~PIL.ImageFilter` module.

    :param filter: Filter kernel — an ImageFilter.Filter instance,
        or a class that can be called with no arguments to produce one.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    self.load()

    # Accept a filter class as well as an instance.  The builtin
    # callable() replaces isinstance(filter, collections.Callable):
    # the Callable alias in the collections namespace was deprecated
    # and removed entirely in Python 3.10.
    if callable(filter):
        filter = filter()
    if not hasattr(filter, "filter"):
        raise TypeError("filter argument should be ImageFilter.Filter " +
                        "instance or class")

    if self.im.bands == 1:
        return self._new(filter.filter(self.im))
    # fix to handle multiband images since _imaging doesn't
    ims = []
    for c in range(self.im.bands):
        ims.append(self._new(filter.filter(self.im.getband(c))))
    return merge(self.mode, ims)
def getbands(self):
    """
    Returns a tuple containing the name of each band in this image.
    For example, **getbands** on an RGB image returns ("R", "G", "B").

    :returns: A tuple containing band names.
    :rtype: tuple
    """
    # Band names are derived from the mode descriptor, not pixel data.
    return ImageMode.getmode(self.mode).bands
def getbbox(self):
    """
    Calculates the bounding box of the non-zero regions in the image.

    :returns: The bounding box is returned as a 4-tuple defining the
        left, upper, right, and lower pixel coordinate. If the image
        is completely empty, this method returns None.
    """
    self.load()
    # Delegates to the C core, which scans for non-zero pixels.
    return self.im.getbbox()
def getcolors(self, maxcolors=256):
    """
    Returns a list of colors used in this image.

    :param maxcolors: Maximum number of colors. If this number is
        exceeded, this method returns None. The default limit is
        256 colors.
    :returns: An unsorted list of (count, pixel) values.
    """
    self.load()
    if self.mode not in ("1", "L", "P"):
        return self.im.getcolors(maxcolors)

    # Single-band / palette images: derive (count, index) pairs from
    # the 256-bin histogram.
    hist = self.im.histogram()
    out = [(hist[i], i) for i in range(256) if hist[i]]
    if len(out) > maxcolors:
        return None
    return out
def getdata(self, band=None):
    """
    Returns the contents of this image as a sequence object containing
    pixel values. The sequence object is flattened, so that values for
    line one follow directly after the values of line zero, and so on.

    Note that the sequence object returned by this method is an
    internal PIL data type, which only supports certain sequence
    operations. To convert it to an ordinary sequence (e.g. for
    printing), use **list(im.getdata())**.

    :param band: What band to return. The default is to return
        all bands. To return a single band, pass in the index
        value (e.g. 0 to get the "R" band from an "RGB" image).
    :returns: A sequence-like object.
    """
    self.load()
    if band is None:
        return self.im  # could be abused
    return self.im.getband(band)
def getextrema(self):
    """
    Gets the minimum and maximum pixel values for each band in
    the image.

    :returns: For a single-band image, a 2-tuple containing the
        minimum and maximum pixel value. For a multi-band image,
        a tuple containing one 2-tuple for each band.
    """
    self.load()
    if self.im.bands <= 1:
        return self.im.getextrema()
    # Multi-band: one (min, max) pair per band.
    return tuple(self.im.getband(i).getextrema()
                 for i in range(self.im.bands))
def getim(self):
    """
    Returns a capsule that points to the internal image memory.

    :returns: A capsule object.
    """
    self.load()
    return self.im.ptr
def getpalette(self):
    """
    Returns the image palette as a list.

    :returns: A list of color values [r, g, b, ...], or None if the
        image has no palette.
    """
    self.load()
    try:
        # The C core raises ValueError when the image has no palette.
        pal = self.im.getpalette()
    except ValueError:
        return None  # no palette
    if bytes is str:
        # Python 2: palette arrives as a byte string; expand to ints.
        return [i8(c) for c in pal]
    return list(pal)
def getpixel(self, xy):
    """
    Returns the pixel value at a given position.

    :param xy: The coordinate, given as (x, y).
    :returns: The pixel value. If the image is a multi-layer image,
        this method returns a tuple.
    """
    self.load()
    # Prefer the pure-python access object when one has been attached.
    if self.pyaccess:
        return self.pyaccess.getpixel(xy)
    return self.im.getpixel(xy)
def getprojection(self):
    """
    Get projection to x and y axes

    :returns: Two sequences, indicating where there are non-zero
        pixels along the X-axis and the Y-axis, respectively.
    """
    self.load()
    xs, ys = self.im.getprojection()
    # The core returns byte strings; expand each byte to an int flag.
    return [i8(c) for c in xs], [i8(c) for c in ys]
def histogram(self, mask=None, extrema=None):
    """
    Returns a histogram for the image. The histogram is returned as a
    list of pixel counts, one for each pixel value in the source
    image. If the image has more than one band, the histograms for all
    bands are concatenated (for example, the histogram for an "RGB"
    image contains 768 values).

    A bilevel image (mode "1") is treated as a greyscale ("L") image
    by this method.

    If a mask is provided, the method returns a histogram for those
    parts of the image where the mask image is non-zero. The mask
    image must have the same size as the image, and be either a
    bi-level image (mode "1") or a greyscale image ("L").

    :param mask: An optional mask.
    :returns: A list containing pixel counts.
    """
    self.load()
    if mask:
        # Count only pixels where the mask is non-zero.
        mask.load()
        return self.im.histogram((0, 0), mask.im)
    if self.mode in ("I", "F"):
        # 32-bit / float images need explicit extrema to bin values.
        if extrema is None:
            extrema = self.getextrema()
        return self.im.histogram(extrema)
    return self.im.histogram()
def offset(self, xoffset, yoffset=None):
    # Removed API kept as an explicit tombstone so old callers get a
    # clear migration message instead of an AttributeError.
    raise NotImplementedError("offset() has been removed. " +
                              "Please call ImageChops.offset() instead.")
def paste(self, im, box=None, mask=None):
    """
    Pastes another image into this image.

    The box argument is either a 2-tuple giving the upper left corner,
    a 4-tuple defining the left, upper, right, and lower pixel
    coordinate, or None (same as (0, 0)). If a 4-tuple is given, the
    size of the pasted image must match the size of the region.

    If the modes don't match, the pasted image is converted to the
    mode of this image (see :py:meth:`~PIL.Image.Image.convert`).
    Instead of an image, the source can be an integer or tuple
    containing pixel values; the method then fills the region with the
    given color. When creating RGB images, you can also use color
    strings as supported by the ImageColor module.

    If a mask is given, this method updates only the regions indicated
    by the mask. You can use either "1", "L" or "RGBA" images (in the
    latter case, the alpha band is used as mask). Where the mask is
    255, the given image is copied as is. Where the mask is 0, the
    current value is preserved. Intermediate values will mix the two
    images together, including their alpha channels if they have them.

    See :py:meth:`~PIL.Image.Image.alpha_composite` if you want to
    combine images with respect to their alpha channels.

    :param im: Source image or pixel value (integer or tuple).
    :param box: An optional 4-tuple giving the region to paste into.
        If a 2-tuple is used instead, it's treated as the upper left
        corner. If omitted or None, the source is pasted into the
        upper left corner. If an image is given as the second argument
        and there is no third, the box defaults to (0, 0), and the
        second argument is interpreted as a mask image.
    :param mask: An optional mask image.
    """
    if isImageType(box) and mask is None:
        # abbreviated paste(im, mask) syntax
        mask = box
        box = None

    if box is None:
        # cover all of self
        box = (0, 0) + self.size

    if len(box) == 2:
        # upper left corner given; get size from image or mask
        if isImageType(im):
            size = im.size
        elif isImageType(mask):
            size = mask.size
        else:
            # FIXME: use self.size here?
            raise ValueError(
                "cannot determine region size; use 4-item box"
            )
        box += (box[0] + size[0], box[1] + size[1])

    if isStringType(im):
        # css3-style color specifier
        from PIL import ImageColor
        im = ImageColor.getcolor(im, self.mode)
    elif isImageType(im):
        im.load()
        if self.mode != im.mode:
            if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
                # should use an adapter for this!
                im = im.convert(self.mode)
        im = im.im

    self.load()
    if self.readonly:
        self._copy()

    if mask:
        mask.load()
        self.im.paste(im, box, mask.im)
    else:
        self.im.paste(im, box)
def point(self, lut, mode=None):
    """
    Maps this image through a lookup table or function.

    :param lut: A lookup table, containing 256 (or 65536 if
        self.mode=="I" and mode == "L") values per band in the
        image. A function can be used instead, it should take a
        single argument. The function is called once for each
        possible pixel value, and the resulting table is applied to
        all bands of the image.
    :param mode: Output mode (default is same as input). In the
        current version, this can only be used if the source image
        has mode "L" or "P", and the output has mode "1" or the
        source image mode is "I" and the output mode is "L".
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Docstring fix: a 16-bit "I" image needs a 65536-entry table
    # (the previous text said 65336, which is not 2**16).
    self.load()

    if isinstance(lut, ImagePointHandler):
        # Handler objects implement their own mapping.
        return lut.point(self)

    if callable(lut):
        # if it isn't a list, it should be a function
        if self.mode in ("I", "I;16", "F"):
            # check if the function can be used with point_transform
            # UNDONE wiredfool -- I think this prevents us from ever doing
            # a gamma function point transform on > 8bit images.
            scale, offset = _getscaleoffset(lut)
            return self._new(self.im.point_transform(scale, offset))
        # for other modes, convert the function to a table
        lut = [lut(i) for i in range(256)] * self.im.bands

    if self.mode == "F":
        # FIXME: _imaging returns a confusing error message for this case
        raise ValueError("point operation not supported for this mode")

    return self._new(self.im.point(lut, mode))
def putalpha(self, alpha):
    """
    Adds or replaces the alpha layer in this image. If the image does
    not have an alpha layer, it's converted to "LA" or "RGBA". The new
    layer must be either "L" or "1".

    :param alpha: The new alpha layer. This can either be an "L" or "1"
        image having the same size as this image, or an integer or
        other color value.
    """
    self.load()
    if self.readonly:
        self._copy()

    if self.mode not in ("LA", "RGBA"):
        # attempt to promote self to a matching alpha mode
        try:
            mode = getmodebase(self.mode) + "A"
            try:
                # Cheap path: switch the mode flag in place.
                self.im.setmode(mode)
                self.pyaccess = None
            except (AttributeError, ValueError):
                # do things the hard way
                im = self.im.convert(mode)
                if im.mode not in ("LA", "RGBA"):
                    raise ValueError  # sanity check
                self.im = im
                self.pyaccess = None
            self.mode = self.im.mode
        except (KeyError, ValueError):
            raise ValueError("illegal image mode")

    # Alpha band index: 1 for "LA", 3 for "RGBA".
    band = 1 if self.mode == "LA" else 3

    if isImageType(alpha):
        # alpha layer
        if alpha.mode not in ("1", "L"):
            raise ValueError("illegal image mode")
        alpha.load()
        if alpha.mode == "1":
            alpha = alpha.convert("L")
    else:
        # constant alpha
        try:
            self.im.fillband(band, alpha)
        except (AttributeError, ValueError):
            # do things the hard way
            alpha = new("L", self.size, alpha)
        else:
            return

    self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
    """
    Copies pixel data to this image. This method copies data from a
    sequence object into the image, starting at the upper left corner
    (0, 0), and continuing until either the image or the sequence
    ends. The scale and offset values are used to adjust the sequence
    values: **pixel = value*scale + offset**.

    :param data: A sequence object.
    :param scale: An optional scale value. The default is 1.0.
    :param offset: An optional offset value. The default is 0.0.
    """
    self.load()
    # Copy-on-write before mutating shared pixel storage.
    if self.readonly:
        self._copy()
    self.im.putdata(data, scale, offset)
def putpalette(self, data, rawmode="RGB"):
    """
    Attaches a palette to this image. The image must be a "P" or "L"
    image, and the palette sequence must contain 768 integer values,
    where each group of three values represent the red, green, and
    blue values for the corresponding pixel index. Instead of an
    integer sequence, you can use an 8-bit string.

    :param data: A palette sequence (either a list or a string).
    """
    from PIL import ImagePalette

    if self.mode not in ("L", "P"):
        raise ValueError("illegal image mode")
    self.load()

    if isinstance(data, ImagePalette.ImagePalette):
        palette = ImagePalette.raw(data.rawmode, data.palette)
    else:
        if not isinstance(data, bytes):
            # Normalise an int sequence to a byte string.
            if bytes is str:
                # Python 2
                data = "".join(chr(x) for x in data)
            else:
                data = bytes(data)
        palette = ImagePalette.raw(rawmode, data)

    self.mode = "P"
    self.palette = palette
    self.palette.mode = "RGB"
    self.load()  # install new palette
def putpixel(self, xy, value):
    """
    Modifies the pixel at the given position. The color is given as a
    single numerical value for single-band images, and a tuple for
    multi-band images.

    Note that this method is relatively slow. For more extensive
    changes, use :py:meth:`~PIL.Image.Image.paste` or the
    :py:mod:`~PIL.ImageDraw` module instead.

    See:

    * :py:meth:`~PIL.Image.Image.paste`
    * :py:meth:`~PIL.Image.Image.putdata`
    * :py:mod:`~PIL.ImageDraw`

    :param xy: The pixel coordinate, given as (x, y).
    :param value: The pixel value.
    """
    self.load()
    if self.readonly:
        # Copy-on-write: duplicate storage, invalidate any cached
        # access object, and reload before touching pixels.
        self._copy()
        self.pyaccess = None
        self.load()

    if self.pyaccess:
        return self.pyaccess.putpixel(xy, value)
    return self.im.putpixel(xy, value)
def resize(self, size, resample=NEAREST):
    """
    Returns a resized copy of this image.

    :param size: The requested size in pixels, as a 2-tuple:
        (width, height).
    :param resample: An optional resampling filter. This can be one of
        :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BOX`,
        :py:attr:`PIL.Image.BILINEAR`, :py:attr:`PIL.Image.HAMMING`,
        :py:attr:`PIL.Image.BICUBIC` or :py:attr:`PIL.Image.LANCZOS`.
        If omitted, or if the image has mode "1" or "P", it is set to
        :py:attr:`PIL.Image.NEAREST`. See: :ref:`concept-filters`.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if resample not in (NEAREST, BILINEAR, BICUBIC, LANCZOS, BOX, HAMMING):
        raise ValueError("unknown resampling filter")

    self.load()

    size = tuple(size)
    if self.size == size:
        # Nothing to do — return a cheap duplicate.
        return self._new(self.im)

    if self.mode in ("1", "P"):
        resample = NEAREST

    # Resample in a premultiplied-alpha mode so transparent pixels
    # don't bleed color into their neighbours.
    if self.mode == 'LA':
        return self.convert('La').resize(size, resample).convert('LA')
    if self.mode == 'RGBA':
        return self.convert('RGBa').resize(size, resample).convert('RGBA')

    return self._new(self.im.resize(size, resample))
def rotate(self, angle, resample=NEAREST, expand=0):
    """
    Returns a rotated copy of this image. This method returns a copy
    of this image, rotated the given number of degrees counter
    clockwise around its centre.

    :param angle: In degrees counter clockwise.
    :param resample: An optional resampling filter. This can be one of
        :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
        :py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
        environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
        interpolation in a 4x4 environment). If omitted, or if the
        image has mode "1" or "P", it is set to
        :py:attr:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
    :param expand: Optional expansion flag. If true, expands the output
        image to make it large enough to hold the entire rotated image.
        If false or omitted, make the output image the same size as the
        input image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    angle %= 360.0

    # Fast paths regardless of filter
    if angle == 0:
        return self.copy()
    if angle == 180:
        return self.transpose(ROTATE_180)
    if angle == 90 and expand:
        return self.transpose(ROTATE_90)
    if angle == 270 and expand:
        return self.transpose(ROTATE_270)

    # Build the inverse (output -> input) affine matrix. Rounding to
    # 15 places removes float noise from sin/cos near exact angles.
    theta = -math.radians(angle)
    matrix = [
        round(math.cos(theta), 15), round(math.sin(theta), 15), 0.0,
        round(-math.sin(theta), 15), round(math.cos(theta), 15), 0.0
    ]

    def xform(x, y, matrix=matrix):
        (a, b, c, d, e, f) = matrix
        return a * x + b * y + c, d * x + e * y + f

    w, h = self.size
    if expand:
        # calculate output size from the transformed corner points
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            x, y = xform(x, y)
            xx.append(x)
            yy.append(y)
        w = int(math.ceil(max(xx)) - math.floor(min(xx)))
        h = int(math.ceil(max(yy)) - math.floor(min(yy)))

    # adjust center
    x, y = xform(w / 2.0, h / 2.0)
    matrix[2] = self.size[0] / 2.0 - x
    matrix[5] = self.size[1] / 2.0 - y

    return self.transform((w, h), AFFINE, matrix, resample)
def save(self, fp, format=None, **params):
    """
    Saves this image under the given filename. If no format is
    specified, the format to use is determined from the filename
    extension, if possible.

    Keyword options can be used to provide additional instructions to
    the writer. If a writer doesn't recognise an option, it is
    silently ignored.

    You can use a file object instead of a filename. In this case, you
    must always specify the format. The file object must implement the
    ``seek``, ``tell``, and ``write`` methods, and be opened in binary
    mode.

    :param fp: A filename (string), pathlib.Path object or file object.
    :param format: Optional format override. If omitted, the format to
        use is determined from the filename extension. If a file
        object was used instead of a filename, this parameter should
        always be used.
    :param params: Extra parameters to the image writer.
    :returns: None
    :exception KeyError: If the output format could not be determined
        from the file name. Use the format option to solve this.
    :exception IOError: If the file could not be written. The file may
        have been created, and may contain partial data.
    """
    filename = ""
    open_fp = False
    if isPath(fp):
        filename = fp
        open_fp = True
    elif sys.version_info >= (3, 4):
        from pathlib import Path
        if isinstance(fp, Path):
            filename = str(fp)
            open_fp = True
    # NOTE(review): on Python >= 3.4 this branch is unreachable because
    # the version check above always wins the elif chain, so plain file
    # objects never get their .name recorded here — confirm intended.
    elif hasattr(fp, "name") and isPath(fp.name):
        # only set the name for metadata purposes
        filename = fp.name

    # may mutate self!
    self.load()

    save_all = False
    if 'save_all' in params:
        save_all = params.pop('save_all')
    self.encoderinfo = params
    self.encoderconfig = ()

    preinit()

    ext = os.path.splitext(filename)[1].lower()

    if not format:
        if ext not in EXTENSION:
            init()
        format = EXTENSION[ext]

    if format.upper() not in SAVE:
        init()
    if save_all:
        save_handler = SAVE_ALL[format.upper()]
    else:
        save_handler = SAVE[format.upper()]

    if open_fp:
        # Open also for reading ("+"), because TIFF save_all
        # writer needs to go back and edit the written data.
        fp = builtins.open(filename, "w+b")

    try:
        save_handler(self, fp, filename)
    finally:
        # do what we can to clean up
        if open_fp:
            fp.close()
def seek(self, frame):
    """
    Seeks to the given frame in this sequence file. If you seek beyond
    the end of the sequence, the method raises an **EOFError**
    exception. When a sequence file is opened, the library
    automatically seeks to frame 0.

    Note that in the current version of the library, most sequence
    formats only allows you to seek to the next frame.

    See :py:meth:`~PIL.Image.Image.tell`.

    :param frame: Frame number, starting at 0.
    :exception EOFError: If the call attempts to seek beyond the end
        of the sequence.
    """
    # overridden by file handlers; the base image has a single frame
    if frame != 0:
        raise EOFError
def show(self, title=None, command=None):
    """
    Displays this image. This method is mainly intended for debugging
    purposes.

    On Unix platforms, this method saves the image to a temporary PPM
    file, and calls either the **xv** utility or the **display**
    utility, depending on which one can be found.

    On macOS, this method saves the image to a temporary BMP file, and
    opens it with the native Preview application.

    On Windows, it saves the image to a temporary BMP file, and uses
    the standard BMP display utility to show it (usually Paint).

    :param title: Optional title to use for the image window,
        where possible.
    :param command: command used to show the image
    """
    # All platform handling lives in the module-level _show() helper.
    _show(self, title=title, command=command)
def split(self):
    """
    Split this image into individual bands. This method returns a
    tuple of individual image bands from an image. For example,
    splitting an "RGB" image creates three new images each containing
    a copy of one of the original bands (red, green, blue).

    :returns: A tuple containing bands.
    """
    self.load()
    if self.im.bands == 1:
        # Single-band image: the "split" is just a copy.
        return (self.copy(),)
    return tuple(self._new(self.im.getband(i))
                 for i in range(self.im.bands))
def tell(self):
    """
    Returns the current frame number. See
    :py:meth:`~PIL.Image.Image.seek`.

    :returns: Frame number, starting with 0.
    """
    # Base images have a single frame; file handlers override this.
    return 0
def thumbnail(self, size, resample=BICUBIC):
    """
    Make this image into a thumbnail. This method modifies the image
    to contain a thumbnail version of itself, no larger than the given
    size. This method calculates an appropriate thumbnail size to
    preserve the aspect of the image, calls the
    :py:meth:`~PIL.Image.Image.draft` method to configure the file
    reader (where applicable), and finally resizes the image.

    Note that this function modifies the :py:class:`~PIL.Image.Image`
    object in place. If you need to use the full resolution image as
    well, apply this method to a :py:meth:`~PIL.Image.Image.copy` of
    the original image.

    :param size: Requested size.
    :param resample: Optional resampling filter. This can be one of
        :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
        :py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
        If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
        (was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
    :returns: None
    """
    # preserve aspect ratio: shrink each axis in turn, never below 1px
    x, y = self.size
    if x > size[0]:
        y = int(max(y * size[0] / x, 1))
        x = int(size[0])
    if y > size[1]:
        x = int(max(x * size[1] / y, 1))
        y = int(size[1])
    size = x, y

    if size == self.size:
        return

    self.draft(None, size)

    im = self.resize(size, resample)

    # Adopt the resized image's storage in place.
    self.im = im.im
    self.mode = im.mode
    self.size = size

    self.readonly = 0
    self.pyaccess = None
# FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
    """
    Transforms this image. This method creates a new image with the
    given size, and the same mode as the original, and copies data to
    the new image using the given transform.

    :param size: The output size.
    :param method: The transformation method. This is one of
        :py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
        :py:attr:`PIL.Image.AFFINE` (affine transform),
        :py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
        :py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle),
        or :py:attr:`PIL.Image.MESH` (map a number of source
        quadrilaterals in one operation).
    :param data: Extra data to the transformation method.
    :param resample: Optional resampling filter. It can be one of
        :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
        :py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
        environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
        interpolation in a 4x4 environment). If omitted, or if the
        image has mode "1" or "P", it is set to
        :py:attr:`PIL.Image.NEAREST`.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Transform in a premultiplied-alpha mode so transparent pixels
    # don't bleed color.
    if self.mode == 'LA':
        return self.convert('La').transform(
            size, method, data, resample, fill).convert('LA')
    if self.mode == 'RGBA':
        return self.convert('RGBa').transform(
            size, method, data, resample, fill).convert('RGBA')

    if isinstance(method, ImageTransformHandler):
        return method.transform(size, self, resample=resample, fill=fill)

    if hasattr(method, "getdata"):
        # compatibility w. old-style transform objects
        method, data = method.getdata()

    if data is None:
        raise ValueError("missing method data")

    im = new(self.mode, size, None)
    if method == MESH:
        # list of quads
        for box, quad in data:
            im.__transformer(box, self, QUAD, quad, resample, fill)
    else:
        im.__transformer((0, 0) + size, self, method, data, resample, fill)

    return im
def __transformer(self, box, image, method, data,
                  resample=NEAREST, fill=1):
    # Internal worker for transform(): normalises every supported
    # method to an AFFINE/PERSPECTIVE/QUAD coefficient vector and
    # hands it to the C core's transform2().
    w = box[2] - box[0]
    h = box[3] - box[1]

    if method == AFFINE:
        data = data[0:6]
    elif method == EXTENT:
        # convert extent to an affine transform
        x0, y0, x1, y1 = data
        xs = float(x1 - x0) / w
        ys = float(y1 - y0) / h
        method = AFFINE
        data = (xs, 0, x0, 0, ys, y0)
    elif method == PERSPECTIVE:
        data = data[0:8]
    elif method == QUAD:
        # quadrilateral warp. data specifies the four corners
        # given as NW, SW, SE, and NE.
        nw = data[0:2]
        sw = data[2:4]
        se = data[4:6]
        ne = data[6:8]
        x0, y0 = nw
        As = 1.0 / w
        At = 1.0 / h
        data = (x0, (ne[0] - x0) * As, (sw[0] - x0) * At,
                (se[0] - sw[0] - ne[0] + x0) * As * At,
                y0, (ne[1] - y0) * As, (sw[1] - y0) * At,
                (se[1] - sw[1] - ne[1] + y0) * As * At)
    else:
        raise ValueError("unknown transformation method")

    if resample not in (NEAREST, BILINEAR, BICUBIC):
        raise ValueError("unknown resampling filter")

    image.load()
    self.load()

    if image.mode in ("1", "P"):
        resample = NEAREST

    self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
    """
    Transpose image (flip or rotate in 90 degree steps)

    :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
        :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`,
        :py:attr:`PIL.Image.ROTATE_90`,
        :py:attr:`PIL.Image.ROTATE_180`,
        :py:attr:`PIL.Image.ROTATE_270` or
        :py:attr:`PIL.Image.TRANSPOSE`.
    :returns: Returns a flipped or rotated copy of this image.
    """
    self.load()
    return self._new(self.im.transpose(method))
def effect_spread(self, distance):
    """
    Randomly spread pixels in an image.

    :param distance: Distance to spread pixels.
    """
    self.load()
    return self._new(self.im.effect_spread(distance))
def toqimage(self):
    """Returns a QImage copy of this image"""
    # Import lazily so PIL works without Qt installed.
    from PIL import ImageQt
    if not ImageQt.qt_is_installed:
        raise ImportError("Qt bindings are not installed")
    return ImageQt.toqimage(self)
def toqpixmap(self):
    """Returns a QPixmap copy of this image"""
    # Import lazily so PIL works without Qt installed.
    from PIL import ImageQt
    if not ImageQt.qt_is_installed:
        raise ImportError("Qt bindings are not installed")
    return ImageQt.toqpixmap(self)
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler(object):
    # Marker mixin: objects of this type implement their own mapping
    # and are dispatched to by Image.point().
    pass
class ImageTransformHandler(object):
    # Marker mixin: objects of this type implement their own geometry
    # transform and are dispatched to by Image.transform().
    pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
    """Create greyscale wedge (for debugging only)"""
    return Image()._new(core.wedge("L"))
def _check_size(size):
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a 2 tuple of (width, height)
:returns: True, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] <= 0 or size[1] <= 0:
raise ValueError("Width and Height must be > 0")
return True
def new(mode, size, color=0):
    """
    Creates a new image with the given mode and size.

    :param mode: The mode to use for the new image. See:
        :ref:`concept-modes`.
    :param size: A 2-tuple, containing (width, height) in pixels.
    :param color: What color to use for the image. Default is black.
        If given, this should be a single integer or floating point
        value for single-band modes, and a tuple for multi-band modes
        (one value per band). When creating RGB images, you can also
        use color strings as supported by the ImageColor module. If
        the color is None, the image is not initialised.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    _check_size(size)

    if color is None:
        # don't initialize
        return Image()._new(core.new(mode, size))

    if isStringType(color):
        # css3-style specifier
        from PIL import ImageColor
        color = ImageColor.getcolor(color, mode)

    return Image()._new(core.fill(mode, size, color))
def frombytes(mode, size, data, decoder_name="raw", *args):
    """
    Creates a copy of an image memory from pixel data in a buffer.

    In its simplest form, this function takes three arguments
    (mode, size, and unpacked pixel data).

    You can also use any pixel decoder supported by PIL. For more
    information on available decoders, see the section
    :ref:`Writing Your Own File Decoder <file-decoders>`.

    Note that this function decodes pixel data only, not entire
    images. If you have an entire image in a string, wrap it in a
    :py:class:`~io.BytesIO` object, and use
    :py:func:`~PIL.Image.open` to load it.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A byte buffer containing raw data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    _check_size(size)

    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    if decoder_name == "raw" and args == ():
        # default rawmode for the raw decoder matches the image mode
        args = mode

    im = new(mode, size)
    im.frombytes(data, decoder_name, args)
    return im
def fromstring(*args, **kw):
    # Removed API kept as a tombstone pointing at the replacement.
    raise NotImplementedError("fromstring() has been removed. " +
                              "Please call frombytes() instead.")
def frombuffer(mode, size, data, decoder_name="raw", *args):
    """
    Creates an image memory referencing pixel data in a byte buffer.

    This function is similar to :py:func:`~PIL.Image.frombytes`, but
    uses data in the byte buffer, where possible. This means that
    changes to the original buffer object are reflected in this
    image). Not all modes can share memory; supported modes include
    "L", "RGBX", "RGBA", and "CMYK".

    Note that this function decodes pixel data only, not entire
    images. If you have an entire image file in a string, wrap it in a
    **BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.

    In the current version, the default parameters used for the "raw"
    decoder differs from that used for :py:func:`~PIL.Image.frombytes`.
    This is a bug, and will probably be fixed in a future release. The
    current release issues a warning if you do this; to disable the
    warning, you should provide the full set of parameters. See below
    for details.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A bytes or other buffer object containing raw
        data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder. For the
        default encoder ("raw"), it's recommended that you provide the
        full set of parameters::

            frombuffer(mode, size, data, "raw", mode, 0, 1)

    :returns: An :py:class:`~PIL.Image.Image` object.

    .. versionadded:: 1.1.4
    """
    _check_size(size)

    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]

    if decoder_name == "raw":
        if args == ():
            warnings.warn(
                "the frombuffer defaults may change in a future release; "
                "for portability, change the call to read:\n"
                "  frombuffer(mode, size, data, 'raw', mode, 0, 1)",
                RuntimeWarning, stacklevel=2
            )
            args = mode, 0, -1  # may change to (mode, 0, 1) post-1.1.6
        if args[0] in _MAPMODES:
            # Zero-copy path: map the buffer directly into image memory.
            im = new(mode, (1, 1))
            im = im._new(
                core.map_buffer(data, size, decoder_name, None, 0, args)
            )
            im.readonly = 1
            return im

    return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
    """
    Creates an image memory from an object exporting the array interface
    (using the buffer protocol).

    If obj is not contiguous, then the tobytes method is called
    and :py:func:`~PIL.Image.frombuffer` is used.

    :param obj: Object with array interface
    :param mode: Mode to use (will be determined from type if None)
      See: :ref:`concept-modes`.
    :returns: An image object.

    .. versionadded:: 1.1.6
    """
    interface = obj.__array_interface__
    shape = interface['shape']
    ndim = len(shape)
    # __array_interface__ is a plain dict; 'strides' is absent for
    # C-contiguous data.
    strides = interface.get('strides')
    if mode is None:
        try:
            # Normalize the leading two dimensions so the typemap lookup
            # depends only on channel count and dtype.
            typekey = (1, 1) + shape[2:], interface['typestr']
            mode, rawmode = _fromarray_typemap[typekey]
        except KeyError:
            raise TypeError("Cannot handle this data type")
    else:
        rawmode = mode
    if mode in ("1", "L", "I", "P", "F"):
        ndmax = 2
    elif mode == "RGB":
        ndmax = 3
    else:
        ndmax = 4
    if ndim > ndmax:
        raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
    # Array shape is (rows, cols); image size is (width, height).
    size = shape[1], shape[0]
    if strides is not None:
        # Non-contiguous data: serialize to a contiguous byte string first.
        to_bytes = getattr(obj, 'tobytes', None)
        obj = to_bytes() if to_bytes is not None else obj.tostring()
    return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
def fromqimage(im):
    """Create an image instance from a QImage image.

    Raises ImportError when the Qt bindings are unavailable.
    """
    from PIL import ImageQt
    if ImageQt.qt_is_installed:
        return ImageQt.fromqimage(im)
    raise ImportError("Qt bindings are not installed")
def fromqpixmap(im):
    """Create an image instance from a QPixmap image.

    Raises ImportError when the Qt bindings are unavailable.
    """
    from PIL import ImageQt
    if ImageQt.qt_is_installed:
        return ImageQt.fromqpixmap(im)
    raise ImportError("Qt bindings are not installed")
# Lookup table used by fromarray(): maps a normalized array signature to
# the PIL (mode, rawmode) pair that can decode it.
_fromarray_typemap = {
    # (shape, typestr) => mode, rawmode
    # first two members of shape are set to one
    # ((1, 1), "|b1"): ("1", "1"), # broken
    ((1, 1), "|u1"): ("L", "L"),
    ((1, 1), "|i1"): ("I", "I;8"),
    ((1, 1), "<u2"): ("I", "I;16"),
    ((1, 1), ">u2"): ("I", "I;16B"),
    ((1, 1), "<i2"): ("I", "I;16S"),
    ((1, 1), ">i2"): ("I", "I;16BS"),
    ((1, 1), "<u4"): ("I", "I;32"),
    ((1, 1), ">u4"): ("I", "I;32B"),
    ((1, 1), "<i4"): ("I", "I;32S"),
    ((1, 1), ">i4"): ("I", "I;32BS"),
    ((1, 1), "<f4"): ("F", "F;32F"),
    ((1, 1), ">f4"): ("F", "F;32BF"),
    ((1, 1), "<f8"): ("F", "F;64F"),
    ((1, 1), ">f8"): ("F", "F;64BF"),
    ((1, 1, 2), "|u1"): ("LA", "LA"),
    ((1, 1, 3), "|u1"): ("RGB", "RGB"),
    ((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}

# shortcuts
# Native-endian 32-bit int/float map to the plain "I"/"F" rawmodes.
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def _decompression_bomb_check(size):
    # Warn when a claimed image size exceeds MAX_IMAGE_PIXELS — a heuristic
    # guard against decompression-bomb DOS inputs. A None limit disables
    # the check entirely.
    if MAX_IMAGE_PIXELS is None:
        return
    pixels = size[0] * size[1]
    if pixels <= MAX_IMAGE_PIXELS:
        return
    warnings.warn(
        "Image size (%d pixels) exceeds limit of %d pixels, "
        "could be decompression bomb DOS attack." %
        (pixels, MAX_IMAGE_PIXELS),
        DecompressionBombWarning)
def open(fp, mode="r"):
    """
    Opens and identifies the given image file.

    This is a lazy operation; this function identifies the file, but
    the file remains open and the actual image data is not read from
    the file until you try to process the data (or call the
    :py:meth:`~PIL.Image.Image.load` method). See
    :py:func:`~PIL.Image.new`.

    :param fp: A filename (string), pathlib.Path object or a file object.
       The file object must implement :py:meth:`~file.read`,
       :py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,
       and be opened in binary mode.
    :param mode: The mode. If given, this argument must be "r".
    :returns: An :py:class:`~PIL.Image.Image` object.
    :exception IOError: If the file cannot be found, or the image cannot be
       opened and identified.
    """
    if mode != "r":
        raise ValueError("bad mode %r" % mode)

    # Resolve fp to (filename, open binary stream). pathlib may be absent
    # on older Pythons, hence the guarded import.
    filename = ""
    if isPath(fp):
        filename = fp
    else:
        try:
            from pathlib import Path
            if isinstance(fp, Path):
                filename = str(fp.resolve())
        except ImportError:
            pass
    if filename:
        fp = builtins.open(filename, "rb")

    # Non-seekable streams are buffered fully into memory so the format
    # probes below can rewind.
    try:
        fp.seek(0)
    except (AttributeError, io.UnsupportedOperation):
        fp = io.BytesIO(fp.read())

    # 16 bytes is enough for every registered format's magic-number check.
    prefix = fp.read(16)

    preinit()

    def _open_core(fp, filename, prefix):
        # Try each registered plugin in order; a plugin either accepts the
        # prefix (or has no accept hook) and parses the header, or raises
        # one of the "not my format" exceptions below and we move on.
        for i in ID:
            try:
                factory, accept = OPEN[i]
                if not accept or accept(prefix):
                    fp.seek(0)
                    im = factory(fp, filename)
                    _decompression_bomb_check(im.size)
                    return im
            except (SyntaxError, IndexError, TypeError, struct.error):
                # Leave disabled by default, spams the logs with image
                # opening failures that are entirely expected.
                # logger.debug("", exc_info=True)
                continue
        return None

    im = _open_core(fp, filename, prefix)

    # First pass may miss formats whose plugins load lazily; init() loads
    # the full plugin set and reports whether anything new appeared.
    if im is None:
        if init():
            im = _open_core(fp, filename, prefix)

    if im:
        return im

    raise IOError("cannot identify image file %r"
                  % (filename if filename else fp))
#
# Image processing.
def alpha_composite(im1, im2):
    """
    Alpha composite im2 over im1.

    :param im1: The first image. Must have mode RGBA.
    :param im2: The second image. Must have mode RGBA, and the same size as
       the first image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Force both images to be fully loaded before touching the C layer.
    for source in (im1, im2):
        source.load()
    return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
    """
    Creates a new image by interpolating between two input images, using
    a constant alpha::

        out = image1 * (1.0 - alpha) + image2 * alpha

    :param im1: The first image.
    :param im2: The second image. Must have the same mode and size as
       the first image.
    :param alpha: The interpolation alpha factor. If alpha is 0.0, a
       copy of the first image is returned. If alpha is 1.0, a copy of
       the second image is returned. There are no restrictions on the
       alpha value. If necessary, the result is clipped to fit into
       the allowed output range.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Force both images to be fully loaded before touching the C layer.
    for source in (im1, im2):
        source.load()
    return im1._new(core.blend(im1.im, im2.im, alpha))
def composite(image1, image2, mask):
    """
    Create composite image by blending images using a transparency mask.

    :param image1: The first image.
    :param image2: The second image. Must have the same mode and
       size as the first image.
    :param mask: A mask image. This image can have mode
       "1", "L", or "RGBA", and must have the same size as the
       other two images.
    """
    # Paste image1 over a copy of image2, masked; the copy keeps the
    # inputs unmodified.
    result = image2.copy()
    result.paste(image1, None, mask)
    return result
def eval(image, *args):
    """
    Applies the function (which should take one argument) to each pixel
    in the given image. If the image has more than one band, the same
    function is applied to each band. Note that the function is
    evaluated once for each possible pixel value, so you cannot use
    random components or other generators.

    :param image: The input image.
    :param function: A function object, taking one integer argument.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Only the first positional argument (the per-pixel function) is used;
    # Image.point builds the lookup table from it.
    function = args[0]
    return image.point(function)
def merge(mode, bands):
    """
    Merge a set of single band images into a new multiband image.

    :param mode: The mode to use for the output image. See:
        :ref:`concept-modes`.
    :param bands: A sequence containing one single-band image for
        each band in the output image. All bands must have the
        same size.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    if getmodebands(mode) != len(bands) or "*" in mode:
        raise ValueError("wrong number of bands")
    first = bands[0]
    band_type = getmodetype(mode)
    # Every band must share the first band's size and the output's base type.
    for band in bands[1:]:
        if band.mode != band_type:
            raise ValueError("mode mismatch")
        if band.size != first.size:
            raise ValueError("size mismatch")
    out = core.new(mode, first.size)
    for index in range(getmodebands(mode)):
        bands[index].load()
        out.putband(bands[index].im, index)
    return first._new(out)
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
    """
    Register an image file plugin. This function should not be used
    in application code.

    :param id: An image format identifier.
    :param factory: An image file factory method.
    :param accept: An optional predicate applied to the file prefix to
       quickly reject images having another format.
    """
    # Format identifiers are stored uppercase; ID preserves probe order.
    id = id.upper()
    ID.append(id)
    OPEN[id] = (factory, accept)
def register_mime(id, mimetype):
    """
    Registers an image MIME type. This function should not be used
    in application code.

    :param id: An image format identifier.
    :param mimetype: The image MIME type for this format.
    """
    key = id.upper()
    MIME[key] = mimetype
def register_save(id, driver):
    """
    Registers an image save function. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param driver: A function to save images in this format.
    """
    key = id.upper()
    SAVE[key] = driver
def register_save_all(id, driver):
    """
    Registers an image function to save all the frames
    of a multiframe format. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param driver: A function to save images in this format.
    """
    key = id.upper()
    SAVE_ALL[key] = driver
def register_extension(id, extension):
    """
    Registers an image extension. This function should not be
    used in application code.

    :param id: An image format identifier.
    :param extension: An extension used for this format.
    """
    # Extensions are matched case-insensitively, hence the lower()/upper().
    EXTENSION[extension.lower()] = id.upper()
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
    # Default display hook; user code may rebind this module-level function
    # to change how images are displayed.
    # override me, as necessary
    _showxv(image, **options)
def _showxv(image, title=None, **options):
    # Delegate display to the ImageShow framework (imported lazily to
    # avoid a hard dependency at module import time).
    from PIL import ImageShow
    ImageShow.show(image, title, **options)
# --------------------------------------------------------------------
# Effects
def effect_mandelbrot(size, extent, quality):
    """
    Generate a Mandelbrot set covering the given extent.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param extent: The extent to cover, as a 4-tuple:
       (x0, y0, x1, y2).
    :param quality: Quality.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Wrap the freshly rendered core image in a new Image instance.
    return Image()._new(core.effect_mandelbrot(size, extent, quality))
def effect_noise(size, sigma):
    """
    Generate Gaussian noise centered around 128.

    :param size: The requested size in pixels, as a 2-tuple:
       (width, height).
    :param sigma: Standard deviation of noise.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # Wrap the freshly rendered core image in a new Image instance.
    return Image()._new(core.effect_noise(size, sigma))
|
sumedh123/debatify
|
venv/lib/python2.7/site-packages/PIL/Image.py
|
Python
|
mit
| 82,447
|
[
"Gaussian"
] |
795411b9479d434b97ea09ca0b26d789aa9bf718785efe37e19b1cae0e307ce4
|
#
# Copyright (C) 2011 by Brian Weck
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
#
import time
from . import log, reddit, queue
class Stylesheet:
    """Maintains the cssbot-managed region of a subreddit's stylesheet.

    The managed region is delimited by the BEGIN/END marker comments; a
    PAUSE marker anywhere in the stylesheet disables updates entirely.
    """

    # Marker comments recognized inside the subreddit CSS.
    CSSBOT_PAUSE = "/* --- cssbot action PAUSE --- */"
    CSSBOT_BEGIN = "/* --- cssbot BEGIN --- */"
    CSSBOT_END = "/* --- cssbot END --- */"

    def __init__(self, subreddit, selector, rule):
        """
        :param subreddit: subreddit whose stylesheet is managed.
        :param selector: CSS selector suffix appended to each matched thing.
        :param rule: CSS rule body applied to the generated selector list.
        """
        self.log = log.getLogger("cssbot.style.Stylesheet")
        self.subreddit = subreddit
        self.selector = selector
        self.rule = rule
        self.reddit = reddit.APIWrapper()
        self.queue = queue.Queue()

    def merge_css(self, orig="", update=""):
        """Replace the BEGIN..END region of *orig* with *update*.

        :returns: the merged stylesheet string, or False when either
            marker is missing from *orig*.
        """
        _out = []
        # Look for the markers; do nothing if either is not found.
        # BUG FIX: the END marker was previously probed with str.find(),
        # which returns -1 instead of raising ValueError, so a missing END
        # marker went undetected and everything after BEGIN was dropped.
        try:
            orig.index(self.CSSBOT_BEGIN)
            orig.index(self.CSSBOT_END)
        except ValueError:
            self.log.warn("could not find start/end markers.")
            return False
        # parse the stylesheet
        lines = orig.splitlines()
        cursor = iter(lines)
        # copy everything up to and including the BEGIN marker.
        for line in cursor:
            _out.append(line)
            if line.startswith(self.CSSBOT_BEGIN):
                break
        # insert the new css plus a modification timestamp.
        _out.append(update)
        _out.append("")
        _out.append("/* last modified %s */" % int(time.time()))
        # skip the old managed content up to (and re-emit) the END marker.
        for line in cursor:
            if line.startswith(self.CSSBOT_END):
                _out.append(line)
                break
        # copy the remainder untouched.
        for line in cursor:
            _out.append(line)
        return "\n".join(_out)

    #
    # check if this css has the pause command in it.
    #
    def is_paused(self, css):
        """Return True when *css* contains the PAUSE marker."""
        try:
            if css:
                css.index(self.CSSBOT_PAUSE)
                return True
        except ValueError:
            pass
        return False

    def generate_and_save(self):
        """Regenerate the managed CSS from matched queue entries and save it.

        Returns False (without saving) when the stylesheet is paused.
        """
        # login to reddit.
        self.reddit.login()
        # get the current css.
        current_css = self.reddit.get_stylesheet(self.subreddit)
        if self.is_paused(current_css):
            self.log.warn("not updating css, paused.")
            return False
        self.log.info("current css:\n %s", current_css)
        # build one ".id-<name> <selector>" snippet per matched thing.
        matched_names = []
        matched_snippet = []
        for thing in self.queue.find({"data.matched": "Y", "data.subreddit": self.subreddit}):
            name = thing["data"]["name"]
            matched_names.append(name)
            matched_snippet.append(".id-%s %s" % (name, self.selector))
        self.log.debug("the matched are %s", matched_names)
        if matched_snippet:
            generated_css = "%s %s" % ((", ".join(matched_snippet)), self.rule)
        else:
            generated_css = ""
        self.log.debug("generated:\n%s", generated_css)
        # merge the new and current css.
        merged_css = self.merge_css(current_css, generated_css)
        # save the css only when merging succeeded (markers were present).
        if merged_css:
            self.log.info("saving css:\n %s", merged_css)
            self.reddit.save_stylesheet(self.subreddit, merged_css)
|
bweck/cssbot
|
cssbot/style.py
|
Python
|
mit
| 3,103
|
[
"Brian"
] |
5990f1a8931700d25451572af8bb6b7327e98ad08f464d0bebcc5af80c95bbde
|
#!/usr/bin/env python
########################################################################
# File : dirac-externals-requirements
# Author : Adri/Federico/Andrei
########################################################################
""" If RequiredExternals section is found in releases.cfg of any extension,
then some python packages to install with pip may be found. This script
will install the requested modules.
The command is called from the dirac-install general installation command.
"""
import os
import sys
import commands
from DIRAC.Core.Base import Script
Script.disableCS()
from DIRAC import gLogger, rootPath, S_OK
from DIRAC.Core.Utilities.CFG import CFG
__RCSID__ = "$Id$"
# Default installation type
instType = "server"


def setInstallType(val):
    """Switch callback: record the requested installation type."""
    global instType
    instType = val
    return S_OK()


# Register the -t/--type switch, then parse the command line so instType
# is final before the requirement scan below.
Script.registerSwitch("t:", "type=", "Installation type. 'server' by default.", setInstallType)
Script.parseCommandLine(ignoreErrors=True)
def pipInstall(package, switches=""):
    """Install *package* with pip.

    :param package: requirement string, e.g. "name==1.0".
    :param switches: extra pip command-line switches.
    :returns: (exit status, combined stdout/stderr) tuple.
    """
    # The right pip should be in the PATH, which is the case after sourcing the DIRAC bashrc
    cmd = "pip install --trusted-host pypi.python.org %s %s" % (switches, package)
    gLogger.notice("Executing %s" % cmd)
    # commands is Python 2 only (equivalent of subprocess.getstatusoutput).
    return commands.getstatusoutput(cmd)
# Collect all the requested python modules to install.
# Scan every "<something>DIRAC" extension directory under rootPath for a
# releases.cfg declaring RequiredExternals for this installation type.
reqDict = {}
for entry in os.listdir(rootPath):
    # Only extension directories whose name ends in "DIRAC" are considered.
    if len(entry) < 5 or entry.find("DIRAC") != len(entry) - 5:
        continue
    reqFile = os.path.join(rootPath, entry, "releases.cfg")
    try:
        with open(reqFile, "r") as extfd:
            reqCFG = CFG().loadFromBuffer(extfd.read())
    except BaseException:
        gLogger.verbose("%s not found" % reqFile)
        continue
    reqList = reqCFG.getOption("/RequiredExternals/%s" % instType.capitalize(), [])
    if not reqList:
        gLogger.verbose("%s does not have requirements for %s installation" % (entry, instType))
        continue
    for req in reqList:
        # Split "name==1.2" / "name>=1.2" into package name and version
        # condition.
        # BUG FIX: the original computed cond.find(req) — searching the whole
        # requirement string inside the two-character operator — which always
        # failed, so version conditions were silently dropped.
        reqName = False
        reqCond = ""
        for cond in ("==", ">="):
            iP = req.find(cond)
            if iP > 0:
                reqName = req[:iP]
                reqCond = req[iP:]
                break
        if not reqName:
            reqName = req
        # First extension to request a package wins; duplicates are reported.
        if reqName not in reqDict:
            reqDict[reqName] = (reqCond, entry)
        else:
            gLogger.notice("Skipping %s, it's already requested by %s" % (reqName, reqDict[reqName][1]))

if not reqDict:
    gLogger.notice("No extra python module requested to be installed")
    sys.exit(0)

# Install each collected requirement, logging failures without aborting.
for reqName in reqDict:
    package = "%s%s" % (reqName, reqDict[reqName][0])
    gLogger.notice("Requesting installation of %s" % package)
    status, output = pipInstall(package)
    if status != 0:
        gLogger.error(output)
    else:
        gLogger.notice("Successfully installed %s" % package)
|
andresailer/DIRAC
|
Core/scripts/dirac-externals-requirements.py
|
Python
|
gpl-3.0
| 2,702
|
[
"DIRAC"
] |
41558a11a0ec96976978466378db907336dc33fe36a7180f7c4cc8fc9af10464
|
from test import test_support
import unittest
import codecs
import StringIO
class UTF16Test(unittest.TestCase):
    """Checks that the utf-16 stream writer emits exactly one BOM."""

    # "spamspam" encoded as UTF-16 little-endian / big-endian, each with a
    # single leading BOM.
    spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        _, _, reader, writer = codecs.lookup("utf-16")
        # encode some stream
        s = StringIO.StringIO()
        f = writer(s)
        f.write(u"spam")
        f.write(u"spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assert_(d == self.spamle or d == self.spambe)
        # try to read it back
        s = StringIO.StringIO(d)
        f = reader(s)
        self.assertEquals(f.read(), u"spamspam")
class EscapeDecodeTest(unittest.TestCase):
    """escape_decode of an empty string must yield ("", 0)."""

    def test_empty_escape_decode(self):
        self.assertEquals(codecs.escape_decode(""), ("", 0))
class RecodingTest(unittest.TestCase):
    """Regression test: recoding through EncodedFile must not crash."""

    def test_recoding(self):
        f = StringIO.StringIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        f2.write(u"a")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# Punycode sample strings: (unicode, expected punycode) pairs taken from
# RFC 3492 section 7.1.
punycode_testcases = [
    # A Arabic (Egyptian):
    (u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     "egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    (u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     "ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    (u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     "ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    (u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     u"\u0065\u0073\u006B\u0079",
     "Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    (u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     u"\u05D1\u05E8\u05D9\u05EA",
     "4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    (u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     u"\u0939\u0948\u0902",
     "i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    (u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     "n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    (u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     "989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     "psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    (u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     u"\u0438",
     "b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    (u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     u"\u0061\u00F1\u006F\u006C",
     "PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    (u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     u"\u0056\u0069\u1EC7\u0074",
     "TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    (u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     "3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    (u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     u"\u004F\u004E\u004B\u0045\u0059\u0053",
     "-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    (u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     "Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    (u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     "2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    (u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     u"\u308B\u0035\u79D2\u524D",
     "MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    (u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     "de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    (u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     "d9juau41awczczp"),
    # (S) -> $1.00 <-
    (u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     u"\u003C\u002D",
     "-> $1.00 <--")
]

# Sanity check: every entry must be a 2-tuple; report malformed ones.
for i in punycode_testcases:
    if len(i) != 2:
        print repr(i)
class PunycodeTest(unittest.TestCase):
    """Round-trip the RFC 3492 sample strings through the punycode codec."""

    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            self.assertEquals(uni.encode("punycode").lower(), puny.lower())

    def test_decode(self):
        for uni, puny in punycode_testcases:
            self.assertEquals(uni, puny.decode("punycode"))
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# Nameprep vectors as (utf-8 input, expected utf-8 output) pairs; an
# expected output of None means the input must be rejected, and a
# (None, None) entry marks a deliberately skipped vector (positions matter
# for the "Test 3.%d" numbering in NameprepTest).
nameprep_tests = [
    # 3.1 Map to nothing.
    ('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     '\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     '\xb8\x8f\xef\xbb\xbf',
     'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    ('CAFE',
     'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    ('\xc3\x9f',
     'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    ('\xc4\xb0',
     'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    ('\xc5\x83\xcd\xba',
     '\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    ('j\xcc\x8c\xc2\xa0\xc2\xaa',
     '\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    ('\xe1\xbe\xb7',
     '\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    ('\xc7\xb0',
     '\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    ('\xce\x90',
     '\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    ('\xce\xb0',
     '\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    ('\xe1\xba\x96',
     '\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    ('\xe1\xbd\x96',
     '\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (' ',
     ' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    ('\xc2\xa0',
     ' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    ('\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    ('\xe2\x80\x80',
     ' '),
    # 3.18 Zero Width Space U+200b.
    ('\xe2\x80\x8b',
     ''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    ('\xe3\x80\x80',
     ' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    ('\x10\x7f',
     '\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    ('\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    ('\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    ('\xef\xbb\xbf',
     ''),
    # 3.24 Non-ASCII control character U+1D175.
    ('\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    ('\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    ('\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    ('\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    ('\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    ('\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    ('\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    ('\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    ('\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    ('\xcd\x81',
     '\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    ('\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    ('\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    ('\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    ('\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    ('foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    ('foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    ('foo\xef\xb9\xb6bar',
     'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    ('\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    ('\xd8\xa71\xd8\xa8',
     '\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #('\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    ('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     '\xaa\xce\xb0\xe2\x80\x80',
     'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    ('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     '\x80',
     'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     '\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     '\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
class NameprepTest(unittest.TestCase):
    """Run the libidn nameprep vectors against encodings.idna.nameprep."""

    def test_nameprep(self):
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped
                continue
            # The Unicode strings are given in UTF-8
            orig = unicode(orig, "utf-8")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = unicode(prepped, "utf-8")
                try:
                    self.assertEquals(nameprep(orig), prepped)
                except Exception,e:
                    # Report the RFC section number of the failing vector.
                    raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
def test_main():
    """Run all codec test cases through the regrtest driver."""
    test_support.run_unittest(
        UTF16Test,
        EscapeDecodeTest,
        RecodingTest,
        PunycodeTest,
        NameprepTest
    )


if __name__ == "__main__":
    test_main()
|
trivoldus28/pulsarch-verilog
|
tools/local/bas-release/bas,3.9/lib/python/lib/python2.3/test/test_codecs.py
|
Python
|
gpl-2.0
| 11,956
|
[
"FEFF"
] |
0142424d68f315bccfb3e8f53bc13b01010296cc50d35c02d90ec887f53bea65
|
from moose import Annotator
from PyQt4.QtGui import QColor
import numpy as np
import os
import config
import pickle
import random
import matplotlib
# Load the pickled colormap used to translate integer kkit color indices
# into (r, g, b) triplets (consumed by colorCheck below).
colormap_file = open(os.path.join(config.settings[config.KEY_COLORMAP_DIR], 'rainbow2.pkl'), 'rb')
colorMap = pickle.load(colormap_file)
colormap_file.close()

# Named colors considered too pale/dark to be legible as random picks.
ignoreColor = ["mistyrose","antiquewhite","aliceblue","azure","bisque","black","blanchedalmond","blue","cornsilk","darkolivegreen","darkslategray","dimgray","floralwhite","gainsboro","ghostwhite","honeydew","ivory","lavender","lavenderblush","lemonchiffon","lightcyan","lightgoldenrodyellow","lightgray","lightyellow","linen","mediumblue","mintcream","navy","oldlace","papayawhip","saddlebrown","seashell","snow","wheat","white","whitesmoke"]

# name -> hex map of all matplotlib named colors (Python 2 iteritems).
matplotcolor = {}
for name, hexno in matplotlib.colors.cnames.iteritems():
    matplotcolor[name] = hexno
def getRandColor():
    """Return a random QColor drawn from matplotlib's named colors,
    re-drawing until the pick is not in the ignoreColor blacklist."""
    k = random.choice(matplotcolor.keys())
    if k in ignoreColor:
        return getRandColor()
    else:
        print " l =", matplotcolor[k]
        return QColor(matplotcolor[k])
def getRandColor1():
    """Return a random QColor built from raw RGB values, rejecting picks
    whose channels all fall inside the mid-range band (66..104)."""
    color = (np.random.randint(low=0, high=255, size=3)).tolist()
    if not all((x <= 65 or x >= 105) for x in (color[0], color[1], color[2])):
        return QColor(color[0], color[1], color[2])
    else:
        # NOTE(review): the retry falls back to getRandColor() (the
        # named-color picker) rather than recursing into getRandColor1();
        # possibly a typo — confirm intent before changing.
        return getRandColor()
def getColor(iteminfo):
    """Return (textcolor, bgcolor) for the given moose annotation path.

    Empty annotation values fall back to 'green' (text) and 'blue'
    (background); when both come back identical the text color is replaced
    with a random one. Both values are normalized through colorCheck(),
    so invalid annotations are silently replaced rather than raised here.
    """
    textcolor = Annotator(iteminfo).getField('textColor')
    bgcolor = Annotator(iteminfo).getField('color')
    # Fall back for missing annotations.
    if textcolor == '':
        textcolor = 'green'
    if bgcolor == '':
        bgcolor = 'blue'
    # Identical fg/bg (common in kkit files) would be unreadable.
    if textcolor == bgcolor:
        textcolor = getRandColor()
    textcolor = colorCheck(textcolor, "fc")
    bgcolor = colorCheck(bgcolor, "bg")
    return (textcolor, bgcolor)
def colorCheck(fc_bgcolor, fcbg):
    """ textColor or background can be anything like string or tuple or list \
    if string its taken as colorname further down in validColorcheck checked for valid color, \
    but for tuple and list its taken as r,g,b value.
    """
    if isinstance(fc_bgcolor, str):
        if fc_bgcolor.startswith("#"):
            # "#RRGGBB" hex string.
            fc_bgcolor = QColor(fc_bgcolor)
        elif fc_bgcolor.isdigit():
            """ color is int a map from int to r,g,b triplets from pickled color map file """
            # kkit indices are doubled to index into the pickled colormap.
            tc = int(fc_bgcolor)
            tc = 2*tc
            pickledColor = colorMap[tc]
            fc_bgcolor = QColor(*pickledColor)
        elif fc_bgcolor.isalpha() or fc_bgcolor.isalnum():
            # Plain color name — validated (invalid names become white).
            fc_bgcolor = validColorcheck(fc_bgcolor)
        else:
            # SECURITY NOTE(review): eval() of annotation text — acceptable
            # for trusted model files only; consider ast.literal_eval for
            # untrusted input.
            fc_bgcolor = QColor(*eval(fc_bgcolor))
            # fc_bgcolor = validColorcheck(fc_bgcolor)
    return(fc_bgcolor)
def validColorcheck(color):
    '''
    Both in Qt4.7 and 4.8 if not a valid color it makes it as back but in 4.7 there will be a warning mssg which is taken here
    checking if textcolor or backgroundcolor is valid color, if 'No' making white color as default
    where I have not taken care for checking what will be backgroundcolor for textcolor or textcolor for backgroundcolor
    '''
    candidate = QColor(color)
    if candidate.isValid():
        return candidate
    return QColor("white")
def moveMin(reference, collider, layoutPt, margin):
    """Push *collider* clear of *reference*, then redraw connection arrows.

    The collider is displaced right of the reference, and up or down
    depending on which side of the reference it already lies.
    """
    referenceRect = reference.sceneBoundingRect()
    colliderRect = collider.sceneBoundingRect()
    # Horizontal displacement: place collider just right of reference.
    xDistance = referenceRect.x() + referenceRect.width() / 2.0 + colliderRect.width() / 2.0 + margin - colliderRect.x()
    yDistance = 0.0
    if colliderRect.y() < referenceRect.y():
        # Collider is above the reference: push it further up.
        yDistance = (referenceRect.y() - referenceRect.height() / 2.0 - colliderRect.height() / 2.0 - margin) - colliderRect.y()
    else:
        # Collider is level with or below the reference: push it down.
        yDistance = referenceRect.y() + referenceRect.height() / 2.0 + colliderRect.height() / 2.0 + margin - colliderRect.y()
    #if xDistance > yDistance:
    collider.moveBy(xDistance, yDistance)
    #else:
    #    collider.moveBy(xDistance, 0.0)
    layoutPt.drawLine_arrow(itemignoreZooming=False)
def moveX(reference, collider, layoutPt, margin):
    """Shift *collider* horizontally clear of *reference*, then redraw arrows."""
    referenceRect = reference.sceneBoundingRect()
    colliderRect = collider.sceneBoundingRect()
    # NOTE(review): taking abs() of both x coordinates looks suspicious for
    # negative scene coordinates — confirm the intended displacement.
    xc = abs(referenceRect.topRight().x()) - abs(colliderRect.topLeft().x()) + margin
    yc = 0.0
    collider.moveBy(xc, yc)
    layoutPt.drawLine_arrow(itemignoreZooming=False)
def handleCollisions(compartments, moveCallback, layoutPt,margin = 5.0):
if len(compartments) is 0 : return
compartments = sorted(compartments, key = lambda c: c.sceneBoundingRect().center().x())
reference = compartments.pop(0);
print reference.name
referenceRect = reference.sceneBoundingRect()
colliders = filter( lambda compartment : referenceRect.intersects(compartment.sceneBoundingRect())
, compartments
)
for collider in colliders:
moveCallback(reference, collider, layoutPt,margin)
return handleCollisions(compartments, moveCallback, layoutPt,margin)
|
dilawar/moose-full
|
moose-gui/plugins/kkitUtil.py
|
Python
|
gpl-2.0
| 5,378
|
[
"MOOSE"
] |
7e3ef2b858a8eaac0c81474ed95750036e5b25bbe8af8d560b91e9929dbeb0a7
|
#################################################
## PTDNN - Python Toolkit for Deep Neural Network
## Author: Yajie Miao
#################################################
import os
import sys
from utils.learn_rates import LearningRateExpDecay
class BnfExpConfig(object):
    """Configuration for a bottleneck-feature (BNF) DNN experiment.

    NOTE(review): this file is a template -- the comments say "WORK/" and
    N_OUTS are replaced when the config is installed (N_OUTS is undefined
    as written), so the literal values below must not be altered.
    """
    def __init__(self):
        # working directory; by default, the pfiles should be here
        self.wdir = "WORK/" # Note: we'll replace CWD with the current directory
                            # when we move this to the right place.
        self.pretrain_data = self.wdir + 'train.pfile.gz' # pretraining data
        self.pretrain_output = self.wdir + "rbm.ptr"      # pretraining output
        # finetuning data
        self.finetune_train_data = self.wdir + 'train.pfile.gz'  # finetune training data
        self.finetune_valid_data = self.wdir + 'valid.pfile.gz'  # finetune validation data
        self.finetune_output = self.wdir + "final.nnet.raw"      # finetune output
        self.nnet_kaldi_fmt = self.wdir + "final.nnet"           # Kaldi-format copy of the net
        # global config for nnet topo
        self.n_ins=250   # size of input data
        self.n_outs=N_OUTS # number of output targets.. we'll replace this with
                           # the correct number when we move this to the right place.
        self.hidden_layers_sizes=[1024, 1024, 1024, 1024, 42, 1024] # hidden layer sizes; 42 is the bottleneck
        self.bnf_layer_index = 5     # the index of the Bottleneck layer
        self.pretrain_layer_num = 4  # number of hidden layers to be pretrained
        # global config for data
        self.shuffle = True
        self.chunk_size = '200m'
        # pretraining batch size
        self.pretrain_batch_size = 128 # batch-size in pretraining
        # pretraining schedule
        self.pretrain_gbrbm_lr = 0.005 # learning rate for Gaussian-Bernoulli RBM
        self.pretrain_rbm_lr = 0.08    # learning rate for Bernoulli-Bernoulli RBM
        self.initial_momentum = 0.5    # initial momentum
        self.final_momentum = 0.9      # final momentum
        self.initial_momentum_epoch = 5 # for how many epochs do we use initial_momentum
        self.pretraining_epochs=10      # total epochs
        # finetuning batch size
        self.finetune_batch_size = 256  # batch-size for finetuning
        # finetuning schedule
        self.finetune_momentum = 0.5    # momentum for finetuning
        self.lrate = LearningRateExpDecay(start_rate=0.08, # starting learning rate
                                     scale_by = 0.5,       # decaying factor in ramping
                                     max_epochs = 1000,    # 'dump' epoch limit, never can be reached
                                     min_derror_ramp_start = 0.01, # min validation error difference to trigger ramping
                                     min_derror_stop = 0.01, # min validation error difference to stop finetuning, after ramping
                                     init_error = 100)
|
irrawaddy28/babel
|
s5c/conf/bnf/config_limited.py
|
Python
|
apache-2.0
| 3,287
|
[
"Gaussian"
] |
ed8ee2a66aaa89481b72ffa26161eb5b22af7a7e874345249803373cb3cea334
|
# FermiLib plugin to interface with Psi4
#
# Copyright (C) 2017 ProjectQ-Framework (www.projectq.ch)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This is a simple script for generating data."""
import os
from fermilib.utils import MolecularData
from fermilibpluginpsi4 import run_psi4
if __name__ == '__main__':
    # Chemical parameters for the H2 dissociation scan.
    element_names = ['H', 'H']
    basis = 'sto-3g'
    charge = 0
    multiplicity = 1

    # Bond lengths: the equilibrium point (for testing) followed by a full
    # dissociation curve from 0.1 to 3.0 angstroms in 0.1 steps.
    spacings = [0.7414] + [0.1 * r for r in range(1, 31)]

    # Options forwarded unchanged to every Psi4 run.
    run_options = dict(run_scf=1,
                       run_mp2=1,
                       run_cisd=1,
                       run_ccsd=1,
                       run_fci=1,
                       verbose=1,
                       tolerate_error=1)

    # Run the diatomic H2 curve.
    for spacing in spacings:
        description = "{}".format(spacing)
        geometry = [[element_names[0], [0, 0, 0]],
                    [element_names[1], [0, 0, spacing]]]
        molecule = MolecularData(geometry,
                                 basis,
                                 multiplicity,
                                 charge,
                                 description)
        molecule = run_psi4(molecule, **run_options)
        molecule.save()

    # Run a single LiH point at 1.45 angstroms.
    geometry = [['Li', [0, 0, 0]],
                ['H', [0, 0, 1.45]]]
    molecule = MolecularData(geometry,
                             basis,
                             multiplicity,
                             charge,
                             "1.45")
    molecule = run_psi4(molecule, **run_options)
    molecule.save()
|
ProjectQ-Framework/FermiLib-Plugin-Psi4
|
examples/generate_diatomic.py
|
Python
|
lgpl-3.0
| 2,889
|
[
"Psi4"
] |
f2f95bb08f4f9ee2a5bde4d111ac8652a1b42d456ad41dc55aa2765cee8208f2
|
#!/usr/bin/env python3
"""Create a distances/angles definition file for atoms near a center atom.

Writes one "center other" line per atom within CUTOFF of the chosen center
atom, followed by one "a center b" line per pair of those atoms (angle with
the center atom as vertex). Indices in the output are 1-based.
"""
import sys
from gaussian import GaussianCom, GaussianLog
import math

# Atoms farther than this from the center atom (same units as Atom.distance)
# are ignored.
CUTOFF = 2.5

try:
    atom_number = int(sys.argv[1])
    distances_filename = sys.argv[2]
    gaussian_file = GaussianCom(sys.argv[3])
except Exception:
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt/
    # SystemExit); any setup failure still just prints usage and exits.
    print("Usage: create_distances_file <atom_number> <distances_files> <gaussian_example>")
    sys.exit()

center_atom = gaussian_file.atoms_list[atom_number - 1]
distances_file_lines = []
atoms_considered = []
atoms_list = gaussian_file.atoms_list

# Distances: every atom (other than the center itself) within CUTOFF.
for no, atom in enumerate(atoms_list):
    if atom.distance(center_atom) < CUTOFF and atom is not center_atom:
        line = "{0} {1}\n".format(atom_number, no + 1)
        distances_file_lines.append(line)
        atoms_considered.append(atom)

# Angles: every unordered pair of considered atoms, center atom as the vertex.
for no, atom in enumerate(atoms_considered):
    for other_atom in atoms_considered[no + 1:]:
        line = "{0} {1} {2}\n".format(atoms_list.index(atom) + 1, atom_number,
                                      atoms_list.index(other_atom) + 1)
        distances_file_lines.append(line)

with open(distances_filename, 'w') as distances_file:
    distances_file.writelines(distances_file_lines)
|
eduardoftoliveira/qt_scripts
|
scripts/create_distances_file.py
|
Python
|
gpl-3.0
| 1,141
|
[
"Gaussian"
] |
3f526c1043aa18286771b3b48ab83d7fcd0a80d873c492ca24a561f80d2b6981
|
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.contrib.staging import StagingArea
from baselines import logger
from baselines.her.util import (
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
from baselines.her.normalizer import Normalizer
from baselines.her.replay_buffer import ReplayBuffer
from baselines.common.mpi_adam import MpiAdam
def dims_to_shapes(input_dims):
    """Map each input dimension to a shape tuple: (d,) if d > 0, else ()."""
    shapes = {}
    for name, dim in input_dims.items():
        shapes[name] = (dim,) if dim > 0 else ()
    return shapes
global demoBuffer #buffer for demonstrations
class DDPG(object):
    @store_args
    def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
                 Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
                 rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
                 bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
                 sample_transitions, gamma, reuse=False, **kwargs):
        """Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).
        Added functionality to use demonstrations for training to Overcome exploration problem.
        Args:
            input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
                actions (u)
            buffer_size (int): number of transitions that are stored in the replay buffer
            hidden (int): number of units in the hidden layers
            layers (int): number of hidden layers
            network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
            polyak (float): coefficient for Polyak-averaging of the target network
            batch_size (int): batch size for training
            Q_lr (float): learning rate for the Q (critic) network
            pi_lr (float): learning rate for the pi (actor) network
            norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
            norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
            max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
            action_l2 (float): coefficient for L2 penalty on the actions
            clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
            scope (str): the scope used for the TensorFlow graph
            T (int): the time horizon for rollouts
            rollout_batch_size (int): number of parallel rollouts per DDPG agent
            subtract_goals (function): function that subtracts goals from each other
            relative_goals (boolean): whether or not relative goals should be fed into the network
            clip_pos_returns (boolean): whether or not positive returns should be clipped
            clip_return (float): clip returns to be in [-clip_return, clip_return]
            sample_transitions (function) function that samples from the replay buffer
            gamma (float): gamma used for Q learning updates
            reuse (boolean): whether or not the networks should be reused
            bc_loss: whether or not the behavior cloning loss should be used as an auxilliary loss
            q_filter: whether or not a filter on the q value update should be used when training with demonstartions
            num_demo: Number of episodes in to be used in the demonstration buffer
            demo_batch_size: number of samples to be used from the demonstrations buffer, per mpi thread
            prm_loss_weight: Weight corresponding to the primary loss
            aux_loss_weight: Weight corresponding to the auxilliary loss also called the cloning loss
        """
        if self.clip_return is None:
            self.clip_return = np.inf
        self.create_actor_critic = import_function(self.network_class)
        input_shapes = dims_to_shapes(self.input_dims)
        self.dimo = self.input_dims['o']
        self.dimg = self.input_dims['g']
        self.dimu = self.input_dims['u']
        # Prepare staging area for feeding data to the model.
        stage_shapes = OrderedDict()
        for key in sorted(self.input_dims.keys()):
            if key.startswith('info_'):
                continue
            stage_shapes[key] = (None, *input_shapes[key])
        for key in ['o', 'g']:
            stage_shapes[key + '_2'] = stage_shapes[key]
        stage_shapes['r'] = (None,)
        self.stage_shapes = stage_shapes
        # Create network.
        with tf.variable_scope(self.scope):
            self.staging_tf = StagingArea(
                dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
                shapes=list(self.stage_shapes.values()))
            self.buffer_ph_tf = [
                tf.placeholder(tf.float32, shape=shape) for shape in self.stage_shapes.values()]
            self.stage_op = self.staging_tf.put(self.buffer_ph_tf)
            self._create_network(reuse=reuse)
        # Configure the replay buffer. 'o' (and 'ag' below) carry one extra
        # timestep (T+1) relative to actions/goals.
        buffer_shapes = {key: (self.T if key != 'o' else self.T + 1, *input_shapes[key])
                         for key in input_shapes}
        buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
        buffer_shapes['ag'] = (self.T + 1, self.dimg)
        buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
        self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
        global demoBuffer
        demoBuffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)  # initialize the demo buffer; in the same way as the primary data buffer

    def _random_action(self, n):
        """Return `n` actions sampled uniformly from [-max_u, max_u]."""
        return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))

    def _preprocess_og(self, o, ag, g):
        """Clip observations/goals; make goals relative to achieved goals if configured."""
        if self.relative_goals:
            g_shape = g.shape
            g = g.reshape(-1, self.dimg)
            ag = ag.reshape(-1, self.dimg)
            g = self.subtract_goals(g, ag)
            g = g.reshape(*g_shape)
        o = np.clip(o, -self.clip_obs, self.clip_obs)
        g = np.clip(g, -self.clip_obs, self.clip_obs)
        return o, g

    def get_actions(self, o, ag, g, noise_eps=0., random_eps=0., use_target_net=False,
                    compute_Q=False):
        """Compute (possibly exploration-noised) actions for observations `o` and goals `g`.

        Returns the actions, plus the Q values when `compute_Q` is set.
        """
        o, g = self._preprocess_og(o, ag, g)
        policy = self.target if use_target_net else self.main
        # values to compute
        vals = [policy.pi_tf]
        if compute_Q:
            vals += [policy.Q_pi_tf]
        # feed
        feed = {
            policy.o_tf: o.reshape(-1, self.dimo),
            policy.g_tf: g.reshape(-1, self.dimg),
            policy.u_tf: np.zeros((o.size // self.dimo, self.dimu), dtype=np.float32)
        }
        ret = self.sess.run(vals, feed_dict=feed)
        # action postprocessing
        u = ret[0]
        noise = noise_eps * self.max_u * np.random.randn(*u.shape)  # gaussian noise
        u += noise
        u = np.clip(u, -self.max_u, self.max_u)
        u += np.random.binomial(1, random_eps, u.shape[0]).reshape(-1, 1) * (self._random_action(u.shape[0]) - u)  # eps-greedy
        if u.shape[0] == 1:
            u = u[0]
        u = u.copy()
        ret[0] = u
        if len(ret) == 1:
            return ret[0]
        else:
            return ret

    def initDemoBuffer(self, demoDataFile, update_stats=True):
        """Fill the global demonstration buffer from the episodes in `demoDataFile`.

        Optionally folds the demo transitions into the o/g normalizer statistics.
        """
        demoData = np.load(demoDataFile)  # load the demonstration data from data file
        info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
        info_values = [np.empty((self.T, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
        for epsd in range(self.num_demo):  # we initialize the whole demo buffer at the start of the training
            obs, acts, goals, achieved_goals = [], [], [], []
            i = 0  # single rollout per demo episode -> second axis index is always 0
            for transition in range(self.T):
                obs.append([demoData['obs'][epsd][transition].get('observation')])
                acts.append([demoData['acs'][epsd][transition]])
                goals.append([demoData['obs'][epsd][transition].get('desired_goal')])
                achieved_goals.append([demoData['obs'][epsd][transition].get('achieved_goal')])
                for idx, key in enumerate(info_keys):
                    info_values[idx][transition, i] = demoData['info'][epsd][transition][key]
            # 'o' and 'ag' need the final (T-th) entry as well.
            obs.append([demoData['obs'][epsd][self.T].get('observation')])
            achieved_goals.append([demoData['obs'][epsd][self.T].get('achieved_goal')])
            episode = dict(o=obs,
                           u=acts,
                           g=goals,
                           ag=achieved_goals)
            for key, value in zip(info_keys, info_values):
                episode['info_{}'.format(key)] = value
            episode = convert_episode_to_batch_major(episode)
            global demoBuffer
            demoBuffer.store_episode(episode)  # create the observation dict and append them into the demonstration buffer
            print("Demo buffer size currently ", demoBuffer.get_current_size())  # print out the demonstration buffer size
            if update_stats:
                # add transitions to normalizer to normalize the demo data as well
                episode['o_2'] = episode['o'][:, 1:, :]
                episode['ag_2'] = episode['ag'][:, 1:, :]
                num_normalizing_transitions = transitions_in_episode_batch(episode)
                transitions = self.sample_transitions(episode, num_normalizing_transitions)
                o, o_2, g, ag = transitions['o'], transitions['o_2'], transitions['g'], transitions['ag']
                transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
                # No need to preprocess the o_2 and g_2 since this is only used for stats
                self.o_stats.update(transitions['o'])
                self.g_stats.update(transitions['g'])
                self.o_stats.recompute_stats()
                self.g_stats.recompute_stats()
            episode.clear()

    def store_episode(self, episode_batch, update_stats=True):
        """
        episode_batch: array of batch_size x (T or T+1) x dim_key
        'o' is of size T+1, others are of size T
        """
        self.buffer.store_episode(episode_batch)
        if update_stats:
            # add transitions to normalizer
            episode_batch['o_2'] = episode_batch['o'][:, 1:, :]
            episode_batch['ag_2'] = episode_batch['ag'][:, 1:, :]
            num_normalizing_transitions = transitions_in_episode_batch(episode_batch)
            transitions = self.sample_transitions(episode_batch, num_normalizing_transitions)
            o, o_2, g, ag = transitions['o'], transitions['o_2'], transitions['g'], transitions['ag']
            transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
            # No need to preprocess the o_2 and g_2 since this is only used for stats
            self.o_stats.update(transitions['o'])
            self.g_stats.update(transitions['g'])
            self.o_stats.recompute_stats()
            self.g_stats.recompute_stats()

    def get_current_buffer_size(self):
        """Return the number of transitions currently in the replay buffer."""
        return self.buffer.get_current_size()

    def _sync_optimizers(self):
        """Synchronize the Adam optimizers across MPI workers."""
        self.Q_adam.sync()
        self.pi_adam.sync()

    def _grads(self):
        """Run one forward/backward pass and fetch losses and flattened gradients."""
        # Avoid feed_dict here for performance!
        # NOTE(review): the second fetch is `self.main.Q_pi_tf`, not
        # `self.pi_loss_tf` -- this matches upstream baselines, so the value
        # reported as "actor_loss" is actually the critic's Q(s, pi(s)).
        critic_loss, actor_loss, Q_grad, pi_grad = self.sess.run([
            self.Q_loss_tf,
            self.main.Q_pi_tf,
            self.Q_grad_tf,
            self.pi_grad_tf
        ])
        return critic_loss, actor_loss, Q_grad, pi_grad

    def _update(self, Q_grad, pi_grad):
        """Apply one MPI-Adam step to the critic and the actor."""
        self.Q_adam.update(Q_grad, self.Q_lr)
        self.pi_adam.update(pi_grad, self.pi_lr)

    def sample_batch(self):
        """Sample a training batch, mixing in demonstration samples when bc_loss is set.

        When mixing, the demo samples are appended AFTER the rollout samples so
        the boolean mask built in _create_network lines up with them.
        """
        if self.bc_loss:  # use demonstration buffer to sample as well if bc_loss flag is set TRUE
            transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
            global demoBuffer
            transitionsDemo = demoBuffer.sample(self.demo_batch_size)  # sample from the demo buffer
            for k, values in transitionsDemo.items():
                rolloutV = transitions[k].tolist()
                for v in values:
                    rolloutV.append(v.tolist())
                transitions[k] = np.array(rolloutV)
        else:
            transitions = self.buffer.sample(self.batch_size)  # otherwise only sample from primary buffer
        o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
        ag, ag_2 = transitions['ag'], transitions['ag_2']
        transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
        transitions['o_2'], transitions['g_2'] = self._preprocess_og(o_2, ag_2, g)
        transitions_batch = [transitions[key] for key in self.stage_shapes.keys()]
        return transitions_batch

    def stage_batch(self, batch=None):
        """Push a batch (sampled if not given) into the TF staging area."""
        if batch is None:
            batch = self.sample_batch()
        assert len(self.buffer_ph_tf) == len(batch)
        self.sess.run(self.stage_op, feed_dict=dict(zip(self.buffer_ph_tf, batch)))

    def train(self, stage=True):
        """Run one training step; returns (critic_loss, actor_loss)."""
        if stage:
            self.stage_batch()
        critic_loss, actor_loss, Q_grad, pi_grad = self._grads()
        self._update(Q_grad, pi_grad)
        return critic_loss, actor_loss

    def _init_target_net(self):
        """Copy the main network's weights into the target network."""
        self.sess.run(self.init_target_net_op)

    def update_target_net(self):
        """Polyak-average the main network's weights into the target network."""
        self.sess.run(self.update_target_net_op)

    def clear_buffer(self):
        """Empty the primary replay buffer."""
        self.buffer.clear_buffer()

    def _vars(self, scope):
        """Return the trainable variables under `<self.scope>/<scope>`."""
        res = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope + '/' + scope)
        assert len(res) > 0
        return res

    def _global_vars(self, scope):
        """Return all global variables under `<self.scope>/<scope>` (may be empty)."""
        res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope + '/' + scope)
        return res

    def _create_network(self, reuse=False):
        """Build the actor-critic graph: normalizers, main/target nets, losses, optimizers."""
        logger.info("Creating a DDPG agent with action space %d x %s..." % (self.dimu, self.max_u))
        self.sess = tf.get_default_session()
        if self.sess is None:
            self.sess = tf.InteractiveSession()
        # running averages
        with tf.variable_scope('o_stats') as vs:
            if reuse:
                vs.reuse_variables()
            self.o_stats = Normalizer(self.dimo, self.norm_eps, self.norm_clip, sess=self.sess)
        with tf.variable_scope('g_stats') as vs:
            if reuse:
                vs.reuse_variables()
            self.g_stats = Normalizer(self.dimg, self.norm_eps, self.norm_clip, sess=self.sess)
        # mini-batch sampling.
        batch = self.staging_tf.get()
        batch_tf = OrderedDict([(key, batch[i])
                                for i, key in enumerate(self.stage_shapes.keys())])
        batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
        # choose only the demo buffer samples: sample_batch appends the
        # demo_batch_size demo samples at the END of the batch, so the mask is
        # zeros for rollout samples followed by ones for demo samples.
        mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis=0)
        # networks
        with tf.variable_scope('main') as vs:
            if reuse:
                vs.reuse_variables()
            self.main = self.create_actor_critic(batch_tf, net_type='main', **self.__dict__)
            vs.reuse_variables()
        with tf.variable_scope('target') as vs:
            if reuse:
                vs.reuse_variables()
            target_batch_tf = batch_tf.copy()
            target_batch_tf['o'] = batch_tf['o_2']
            target_batch_tf['g'] = batch_tf['g_2']
            self.target = self.create_actor_critic(
                target_batch_tf, net_type='target', **self.__dict__)
            vs.reuse_variables()
        assert len(self._vars("main")) == len(self._vars("target"))
        # loss functions
        target_Q_pi_tf = self.target.Q_pi_tf
        clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
        target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
        self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
        # NOTE: a duplicated re-assignment of pi_loss_tf that followed this
        # if/elif/else (recomputing the non-demo loss a second time, and
        # overriding the bc_loss variants) has been removed; each branch now
        # defines the actor loss exactly once.
        if self.bc_loss == 1 and self.q_filter == 1:  # train with demonstrations and use bc_loss and q_filter both
            maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1])  # where is the demonstrator action better than actor action according to the critic? choose those samples only
            # define the cloning loss on the actor's actions only on the samples which adhere to the above masks
            self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))
            self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)  # primary loss scaled by it's respective weight prm_loss_weight
            self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))  # L2 loss on action values scaled by the same weight prm_loss_weight
            self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf  # adding the cloning loss to the actor loss as an auxilliary loss scaled by its weight aux_loss_weight
        elif self.bc_loss == 1 and self.q_filter == 0:  # train with demonstrations without q_filter
            self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
            self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
            self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
            self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
        else:  # If not training with demonstrations
            self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
            self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
        Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
        pi_grads_tf = tf.gradients(self.pi_loss_tf, self._vars('main/pi'))
        assert len(self._vars('main/Q')) == len(Q_grads_tf)
        assert len(self._vars('main/pi')) == len(pi_grads_tf)
        self.Q_grads_vars_tf = zip(Q_grads_tf, self._vars('main/Q'))
        self.pi_grads_vars_tf = zip(pi_grads_tf, self._vars('main/pi'))
        self.Q_grad_tf = flatten_grads(grads=Q_grads_tf, var_list=self._vars('main/Q'))
        self.pi_grad_tf = flatten_grads(grads=pi_grads_tf, var_list=self._vars('main/pi'))
        # optimizers
        self.Q_adam = MpiAdam(self._vars('main/Q'), scale_grad_by_procs=False)
        self.pi_adam = MpiAdam(self._vars('main/pi'), scale_grad_by_procs=False)
        # polyak averaging
        self.main_vars = self._vars('main/Q') + self._vars('main/pi')
        self.target_vars = self._vars('target/Q') + self._vars('target/pi')
        self.stats_vars = self._global_vars('o_stats') + self._global_vars('g_stats')
        self.init_target_net_op = list(
            map(lambda v: v[0].assign(v[1]), zip(self.target_vars, self.main_vars)))
        self.update_target_net_op = list(
            map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
        # initialize all variables
        tf.variables_initializer(self._global_vars('')).run()
        self._sync_optimizers()
        self._init_target_net()

    def logs(self, prefix=''):
        """Return a list of (key, value) diagnostic pairs, optionally namespaced by `prefix`."""
        logs = []
        logs += [('stats_o/mean', np.mean(self.sess.run([self.o_stats.mean])))]
        logs += [('stats_o/std', np.mean(self.sess.run([self.o_stats.std])))]
        logs += [('stats_g/mean', np.mean(self.sess.run([self.g_stats.mean])))]
        logs += [('stats_g/std', np.mean(self.sess.run([self.g_stats.std])))]
        # Fixed: was `prefix is not ''` -- identity comparison with a string
        # literal is implementation-dependent (SyntaxWarning on CPython 3.8+).
        if prefix != '' and not prefix.endswith('/'):
            return [(prefix + '/' + key, val) for key, val in logs]
        else:
            return logs

    def __getstate__(self):
        """Our policies can be loaded from pkl, but after unpickling you cannot continue training.
        """
        excluded_subnames = ['_tf', '_op', '_vars', '_adam', 'buffer', 'sess', '_stats',
                             'main', 'target', 'lock', 'env', 'sample_transitions',
                             'stage_shapes', 'create_actor_critic']
        state = {k: v for k, v in self.__dict__.items() if all([not subname in k for subname in excluded_subnames])}
        state['buffer_size'] = self.buffer_size
        state['tf'] = self.sess.run([x for x in self._global_vars('') if 'buffer' not in x.name])
        return state

    def __setstate__(self, state):
        """Rebuild the agent from a pickled `state` and restore its TF variables."""
        if 'sample_transitions' not in state:
            # We don't need this for playing the policy.
            state['sample_transitions'] = None
        self.__init__(**state)
        # set up stats (they are overwritten in __init__)
        for k, v in state.items():
            if k[-6:] == '_stats':
                self.__dict__[k] = v
        # load TF variables (renamed from `vars`, which shadowed the builtin)
        tf_vars = [x for x in self._global_vars('') if 'buffer' not in x.name]
        assert len(tf_vars) == len(state["tf"])
        node = [tf.assign(var, val) for var, val in zip(tf_vars, state["tf"])]
        self.sess.run(node)
|
dsbrown1331/CoRL2019-DREX
|
drex-mujoco/learner/baselines/baselines/her/ddpg.py
|
Python
|
mit
| 21,742
|
[
"Gaussian"
] |
7a80586d94d76a846e3fae382bfc499b87f609a5db58dc5532728f3b0d785bf2
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity analysis annotations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class LiveValueResolver(transformer.Base):
  """Annotates nodes with live values.

  A "live value" is the actual Python object a name refers to in the known
  execution context (namespace, literals, argument values). Requires the
  activity-analysis annotations (NodeAnno.IS_LOCAL etc.) to be present.
  """

  def __init__(self, context, literals):
    super(LiveValueResolver, self).__init__(context)
    # Mapping of names to literal values, checked before the namespace.
    self.literals = literals

  def visit_ClassDef(self, node):
    """Annotate a class definition with the class object from the namespace."""
    self.generic_visit(node)
    anno.setanno(node, 'live_val', self.context.namespace[node.name])
    return node

  def visit_Name(self, node):
    """Attach 'live_val'/'fqn' annotations to loaded, non-local names."""
    self.generic_visit(node)
    if isinstance(node.ctx, gast.Load):
      assert anno.hasanno(node, NodeAnno.IS_LOCAL), node
      symbol_is_local = anno.getanno(node, NodeAnno.IS_LOCAL)
      assert anno.hasanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY), node
      symbol_is_modified = anno.getanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY)
      assert anno.hasanno(node, NodeAnno.IS_PARAM), node
      symbol_is_param = anno.getanno(node, NodeAnno.IS_PARAM)
      if not symbol_is_local and not symbol_is_param:
        # Literals take precedence over the namespace.
        if node.id in self.literals:
          anno.setanno(node, 'live_val', self.literals[node.id])
        elif node.id in self.context.namespace:
          obj = self.context.namespace[node.id]
          anno.setanno(node, 'live_val', obj)
          if hasattr(obj, '__name__'):
            anno.setanno(node, 'fqn', (obj.__name__,))
          elif hasattr(obj, '__class__'):
            obj_class = obj.__class__
            anno.setanno(node, 'fqn',
                         (obj_class.__module__, obj_class.__name__))
          else:
            # If the symbol value is for example a primitive, then it will not
            # have a name.
            pass
        else:
          pass
          # TODO(mdan): Should we raise an error here?
          # Can encounter this when:
          #  * a symbol truly lacks reference
          #  * a symbol is new, like the new name of a function we just renamed.
      else:
        pass
        # TODO(mdan): Attempt to trace its value through the local chain.
        # TODO(mdan): Use type annotations as fallback.
      # Unmodified names may still be resolvable through known argument values.
      if not symbol_is_modified:
        if node.id in self.context.arg_values:
          obj = self.context.arg_values[node.id]
          anno.setanno(node, 'live_val', obj)
          anno.setanno(node, 'fqn', (obj.__class__.__name__,))
    return node

  def visit_Attribute(self, node):
    """Resolve attribute access through a parent's live value or known type."""
    self.generic_visit(node)
    if anno.hasanno(node.value, 'live_val'):
      assert anno.hasanno(node.value, 'fqn')
      parent_object = anno.getanno(node.value, 'live_val')
      if not hasattr(parent_object, node.attr):
        raise AttributeError('%s has no attribute %s' % (parent_object,
                                                         node.attr))
      anno.setanno(node, 'parent_type', type(parent_object))
      anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
      anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
    # TODO(mdan): Investigate the role built-in annotations can play here.
    elif anno.hasanno(node.value, 'type'):
      parent_type = anno.getanno(node.value, 'type')
      if hasattr(parent_type, node.attr):
        # This should hold for static members like methods.
        # This would not hold for dynamic members like function attributes.
        # For the dynamic case, we simply leave the node without an annotation,
        # and let downstream consumers figure out what to do.
        anno.setanno(node, 'parent_type', parent_type)
        anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
        anno.setanno(node, 'fqn',
                     anno.getanno(node.value, 'type_fqn') + (node.attr,))
    elif isinstance(node.value, gast.Name):
      stem_name = node.value
      # All nonlocal symbols should be fully resolved.
      assert anno.hasanno(stem_name, NodeAnno.IS_LOCAL), stem_name
      # TODO(mdan): Figure out what to do when calling attribute on local object
      # Maybe just leave as-is?
    return node
def resolve(node, context, literals):
  """Run a LiveValueResolver over `node` and return the annotated node."""
  resolver = LiveValueResolver(context, literals)
  return resolver.visit(node)
|
nburn42/tensorflow
|
tensorflow/contrib/autograph/pyct/static_analysis/live_values.py
|
Python
|
apache-2.0
| 5,123
|
[
"VisIt"
] |
8e89896cbe987995c2868f087b24b21db8875c7258ac7d14faa1c0334aa6c66b
|
# -*- coding: utf-8 -*-
import os
import sys
def check_cclib(cclib):
    """Make sure we are importing code from a subdirectory, which should exist
    and should have been updated just before running this script. Note that
    this script does not assume any version in the module and just takes
    what it finds... so an appropriate checkout should be done first."""
    # The module's file path must begin with the current working directory.
    if not cclib.__file__.startswith(os.getcwd()):
        print("Do not seem to be importing from current directory")
        sys.exit(1)
if __name__ == "__main__":
    import cclib
    check_cclib(cclib)
    # Need to parse the ccData docstring, since only that currently
    # contains all the information needed for this table.
    # NOTE(review): the str.decode(...).encode(...) round-trip below means
    # this script targets Python 2 -- confirm before running under Python 3.
    data_doc = cclib.parser.data.ccData.__doc__
    # Attribute lines are indented (first 8 columns blank) and contain " -- ".
    attributes = [line for line in data_doc.split('\n') if line[:8].strip() == '']
    attributes = [line for line in attributes if "--" in line]
    # These are the widths of the columns in the table
    wattr = 20
    wdesc = 65
    wunit = 28
    wtype = 32
    # Build the reST simple-table rule ("==== ==== ...") and header row.
    dashes = " "
    for w in [wattr, wdesc, wunit, wtype]:
        dashes += "="*(w-1) + " "
    header = " "
    header += "Name".ljust(wattr)
    header += "Description".ljust(wdesc)
    header += "Units".ljust(wunit)
    header += "Data type".ljust(wtype)
    print(dashes)
    print(header)
    print(dashes)
    names = []
    for line in attributes:
        # There is always a double dash after the name.
        attr, desc = line.strip().split(' -- ')
        names.append(attr)
        # The type and unit are in parentheses, but these
        # are not always the only parentheses on the line.
        other = desc.split('(')[-1]
        desc = desc[:-len(other)-1].strip()
        other = other.split(')')[0]
        # Furthermore, the unit is not always there.
        if "," in other:
            atype, aunit = other.split(", ")
        else:
            atype = other
            aunit = ''
        # Print the line with columns align to the table. Note that
        # the description sometimes contain Unicode characters, so
        # decode-encode when justifying to get the correct length.
        attr = ("`%s`_" % attr).ljust(wattr)
        desc = desc.decode('utf-8').ljust(wdesc).encode('utf-8')
        aunit = aunit.ljust(wunit)
        # Expand rank markers like "[2]" into " of rank 2" in the type column.
        for i in range(1,4):
            atype = atype.replace('[%i]' % i, ' of rank %i' % i)
        print(" " + attr + desc + aunit + atype)
    print(dashes)
    print("")
    # Emit reST link targets for each attribute name.
    for n in names:
        print(".. _`%s`: data_notes.html#%s" % (n, n))
|
hainm/cclib.github.io
|
sphinx/attributes.py
|
Python
|
lgpl-2.1
| 2,546
|
[
"cclib"
] |
d29e9adc5435ebe9a76ca256a1d2e58896c36cd31929c230d16e150e07e9a547
|
"""Support for the Amazon Polly text to speech service."""
import logging
import voluptuous as vol
from homeassistant.components.tts import PLATFORM_SCHEMA, Provider
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys for the AWS session.
CONF_REGION = 'region_name'
CONF_ACCESS_KEY_ID = 'aws_access_key_id'
CONF_SECRET_ACCESS_KEY = 'aws_secret_access_key'
CONF_PROFILE_NAME = 'profile_name'
ATTR_CREDENTIALS = 'credentials'

DEFAULT_REGION = 'us-east-1'
# AWS regions in which the Polly service is offered.
SUPPORTED_REGIONS = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2',
                     'ca-central-1', 'eu-west-1', 'eu-central-1', 'eu-west-2',
                     'eu-west-3', 'ap-southeast-1', 'ap-southeast-2',
                     'ap-northeast-2', 'ap-northeast-1', 'ap-south-1',
                     'sa-east-1']

# Configuration keys for the synthesis request itself.
CONF_VOICE = 'voice'
CONF_OUTPUT_FORMAT = 'output_format'
CONF_SAMPLE_RATE = 'sample_rate'
CONF_TEXT_TYPE = 'text_type'

# Polly voice IDs, grouped by language. Note that 'Aditi' appears twice on
# purpose: Polly lists it for both Indian English and Hindi.
SUPPORTED_VOICES = [
    'Zhiyu',  # Chinese
    'Mads', 'Naja',  # Danish
    'Ruben', 'Lotte',  # Dutch
    'Russell', 'Nicole',  # English Australian
    'Brian', 'Amy', 'Emma',  # English
    'Aditi', 'Raveena',  # English, Indian
    'Joey', 'Justin', 'Matthew', 'Ivy', 'Joanna', 'Kendra', 'Kimberly',
    'Salli',  # English
    'Geraint',  # English Welsh
    'Mathieu', 'Celine', 'Lea',  # French
    'Chantal',  # French Canadian
    'Hans', 'Marlene', 'Vicki',  # German
    'Aditi',  # Hindi
    'Karl', 'Dora',  # Icelandic
    'Giorgio', 'Carla', 'Bianca',  # Italian
    'Takumi', 'Mizuki',  # Japanese
    'Seoyeon',  # Korean
    'Liv',  # Norwegian
    'Jacek', 'Jan', 'Ewa', 'Maja',  # Polish
    'Ricardo', 'Vitoria',  # Portuguese, Brazilian
    'Cristiano', 'Ines',  # Portuguese, European
    'Carmen',  # Romanian
    'Maxim', 'Tatyana',  # Russian
    'Enrique', 'Conchita', 'Lucia',  # Spanish European
    'Mia',  # Spanish Mexican
    'Miguel', 'Penelope',  # Spanish US
    'Astrid',  # Swedish
    'Filiz',  # Turkish
    'Gwyneth',  # Welsh
]

SUPPORTED_OUTPUT_FORMATS = ['mp3', 'ogg_vorbis', 'pcm']

SUPPORTED_SAMPLE_RATES = ['8000', '16000', '22050']

# Not every sample rate is valid for every output format.
SUPPORTED_SAMPLE_RATES_MAP = {
    'mp3': ['8000', '16000', '22050'],
    'ogg_vorbis': ['8000', '16000', '22050'],
    'pcm': ['8000', '16000'],
}

SUPPORTED_TEXT_TYPES = ['text', 'ssml']

# Map the ContentType returned by Polly to a file extension for the TTS cache.
CONTENT_TYPE_EXTENSIONS = {
    'audio/mpeg': 'mp3',
    'audio/ogg': 'ogg',
    'audio/pcm': 'pcm',
}

DEFAULT_VOICE = 'Joanna'
DEFAULT_OUTPUT_FORMAT = 'mp3'
DEFAULT_TEXT_TYPE = 'text'

# Default sample rate depends on the chosen output format.
DEFAULT_SAMPLE_RATES = {
    'mp3': '22050',
    'ogg_vorbis': '22050',
    'pcm': '16000',
}

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_REGION, default=DEFAULT_REGION):
        vol.In(SUPPORTED_REGIONS),
    vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
    vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
    vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
    vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORTED_VOICES),
    vol.Optional(CONF_OUTPUT_FORMAT, default=DEFAULT_OUTPUT_FORMAT):
        vol.In(SUPPORTED_OUTPUT_FORMATS),
    vol.Optional(CONF_SAMPLE_RATE):
        vol.All(cv.string, vol.In(SUPPORTED_SAMPLE_RATES)),
    vol.Optional(CONF_TEXT_TYPE, default=DEFAULT_TEXT_TYPE):
        vol.In(SUPPORTED_TEXT_TYPES),
})
def get_engine(hass, config):
    """Set up Amazon Polly speech component."""
    fmt = config.get(CONF_OUTPUT_FORMAT)
    rate = config.get(CONF_SAMPLE_RATE, DEFAULT_SAMPLE_RATES[fmt])
    if rate not in SUPPORTED_SAMPLE_RATES_MAP.get(fmt):
        _LOGGER.error("%s is not a valid sample rate for %s", rate, fmt)
        return None
    config[CONF_SAMPLE_RATE] = rate

    import boto3

    # A named profile takes precedence over explicit credentials.
    profile = config.get(CONF_PROFILE_NAME)
    if profile is not None:
        boto3.setup_default_session(profile_name=profile)

    # Pull the AWS session settings out of the platform config; the remaining
    # keys are the synthesis options kept on the provider.
    session_keys = (CONF_REGION, CONF_ACCESS_KEY_ID, CONF_SECRET_ACCESS_KEY)
    aws_config = {key: config.get(key) for key in session_keys}
    for key in session_keys:
        del config[key]

    polly_client = boto3.client('polly', **aws_config)

    # Query Polly once for the available voices and the languages they cover.
    all_voices = {}
    supported_languages = []
    for voice in polly_client.describe_voices().get('Voices'):
        all_voices[voice.get('Id')] = voice
        lang = voice.get('LanguageCode')
        if lang not in supported_languages:
            supported_languages.append(lang)

    return AmazonPollyProvider(
        polly_client, config, supported_languages, all_voices)
class AmazonPollyProvider(Provider):
    """Amazon Polly speech api provider."""

    def __init__(self, polly_client, config, supported_languages,
                 all_voices):
        """Initialize Amazon Polly provider for TTS."""
        self.client = polly_client
        self.config = config
        self.supported_langs = supported_languages
        # Mapping of voice ID -> voice metadata as returned by Polly.
        self.all_voices = all_voices
        self.default_voice = self.config.get(CONF_VOICE)
        self.name = 'Amazon Polly'

    @property
    def supported_languages(self):
        """Return a list of supported languages."""
        return self.supported_langs

    @property
    def default_language(self):
        """Return the default language."""
        voice_meta = self.all_voices.get(self.default_voice)
        return voice_meta.get('LanguageCode')

    @property
    def default_options(self):
        """Return dict include default options."""
        return {CONF_VOICE: self.default_voice}

    @property
    def supported_options(self):
        """Return a list of supported options."""
        return [CONF_VOICE]

    def get_tts_audio(self, message, language=None, options=None):
        """Request TTS file from Polly."""
        voice_id = options.get(CONF_VOICE, self.default_voice)
        voice_meta = self.all_voices.get(voice_id)
        # Reject voices that do not speak the requested language.
        if language != voice_meta.get('LanguageCode'):
            _LOGGER.error("%s does not support the %s language",
                          voice_id, language)
            return None, None

        resp = self.client.synthesize_speech(
            OutputFormat=self.config[CONF_OUTPUT_FORMAT],
            SampleRate=self.config[CONF_SAMPLE_RATE],
            Text=message,
            TextType=self.config[CONF_TEXT_TYPE],
            VoiceId=voice_id
        )

        extension = CONTENT_TYPE_EXTENSIONS[resp.get('ContentType')]
        audio = resp.get('AudioStream').read()
        return extension, audio
|
MartinHjelmare/home-assistant
|
homeassistant/components/amazon_polly/tts.py
|
Python
|
apache-2.0
| 6,590
|
[
"Brian"
] |
bc39c0243108764e5840712b33e47d3100bbb26534f6d3c8f92a6a1d8ef9caf0
|
# Copyright (C) 2012 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""This module contains functions to generate gaussian noise colored with a
noise spectrum.
"""
from pycbc.types import TimeSeries, zeros
from pycbc.types import complex_same_precision_as, FrequencySeries
from lalsimulation import SimNoise
import lal
import numpy.random
def frequency_noise_from_psd(psd, seed=None):
    """ Create noise with a given psd.

    Return noise coloured with the given psd. The returned noise
    FrequencySeries has the same length and frequency step as the given psd.
    Note that if unique noise is desired a unique seed should be provided.

    Parameters
    ----------
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}
        The seed to generate the noise. If None specified,
        the seed will not be reset.

    Returns
    --------
    noise : FrequencySeries
        A FrequencySeries containing gaussian noise colored by the given psd.
    """
    # Standard deviation per frequency bin for a one-sided PSD.
    sigma = 0.5 * (psd / psd.delta_f) ** (0.5)
    if seed is not None:
        numpy.random.seed(seed)
    sigma = sigma.numpy()
    dtype = complex_same_precision_as(psd)

    # Only draw noise where the PSD is non-zero; zero-PSD bins stay zero.
    not_zero = (sigma != 0)
    sigma_red = sigma[not_zero]
    noise_re = numpy.random.normal(0, sigma_red)
    noise_co = numpy.random.normal(0, sigma_red)
    noise_red = noise_re + 1j * noise_co

    noise = numpy.zeros(len(sigma), dtype=dtype)
    noise[not_zero] = noise_red

    return FrequencySeries(noise,
                           delta_f=psd.delta_f,
                           dtype=dtype)
def noise_from_psd(length, delta_t, psd, seed=None):
    """ Create noise with a given psd.

    Return noise with a given psd. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}
        The seed to generate the noise. If None, a random seed is drawn.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.

    Raises
    ------
    ValueError
        If the PSD frequency resolution is too coarse for the requested
        delta_t.
    """
    noise_ts = TimeSeries(zeros(length), delta_t=delta_t)

    if seed is None:
        # Draw the seed with an explicit 64-bit dtype: the default bound
        # check in numpy.random.randint is against the platform C long, so
        # a plain randint(2**32) raises "high is out of bounds" on
        # platforms where that type is 32 bits (e.g. Windows).
        seed = int(numpy.random.randint(0, 2**32, dtype='uint64'))

    randomness = lal.gsl_rng("ranlux", seed)

    N = int(1.0 / delta_t / psd.delta_f)
    n = N//2 + 1
    stride = N//2

    if n > len(psd):
        raise ValueError("PSD not compatible with requested delta_t")

    psd = (psd[0:n]).lal()
    # Zero the highest-frequency (Nyquist) bin of the truncated PSD.
    psd.data.data[n-1] = 0

    segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
    length_generated = 0

    # Generate noise in half-overlapping segments and copy out one stride
    # of samples at a time until the requested length is filled.
    SimNoise(segment, 0, psd, randomness)
    while (length_generated < length):
        if (length_generated + stride) < length:
            noise_ts.data[length_generated:length_generated+stride] = segment.data.data[0:stride]
        else:
            noise_ts.data[length_generated:length] = segment.data.data[0:length-length_generated]

        length_generated += stride
        SimNoise(segment, stride, psd, randomness)
    return noise_ts
def noise_from_string(psd_name, length, delta_t, seed=None, low_frequency_cutoff=10.0):
    """ Create noise from an analytic PSD

    Return noise from the chosen PSD. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    psd_name : str
        Name of the analytic PSD to use.
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    seed : {None, int}
        The seed to generate the noise.
    low_frequency_cutoff : {10.0, float}
        The low frequency cutoff to pass to the PSD generation.

    Returns
    --------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.
    """
    import pycbc.psd
    # We just need enough resolution to resolve lines
    delta_f = 1.0 / 8
    flen = int(.5 / delta_t / delta_f) + 1
    psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
    return noise_from_psd(int(length), delta_t, psd, seed=seed)
|
cmbiwer/pycbc
|
pycbc/noise/gaussian.py
|
Python
|
gpl-3.0
| 5,087
|
[
"Gaussian"
] |
71e0ca8449a9ee277bf4f00d3366f85c492e9d126eb1b89ee489b121659e54fb
|
# Copyright (C) 2014 Sereina Riniker
#
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Torsion Fingerprints (Deviation) (TFD)
According to a paper from Schulz-Gasch et al., JCIM, 52, 1499-1512 (2012).
"""
from rdkit import rdBase
from rdkit import RDConfig
from rdkit import Geometry
from rdkit import Chem
from rdkit.Chem import rdchem
from rdkit.Chem import rdMolDescriptors
import math, os
def _doMatch(inv, atoms):
""" Helper function to check if all atoms in the list are the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] != inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doNotMatch(inv, atoms):
""" Helper function to check if all atoms in the list are NOT the same
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: boolean
"""
match = True
for i in range(len(atoms)-1):
for j in range(i+1, len(atoms)):
if (inv[atoms[i].GetIdx()] == inv[atoms[j].GetIdx()]):
match = False
return match
return match
def _doMatchExcept1(inv, atoms):
""" Helper function to check if two atoms in the list are the same,
and one not
Note: Works only for three atoms
Arguments:
- inv: atom invariants (used to define equivalence of atoms)
- atoms: list of atoms to check
Return: atom that is different
"""
if len(atoms) != 3:
raise ValueError("Number of atoms must be three")
a1 = atoms[0].GetIdx()
a2 = atoms[1].GetIdx()
a3 = atoms[2].GetIdx()
if (inv[a1] == inv[a2] and inv[a1] != inv[a3] and inv[a2] != inv[a3]):
return atoms[2]
elif (inv[a1] != inv[a2] and inv[a1] == inv[a3] and inv[a2] != inv[a3]):
return atoms[1]
elif (inv[a1] != inv[a2] and inv[a1] != inv[a3] and inv[a2] == inv[a3]):
return atoms[0]
return None
def _getAtomInvariantsWithRadius(mol, radius):
  """ Calculate an atom invariant for each atom using Morgan environments
      of a given radius.

      Arguments:
      - mol: the molecule of interest
      - radius: the radius for the Morgan fingerprint

      Return: list of atom invariants
  """
  invariants = []
  for idx in range(mol.GetNumAtoms()):
    bit_info = {}
    rdMolDescriptors.GetMorganFingerprint(mol, radius, fromAtoms=[idx],
                                          bitInfo=bit_info)
    # keep only the bits whose environment has exactly the requested radius
    for bit, environments in bit_info.items():
      if environments[0][1] == radius:
        invariants.append(bit)
  return invariants
def _getHeavyAtomNeighbors(atom1, aid2=-1):
""" Helper function to calculate the number of heavy atom neighbors.
Arguments:
- atom1: the atom of interest
- aid2: atom index that should be excluded from neighbors (default: none)
Return: a list of heavy atom neighbors of the given atom
"""
if aid2 < 0:
return [n for n in atom1.GetNeighbors() if n.GetSymbol()!='H']
else:
return [n for n in atom1.GetNeighbors() if (n.GetSymbol()!='H' and n.GetIdx()!=aid2)]
def _getIndexforTorsion(neighbors, inv):
  """ Pick the reference atom(s) that define a torsion for a given
      central atom.

      Arguments:
      - neighbors: list of the neighbors of the atom
      - inv: atom invariants

      Return: list of atoms to use as reference for the torsion
  """
  if len(neighbors) == 1:  # only one neighbor: no choice to make
    return [neighbors[0]]
  if _doMatch(inv, neighbors):  # all neighbors symmetric: keep them all
    return neighbors
  if _doNotMatch(inv, neighbors):  # all different: smallest invariant wins
    return [min(neighbors, key=lambda atom: inv[atom.GetIdx()])]
  # two neighbors the same, one different: use the odd one out
  atom = _doMatchExcept1(inv, neighbors)
  if atom is None:
    raise ValueError("Atom neighbors are either all the same or all different")
  return [atom]
def _getBondsForTorsions(mol, ignoreColinearBonds):
  """ Determine the bonds (or pair of atoms treated like a bond) for which
      torsions should be calculated.

      Arguments:
      - mol: the molecule of interest
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of tuples (begin atom idx, end atom idx,
              heavy-atom neighbors of begin, heavy-atom neighbors of end)
  """
  # flag the atoms that cannot be part of the centre atoms of a torsion
  # patterns: triple bonds and allenes
  patts = [Chem.MolFromSmarts(x) for x in ['*#*', '[$([C](=*)=*)]']]
  atomFlags = [0]*mol.GetNumAtoms()
  for p in patts:
    if mol.HasSubstructMatch(p):
      matches = mol.GetSubstructMatches(p)
      for match in matches:
        for a in match:
          atomFlags[a] = 1
  bonds = []
  # guard against handling the same bond twice when walking colinear chains
  doneBonds = [0]*mol.GetNumBonds()
  for b in mol.GetBonds():
    if b.IsInRing(): continue
    a1 = b.GetBeginAtomIdx()
    a2 = b.GetEndAtomIdx()
    nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2)
    nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1)
    if not doneBonds[b.GetIdx()] and (nb1 and nb2): # no terminal bonds
      doneBonds[b.GetIdx()] = 1;
      # check if atoms cannot be middle atoms
      if atomFlags[a1] or atomFlags[a2]:
        if not ignoreColinearBonds: # search for alternative not-covalently bound atoms
          # walk outward from each flagged end along the colinear axis
          # until a non-flagged atom or a branching point is reached
          while len(nb1)==1 and atomFlags[a1]:
            a1old = a1
            a1 = nb1[0].GetIdx()
            b = mol.GetBondBetweenAtoms(a1old, a1)
            if b.GetEndAtom().GetIdx() == a1old:
              nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a1old)
            else:
              nb1 = _getHeavyAtomNeighbors(b.GetEndAtom(), a1old)
            doneBonds[b.GetIdx()] = 1;
          while len(nb2)==1 and atomFlags[a2]:
            doneBonds[b.GetIdx()] = 1;
            a2old = a2
            a2 = nb2[0].GetIdx()
            b = mol.GetBondBetweenAtoms(a2old, a2)
            if b.GetBeginAtom().GetIdx() == a2old:
              nb2 = _getHeavyAtomNeighbors(b.GetEndAtom(), a2old)
            else:
              nb2 = _getHeavyAtomNeighbors(b.GetBeginAtom(), a2old)
            doneBonds[b.GetIdx()] = 1;
          # only keep the (possibly shifted) pair if both sides still have
          # heavy neighbors to define a torsion with
          if nb1 and nb2:
            bonds.append((a1, a2, nb1, nb2))
      else:
        bonds.append((a1, a2, nb1, nb2))
  return bonds
def CalculateTorsionLists(mol, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Calculate a list of torsions for a given molecule. For each torsion
      the four atom indices are determined and stored in a set.

      Arguments:
      - mol: the molecule of interest
      - maxDev: maximal deviation used for normalization
                'equal': all torsions are normalized using 180.0 (default)
                'spec': each torsion is normalized using its specific
                        maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: two lists of torsions: non-ring and ring torsions

      Raises ValueError for an unknown maxDev value.
  """
  if maxDev not in ['equal', 'spec']:
    raise ValueError("maxDev must be either equal or spec")
  # get non-terminal, non-cyclic bonds
  bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
  # get atom invariants
  if symmRadius > 0:
    inv = _getAtomInvariantsWithRadius(mol, symmRadius)
  else:
    inv = rdMolDescriptors.GetConnectivityInvariants(mol)
  # get the torsions; the "case" numbers refer to the symmetry cases
  # enumerated in the Schulz-Gasch et al. paper
  tors_list = [] # to store the atom indices of the torsions
  for a1, a2, nb1, nb2 in bonds:
    d1 = _getIndexforTorsion(nb1, inv)
    d2 = _getIndexforTorsion(nb2, inv)
    if len(d1) == 1 and len(d2) == 1: # case 1, 2, 4, 5, 7, 10, 16, 12, 17, 19
      tors_list.append(([(d1[0].GetIdx(), a1, a2, d2[0].GetIdx())], 180.0))
    elif len(d1) == 1: # case 3, 6, 8, 13, 20
      if len(nb2) == 2: # two neighbors
        tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 90.0))
      else: # three neighbors
        tors_list.append(([(d1[0].GetIdx(), a1, a2, nb.GetIdx()) for nb in d2], 60.0))
    elif len(d2) == 1: # case 3, 6, 8, 13, 20
      if len(nb1) == 2: # two neighbors
        tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 90.0))
      else: # three neighbors
        tors_list.append(([(nb.GetIdx(), a1, a2, d2[0].GetIdx()) for nb in d1], 60.0))
    else: # both symmetric
      tmp = []
      for n1 in d1:
        for n2 in d2:
          tmp.append((n1.GetIdx(), a1, a2, n2.GetIdx()))
      if len(nb1) == 2 and len(nb2) == 2: # case 9
        tors_list.append((tmp, 90.0))
      elif len(nb1) == 3 and len(nb2) == 3: # case 21
        tors_list.append((tmp, 60.0))
      else: # case 15
        tors_list.append((tmp, 30.0))
  # maximal possible deviation for non-cyclic bonds
  if maxDev == 'equal':
    tors_list = [(t,180.0) for t,d in tors_list]
  # rings: one torsion entry per ring, built from consecutive atom quadruples
  rings = Chem.GetSymmSSSR(mol)
  tors_list_rings = []
  for r in rings:
    # get the torsions
    tmp = []
    num = len(r)
    # ring-size-dependent maximal deviation (Gaussian centered at 14)
    maxdev = 180.0 * math.exp(-0.025*(num-14)*(num-14))
    for i in range(len(r)):
      tmp.append((r[i], r[(i+1)%num], r[(i+2)%num], r[(i+3)%num]))
    tors_list_rings.append((tmp,maxdev))
  return tors_list, tors_list_rings
def _getTorsionAtomPositions(atoms, conf):
""" Helper function to retrieve the coordinates of the four atoms
in a torsion
Arguments:
- atoms: list with the four atoms
- conf: conformation of the molecule
Return: Point3D objects of the four atoms
"""
if len(atoms) != 4:
raise ValueError("List must contain exactly four atoms")
p1 = conf.GetAtomPosition(atoms[0])
p2 = conf.GetAtomPosition(atoms[1])
p3 = conf.GetAtomPosition(atoms[2])
p4 = conf.GetAtomPosition(atoms[3])
return p1, p2, p3, p4
def CalculateTorsionAngles(mol, tors_list, tors_list_rings, confId=-1):
  """ Calculate the torsion angles for a list of non-ring and
      a list of ring torsions.

      Arguments:
      - mol: the molecule of interest
      - tors_list: list of non-ring torsions
      - tors_list_rings: list of ring torsions
      - confId: index of the conformation (default: first conformer)

      Return: list of (angles, maxdev) tuples; for ring torsions the angles
      of the ring are averaged into a single value
  """
  torsions = []
  conf = mol.GetConformer(confId)
  for quartets,maxdev in tors_list:
    tors = []
    # loop over torsions and calculate angle
    for atoms in quartets:
      p1, p2, p3, p4 = _getTorsionAtomPositions(atoms, conf)
      # convert from radians to degrees
      tmpTors = (Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0
      if tmpTors < 0: tmpTors += 360.0 # angle between 0 and 360
      tors.append(tmpTors)
    torsions.append((tors, maxdev))
  # rings: the absolute torsion values around the ring are averaged
  for quartets,maxdev in tors_list_rings:
    num = len(quartets)
    # loop over torsions and sum them up
    tors = 0
    for atoms in quartets:
      p1, p2, p3, p4 = _getTorsionAtomPositions(atoms, conf)
      tmpTors = abs((Geometry.ComputeSignedDihedralAngle(p1, p2, p3, p4)/math.pi)*180.0)
      tors += tmpTors
    tors /= num
    torsions.append(([tors], maxdev))
  return torsions
def _findCentralBond(mol, distmat):
  """ Helper function to identify the atoms of the most central bond.

      Arguments:
      - mol: the molecule of interest
      - distmat: distance matrix of the molecule

      Return: atom indices of the two most central atoms (in order)

      NOTE(review): the search below assumes at least two non-terminal
      atoms exist and that one of them is bonded to the most central atom;
      otherwise the while loop would run past the list -- confirm callers
      only pass molecules with a non-terminal bond.
  """
  from numpy import std
  # get the most central atom = atom with the least STD of shortest distances
  stds = []
  for i in range(mol.GetNumAtoms()):
    # only consider non-terminal atoms
    if len(_getHeavyAtomNeighbors(mol.GetAtomWithIdx(i))) < 2: continue
    tmp = [d for d in distmat[i]]
    # drop the self-distance before computing the spread
    tmp.pop(i)
    stds.append((std(tmp), i))
  stds.sort()
  aid1 = stds[0][1]
  # find the second most central bond that is bonded to aid1
  i = 1
  while 1:
    if mol.GetBondBetweenAtoms(aid1, stds[i][1]) is None:
      i += 1
    else:
      aid2 = stds[i][1]
      break
  return aid1, aid2 # most central atom comes first
def _calculateBeta(mol, distmat, aid1):
  """ Helper function to calculate the beta for torsion weights
      according to the formula in the paper.
      w(dmax/2) = 0.1

      Arguments:
      - mol: the molecule of interest
      - distmat: distance matrix of the molecule
      - aid1: atom index of the most central atom

      Return: value of beta (float)
  """
  # get all non-terminal bonds (both bond atoms have >1 heavy neighbor)
  bonds = []
  for b in mol.GetBonds():
    nb1 = _getHeavyAtomNeighbors(b.GetBeginAtom())
    nb2 = _getHeavyAtomNeighbors(b.GetEndAtom())
    # BUGFIX: the condition previously tested len(nb2) twice, so bonds
    # whose *begin* atom is terminal were wrongly counted as non-terminal
    if len(nb1) > 1 and len(nb2) > 1:
      bonds.append(b)
  # get the largest topological distance from aid1 to any non-terminal bond
  dmax = 0
  for b in bonds:
    bid1 = b.GetBeginAtom().GetIdx()
    bid2 = b.GetEndAtom().GetIdx()
    d = max([distmat[aid1][bid1], distmat[aid1][bid2]])
    if (d > dmax): dmax = d
  # solve w(dmax/2) = exp(-beta*(dmax/2)^2) = 0.1 for beta
  dmax2 = dmax/2.0
  beta = -math.log(0.1)/(dmax2*dmax2)
  return beta
def CalculateTorsionWeights(mol, aid1=-1, aid2=-1, ignoreColinearBonds=True):
  """ Calculate the weights for the torsions in a molecule.
      By default, the highest weight is given to the bond
      connecting the two most central atoms.
      If desired, two alternate atoms can be specified (must
      be connected by a bond).

      Arguments:
      - mol: the molecule of interest
      - aid1: index of the first atom (default: most central)
      - aid2: index of the second atom (default: second most central)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of torsion weights (both non-ring and ring)

      Raises ValueError if the two specified atoms are not bonded.
  """
  # get distance matrix
  distmat = Chem.GetDistanceMatrix(mol)
  if aid1 < 0 and aid2 < 0:
    aid1, aid2 = _findCentralBond(mol, distmat)
  else:
    b = mol.GetBondBetweenAtoms(aid1, aid2)
    if b is None:
      raise ValueError("Specified atoms must be connected by a bond.")
  # calculate beta according to the formula in the paper
  beta = _calculateBeta(mol, distmat, aid1)
  # get non-terminal, non-cyclic bonds
  bonds = _getBondsForTorsions(mol, ignoreColinearBonds)
  # get shortest paths and calculate weights
  weights = []
  for bid1, bid2, nb1, nb2 in bonds:
    if ((bid1, bid2) == (aid1, aid2)
        or (bid2, bid1) == (aid1, aid2)): # if it's the most central bond itself
      d = 0
    else:
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
    # Gaussian decay of the weight with topological distance
    w = math.exp(-beta*(d*d))
    weights.append(w)
  ## RINGS
  rings = mol.GetRingInfo()
  for r in rings.BondRings():
    # get shortest distances
    tmp = []
    num = len(r)
    for bidx in r:
      b = mol.GetBondWithIdx(bidx)
      bid1 = b.GetBeginAtomIdx()
      bid2 = b.GetEndAtomIdx()
      # get shortest distance between the 4 atoms and add 1 to get bond distance
      d = min(distmat[aid1][bid1], distmat[aid1][bid2], distmat[aid2][bid1], distmat[aid2][bid2])+1
      tmp.append(d)
    # calculate weights and append to list
    # Note: the description in the paper is not very clear, the following
    # formula was found to give the same weights as shown in Fig. 1
    # For a ring of size N: w = N/2 * exp(-beta*(sum(w of each bond in ring)/N)^2)
    w = sum(tmp)/float(num)
    w = math.exp(-beta*(w*w))
    weights.append(w*(num/2.0))
  return weights
def CalculateTFD(torsions1, torsions2, weights=None):
  """ Calculate the torsion deviation fingerprint (TFD) given two lists of
      torsion angles.

      Arguments:
      - torsions1: torsion angles of conformation 1
      - torsions2: torsion angles of conformation 2
      - weights: list of torsion weights (default: None)

      Return: TFD value (float)

      Raises ValueError if the two torsion lists (or the weight list)
      differ in length.
  """
  if len(torsions1) != len(torsions2):
    raise ValueError("List of torsions angles must have the same size.")
  # smallest angular difference per torsion, normalized by the torsion's
  # maximal possible deviation
  deviations = []
  for (angles1, maxdev), (angles2, _) in zip(torsions1, torsions2):
    best = 180.0
    for t1 in angles1:
      for t2 in angles2:
        diff = abs(t1 - t2)
        diff = min(diff, 360.0 - diff)  # direction does not matter
        if diff < best:
          best = diff
    deviations.append(best / maxdev)
  # optionally weight each deviation
  if weights is not None:
    if len(weights) != len(torsions1):
      raise ValueError("List of torsions angles and weights must have the same size.")
    deviations = [dev * w for dev, w in zip(deviations, weights)]
    norm = sum(weights)
  else:
    norm = len(deviations)
  tfd = sum(deviations)
  if norm != 0:  # avoid division by zero
    tfd /= norm
  return tfd
def _getSameAtomOrder(mol1, mol2):
  """ Generate a new molecule with the atom order of mol1 and coordinates
      from mol2.

      Arguments:
      - mol1: first instance of the molecule of interest
      - mol2: second instance the molecule of interest

      Return: RDKit molecule (a copy of mol2, reordered to mol1's atom
      numbering if the two orders differ)
  """
  # match[i] gives the atom index in mol2 corresponding to atom i in mol1
  match = mol2.GetSubstructMatch(mol1)
  atomNums = tuple(range(mol1.GetNumAtoms()))
  if match != atomNums: # atom orders are not the same!
    # copy mol1's topology, then transfer each conformer of mol2 with the
    # coordinates permuted into mol1's atom order
    mol3 = Chem.Mol(mol1)
    mol3.RemoveAllConformers()
    for conf2 in mol2.GetConformers():
      confId = conf2.GetId()
      conf = rdchem.Conformer(mol1.GetNumAtoms())
      conf.SetId(confId)
      for i in range(mol1.GetNumAtoms()):
        conf.SetAtomPosition(i, mol2.GetConformer(confId).GetAtomPosition(match[i]))
      cid = mol3.AddConformer(conf)
    return mol3
  else:
    return Chem.Mol(mol2)
# some wrapper functions
def GetTFDBetweenConformers(mol, confIds1, confIds2, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two list of conformers
      of a molecule

      Arguments:
      - mol: the molecule of interest
      - confIds1: first list of conformer indices
      - confIds2: second list of conformer indices
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev: maximal deviation used for normalization
                'equal': all torsions are normalized using 180.0 (default)
                'spec': each torsion is normalized using its specific
                        maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: list of TFD values (one per pair in confIds1 x confIds2)
  """
  tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles1 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds1]
  angles2 = [CalculateTorsionAngles(mol, tl, tlr, confId=cid) for cid in confIds2]
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
  return [CalculateTFD(t1, t2, weights=weights)
          for t1 in angles1 for t2 in angles2]
def GetTFDBetweenMolecules(mol1, mol2, confId1=-1, confId2=-1, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the TFD between two molecules.
      Important: The two molecules must be instances of the same molecule

      Arguments:
      - mol1: first instance of the molecule of interest
      - mol2: second instance the molecule of interest
      - confId1: conformer index for mol1 (default: first conformer)
      - confId2: conformer index for mol2 (default: first conformer)
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev: maximal deviation used for normalization
                'equal': all torsions are normalized using 180.0 (default)
                'spec': each torsion is normalized using its specific
                        maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: TFD value

      Raises ValueError if the two molecules are not the same compound.
  """
  # canonical SMILES comparison as an identity check
  if (Chem.MolToSmiles(mol1) != Chem.MolToSmiles(mol2)):
    raise ValueError("The two molecules must be instances of the same molecule!")
  # make sure both molecules use the same atom numbering
  mol2 = _getSameAtomOrder(mol1, mol2)
  tl, tlr = CalculateTorsionLists(mol1, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  angles1 = CalculateTorsionAngles(mol1, tl, tlr, confId=confId1)
  angles2 = CalculateTorsionAngles(mol2, tl, tlr, confId=confId2)
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol1, ignoreColinearBonds=ignoreColinearBonds)
  return CalculateTFD(angles1, angles2, weights=weights)
def GetTFDMatrix(mol, useWeights=True, maxDev='equal', symmRadius=2, ignoreColinearBonds=True):
  """ Wrapper to calculate the matrix of TFD values for the
      conformers of a molecule.

      Arguments:
      - mol: the molecule of interest
      - useWeights: flag for using torsion weights in the TFD calculation
      - maxDev: maximal deviation used for normalization
                'equal': all torsions are normalized using 180.0 (default)
                'spec': each torsion is normalized using its specific
                        maximal deviation as given in the paper
      - symmRadius: radius used for calculating the atom invariants
                    (default: 2)
      - ignoreColinearBonds: if True (default), single bonds adjacent to
                             triple bonds are ignored
                             if False, alternative not-covalently bound
                             atoms are used to define the torsion

      Return: matrix of TFD values
      Note that the returned matrix is symmetrical, i.e. it is the
      lower half of the matrix, e.g. for 5 conformers:
      matrix = [ a,
                 b, c,
                 d, e, f,
                 g, h, i, j]
  """
  tl, tlr = CalculateTorsionLists(mol, maxDev=maxDev, symmRadius=symmRadius,
                                  ignoreColinearBonds=ignoreColinearBonds)
  # torsion angles for every conformer, in conformer order
  torsions = [CalculateTorsionAngles(mol, tl, tlr, confId=conf.GetId())
              for conf in mol.GetConformers()]
  weights = None
  if useWeights:
    weights = CalculateTorsionWeights(mol, ignoreColinearBonds=ignoreColinearBonds)
  # lower-triangle (row-major) order: (1,0), (2,0), (2,1), (3,0), ...
  return [CalculateTFD(torsions[i], torsions[j], weights=weights)
          for i in range(mol.GetNumConformers()) for j in range(i)]
|
adalke/rdkit
|
rdkit/Chem/TorsionFingerprints.py
|
Python
|
bsd-3-clause
| 23,970
|
[
"RDKit"
] |
07fae897fef175f8a4a857a05cff5421465eff36138f7d5ec33d27fe0302c837
|
from builtins import range
import numpy as np
def affine_forward(x, w, b):
    """
    Computes the forward pass for an affine (fully-connected) layer.

    Each of the N examples x[i] (shape (d_1, ..., d_k)) is flattened into a
    vector of dimension D = d_1 * ... * d_k and mapped to an M-dimensional
    output vector via the weight matrix w and bias b.

    Inputs:
    - x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
    - w: A numpy array of weights, of shape (D, M)
    - b: A numpy array of biases, of shape (M,)

    Returns a tuple of:
    - out: output, of shape (N, M)
    - cache: (x, w, b)
    """
    num_examples = x.shape[0]
    # flatten each example to a row vector, then apply the affine map
    flattened = x.reshape(num_examples, -1)
    out = flattened.dot(w) + b
    cache = (x, w, b)
    return out, cache
def affine_backward(dout, cache):
    """
    Backward pass for an affine (fully-connected) layer.

    Inputs:
    - dout: Upstream derivative, of shape (N, M)
    - cache: Tuple of:
      - x: Input data, of shape (N, d_1, ... d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,)

    Returns a tuple of:
    - dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
    - dw: Gradient with respect to w, of shape (D, M)
    - db: Gradient with respect to b, of shape (M,)
    """
    x, w, b = cache
    num_examples = x.shape[0]
    x_rows = x.reshape(num_examples, -1)
    # Bias receives the column sums of the upstream gradient.
    db = dout.sum(axis=0)
    # Weight gradient: outer product of flattened inputs and dout, summed
    # over the batch.
    dw = x_rows.T.dot(dout)
    # Input gradient, restored to the original input shape.
    dx = dout.dot(w.T).reshape(x.shape)
    return dx, dw, db
def relu_forward(x):
    """
    Forward pass for a layer of rectified linear units (ReLUs):
    out = max(x, 0), elementwise.

    Input:
    - x: Inputs, of any shape

    Returns a tuple of:
    - out: Output, of the same shape as x
    - cache: x, needed to gate the backward pass
    """
    # Clipping below at zero is equivalent to np.maximum(0, x).
    out = np.clip(x, 0, None)
    cache = x
    return out, cache
def relu_backward(dout, cache):
    """
    Backward pass for a layer of rectified linear units (ReLUs).

    Input:
    - dout: Upstream derivatives, of any shape
    - cache: Input x, of same shape as dout

    Returns:
    - dx: Gradient with respect to x
    """
    x = cache
    # BUG FIX: the original aliased dx = dout and zeroed entries in place,
    # silently corrupting the caller's upstream gradient array.  np.where
    # builds a fresh array instead.  The gradient passes through wherever the
    # forward input was >= 0, matching the original dx[x < 0] = 0 masking.
    dx = np.where(x < 0, 0, dout)
    return dx
def batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for batch normalization.

    During training the sample mean and (uncorrected) sample variance are
    computed from minibatch statistics and used to normalize the incoming
    data.  Exponentially decaying running averages of the mean and variance
    are also maintained:

        running_mean = momentum * running_mean + (1 - momentum) * sample_mean
        running_var  = momentum * running_var  + (1 - momentum) * sample_var

    and those running averages (not the batch statistics) normalize the data
    at test time.

    Input:
    - x: Data of shape (N, D)
    - gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
    - bn_param: Dictionary with the following keys:
      - mode: 'train' or 'test'; required
      - eps: Constant for numeric stability
      - momentum: Constant for running mean / variance.
      - running_mean: Array of shape (D,) giving running mean of features
      - running_var: Array of shape (D,) giving running variance of features

    Returns a tuple of:
    - out: of shape (N, D)
    - cache: A tuple of values needed in the backward pass

    Raises:
    - ValueError: if mode is neither 'train' nor 'test'
    """
    mode = bn_param['mode']
    eps = bn_param.get('eps', 1e-5)
    momentum = bn_param.get('momentum', 0.9)
    N, D = x.shape
    running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
    running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
    out, cache = None, None
    if mode == 'train':
        # Minibatch statistics.  (The original computed the variance twice —
        # once via np.var and once as mean(xmu**2); they are identical, so we
        # compute it once and reuse it for the running-average update.)
        sample_mean = np.mean(x, axis=0)
        xmu = x - sample_mean
        sq = xmu ** 2
        var = np.mean(sq, axis=0)
        sqrtvar = np.sqrt(var + eps)
        ivar = 1.0 / sqrtvar
        xhat = xmu * ivar
        out = xhat * gamma + beta
        # Update the exponentially decaying running averages used at test time.
        running_mean = momentum * running_mean + (1 - momentum) * sample_mean
        running_var = momentum * running_var + (1 - momentum) * var
        cache = (x, xhat, xmu, gamma, beta, ivar, sqrtvar, var, sq, sample_mean, eps)
    elif mode == 'test':
        # BUG FIX: the original recomputed the variance from the incoming
        # test batch (around running_mean) instead of using the stored
        # running_var, so test-time outputs depended on test-batch statistics.
        # Standard batchnorm normalizes with the running (population) stats.
        xmu = x - running_mean
        sqrtvar = np.sqrt(running_var + eps)
        ivar = 1.0 / sqrtvar
        xhat = xmu * ivar
        out = xhat * gamma + beta
        cache = (x, xhat, xmu, gamma, beta, ivar, sqrtvar, running_var,
                 xmu ** 2, running_mean, eps)
    else:
        raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
    # Store the updated running means back into bn_param
    bn_param['running_mean'] = running_mean
    bn_param['running_var'] = running_var
    return out, cache
def batchnorm_backward(dout, cache):
    """
    Backward pass for batch normalization.

    Propagates gradients node-by-node through the computation graph of the
    forward pass (mean -> xmu -> var -> sqrtvar -> ivar -> xhat -> out),
    following the staged derivation in:
    https://kratzert.github.io/2016/02/12/understanding-the-gradient-flow-through-the-batch-normalization-layer.html

    Inputs:
    - dout: Upstream derivatives, of shape (N, D)
    - cache: Variable of intermediates from batchnorm_forward.

    Returns a tuple of:
    - dx: Gradient with respect to inputs x, of shape (N, D)
    - dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
    - dbeta: Gradient with respect to shift parameter beta, of shape (D,)
    """
    dx, dgamma, dbeta = None, None, None
    # Unpack the intermediates saved by batchnorm_forward.
    x, xhat,xmu, gamma, beta, ivar, sqrtvar, var, sq, sample_mean, eps = cache
    N, D = x.shape
    # out = gamma * xhat + beta: beta is a pure additive shift, so its
    # gradient is the column sum of dout; gamma scales xhat elementwise.
    dbeta = np.sum(dout, axis=0)
    dgamma = np.sum(xhat * dout, axis=0)
    dxhat = gamma * dout
    # xhat = xmu * ivar: split the gradient into the ivar and xmu branches.
    divar = np.sum(xmu * dxhat, axis=0)
    dxmu = ivar * dxhat
    # ivar = 1 / sqrtvar.
    dsqrtvar = -1.0 / (sqrtvar ** 2) * divar
    # sqrtvar = sqrt(var + eps).
    dvar = 0.5 * (var + eps) ** (-0.5) * dsqrtvar # (D,)
    # var = mean(sq, axis=0): each entry of sq receives dvar / N.
    dsq = 1.0 / N * np.ones((N,D)) * dvar # (N,D)
    # sq = xmu ** 2: second contribution flowing into the xmu branch.
    dxmu += 2 * xmu * dsq
    # xmu = x - mean: direct path into x...
    # NOTE: dx aliases dxmu here; dxmu is not read again afterwards.
    dx = dxmu
    # ...plus the mean path (mean = sum(x)/N, minus sign from x - mean).
    dmu = - np.sum(dxmu, axis=0)
    dx += 1.0 / N * np.ones((N,D)) * dmu
    return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
    """
    Alternative backward pass for batch normalization.

    Implements the closed-form gradient instead of walking the computation
    graph node by node (the original left this as an unimplemented stub
    returning (None, None, None)):

        dxhat = dout * gamma
        dx = (ivar / N) * (N*dxhat - sum(dxhat) - xhat * sum(dxhat * xhat))

    where the sums run over the batch axis.  Expects the same cache tuple as
    batchnorm_backward, but only uses xhat, gamma and ivar from it.

    Inputs / outputs: Same as batchnorm_backward
    """
    x, xhat, xmu, gamma, beta, ivar, sqrtvar, var, sq, sample_mean, eps = cache
    N = dout.shape[0]
    # Shift parameter: column sums of the upstream gradient.
    dbeta = np.sum(dout, axis=0)
    # Scale parameter: upstream gradient weighted by the normalized inputs.
    dgamma = np.sum(dout * xhat, axis=0)
    # Collapse the full graph into a single expression for dx.
    dxhat = dout * gamma
    dx = (ivar / N) * (N * dxhat - np.sum(dxhat, axis=0)
                       - xhat * np.sum(dxhat * xhat, axis=0))
    return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
    """
    Forward pass for (inverted) dropout.

    In train mode each unit is kept with probability p and the surviving
    activations are rescaled by 1/p, so test mode is a plain pass-through.

    Inputs:
    - x: Input data, of any shape
    - dropout_param: A dictionary with the following keys:
      - p: Dropout parameter (keep probability per unit).
      - mode: 'test' or 'train'. If the mode is train, then perform dropout;
        if the mode is test, then just return the input.
      - seed: Seed for the random number generator. Passing seed makes this
        function deterministic, which is needed for gradient checking but not
        in real networks.

    Outputs:
    - out: Array of the same shape as x.
    - cache: tuple (dropout_param, mask). In training mode, mask is the
      dropout mask that was used to multiply the input; in test mode, mask
      is None.
    """
    p = dropout_param['p']
    mode = dropout_param['mode']
    if 'seed' in dropout_param:
        np.random.seed(dropout_param['seed'])
    mask = None
    out = None
    if mode == 'train':
        # Boolean keep-mask; inverted dropout rescales here so test-time
        # inference needs no extra scaling.
        mask = np.random.rand(*x.shape) < p
        out = x * mask / p
    elif mode == 'test':
        out = x
    cache = (dropout_param, mask)
    out = out.astype(x.dtype, copy=False)
    return out, cache
def dropout_backward(dout, cache):
    """
    Backward pass for (inverted) dropout.

    Inputs:
    - dout: Upstream derivatives, of any shape
    - cache: (dropout_param, mask) from dropout_forward.
    """
    dropout_param, mask = cache
    mode = dropout_param['mode']
    dx = None
    if mode == 'test':
        # Test-time dropout is the identity, so the gradient passes through.
        dx = dout
    elif mode == 'train':
        # Gradient flows only through kept units, with the same 1/p scaling
        # that was applied in the forward pass.
        dx = mask * dout / dropout_param['p']
    return dx
def conv_forward_naive(x, w, b, conv_param):
    """
    A naive implementation of the forward pass for a convolutional layer.

    The input consists of N data points, each with C channels, height H and
    width W. We convolve each input with F different filters, where each
    filter spans all C channels and has height HH and width WW.

    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': The number of pixels between adjacent receptive fields in
        the horizontal and vertical directions.
      - 'pad': The number of pixels that will be used to zero-pad the input.

    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where H' and W' are given by
      H' = 1 + (H + 2 * pad - HH) / stride
      W' = 1 + (W + 2 * pad - WW) / stride
    - cache: (x, w, b, conv_param)
    """
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    pad = conv_param['pad']
    stride = conv_param['stride']
    # Zero-pad only the two spatial dimensions.
    xpad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant',
                  constant_values=(0,))
    Hout = 1 + (H + 2 * pad - HH) // stride
    Wout = 1 + (W + 2 * pad - WW) // stride
    out = np.zeros((N, F, Hout, Wout), dtype=x.dtype)
    # BUG FIX: the original iterated with xrange, which is a NameError on
    # Python 3 (this module already imports range from builtins for py2/py3
    # compatibility).
    for n in range(N):
        for f in range(F):
            for row in range(Hout):
                for col in range(Wout):
                    hs, ws = row * stride, col * stride
                    window = xpad[n, :, hs:hs + HH, ws:ws + WW]
                    out[n, f, row, col] = np.sum(window * w[f]) + b[f]
    cache = (x, w, b, conv_param)
    return out, cache
def conv_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a convolutional layer.

    Each output element out[n, f, row, col] is a dot product of one padded
    input window with filter w[f] plus b[f]; the backward pass simply
    scatters dout back through those dot products:
      - db accumulates dout per filter,
      - dw accumulates (input window) * dout,
      - dx accumulates w[f] * dout into the padded input, then strips the pad.

    BUG FIXES relative to the original:
      - xrange replaced with range (NameError on Python 3; the module imports
        range from builtins).
      - dx was extracted with dxpad[..., pad:-pad, pad:-pad], which is an
        empty slice when pad == 0; pad:pad+H / pad:pad+W handles pad == 0.
      - the stride-expanded dout was rebuilt inside the innermost pixel
        loops; the scatter formulation removes that redundant work without
        changing the computed gradients.

    Inputs:
    - dout: Upstream derivatives.
    - cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive

    Returns a tuple of:
    - dx: Gradient with respect to x
    - dw: Gradient with respect to w
    - db: Gradient with respect to b
    """
    x, w, b, conv_param = cache
    pad, stride = conv_param['pad'], conv_param['stride']
    N, C, H, W = x.shape
    F, _, HH, WW = w.shape
    _, _, Hout, Wout = dout.shape
    xpad = np.pad(x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), 'constant',
                  constant_values=(0,))
    dxpad = np.zeros_like(xpad)
    dw = np.zeros_like(w)
    db = np.zeros_like(b)
    for n in range(N):
        for f in range(F):
            db[f] += np.sum(dout[n, f])
            for row in range(Hout):
                for col in range(Wout):
                    hs, ws = row * stride, col * stride
                    g = dout[n, f, row, col]
                    # The window that produced out[n, f, row, col].
                    dw[f] += xpad[n, :, hs:hs + HH, ws:ws + WW] * g
                    dxpad[n, :, hs:hs + HH, ws:ws + WW] += w[f] * g
    # Strip the zero padding; works for pad == 0 as well.
    dx = dxpad[:, :, pad:pad + H, pad:pad + W]
    return dx, dw, db
def max_pool_forward_naive(x, pool_param):
    """
    A naive implementation of the forward pass for a max pooling layer.

    Inputs:
    - x: Input data, of shape (N, C, H, W)
    - pool_param: dictionary with the following keys:
      - 'pool_height': The height of each pooling region
      - 'pool_width': The width of each pooling region
      - 'stride': The distance between adjacent pooling regions

    Returns a tuple of:
    - out: Output data, of shape (N, C, Hout, Wout) where
      Hout = 1 + (H - pool_height) // stride
      Wout = 1 + (W - pool_width) // stride
    - cache: (x, pool_param)
    """
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    Hout = 1 + (H - pool_height) // stride
    # BUG FIX: the original computed Wout with pool_height instead of
    # pool_width, giving a wrong output width (and truncated windows) for
    # non-square pooling regions.  Also: xrange -> range for Python 3.
    Wout = 1 + (W - pool_width) // stride
    out = np.zeros((N, C, Hout, Wout), dtype=x.dtype)
    for n in range(N):
        for c in range(C):
            for row in range(Hout):
                for col in range(Wout):
                    hs, ws = row * stride, col * stride
                    out[n, c, row, col] = np.max(
                        x[n, c, hs:hs + pool_height, ws:ws + pool_width])
    cache = (x, pool_param)
    return out, cache
def max_pool_backward_naive(dout, cache):
    """
    A naive implementation of the backward pass for a max pooling layer.

    Only the maximum element of each pooling window received gradient in the
    forward pass, so the upstream derivative is routed entirely to that
    element (first occurrence on ties, matching np.argmax).

    BUG FIX: the original iterated with xrange, a NameError on Python 3
    (this module imports range from builtins for py2/py3 compatibility).

    Inputs:
    - dout: Upstream derivatives
    - cache: A tuple of (x, pool_param) as in the forward pass.

    Returns:
    - dx: Gradient with respect to x
    """
    x, pool_param = cache
    pool_height = pool_param['pool_height']
    pool_width = pool_param['pool_width']
    stride = pool_param['stride']
    N, C, H, W = x.shape
    _, _, Hout, Wout = dout.shape
    dx = np.zeros_like(x)
    for n in range(N):
        for c in range(C):
            for row in range(Hout):
                for col in range(Wout):
                    hs, ws = row * stride, col * stride
                    window = x[n, c, hs:hs + pool_height, ws:ws + pool_width]
                    # Position of the window maximum, in window coordinates.
                    r, q = np.unravel_index(np.argmax(window), window.shape)
                    dx[n, c, hs + r, ws + q] += dout[n, c, row, col]
    return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
    """
    Forward pass for spatial batch normalization.

    Every spatial location of every example is treated as an independent
    sample, so each of the C channels is normalized over N*H*W values by the
    vanilla batchnorm_forward.

    Inputs:
    - x: Input data of shape (N, C, H, W)
    - gamma: Scale parameter, of shape (C,)
    - beta: Shift parameter, of shape (C,)
    - bn_param: Dictionary of batchnorm parameters (see batchnorm_forward)

    Returns a tuple of:
    - out: Output data, of shape (N, C, H, W)
    - cache: Values needed for the backward pass
    """
    N, C, H, W = x.shape
    # Put channels last, flatten everything else, normalize, then undo the
    # layout change.  The (0, 3, 2, 1) order must match the one used in
    # spatial_batchnorm_backward so the cache rows line up with dout rows.
    channels_last = x.transpose(0, 3, 2, 1).reshape((-1, C))
    normalized, cache = batchnorm_forward(channels_last, gamma, beta, bn_param)
    out = normalized.reshape((N, W, H, C)).transpose(0, 3, 2, 1)
    return out, cache
def spatial_batchnorm_backward(dout, cache):
    """
    Backward pass for spatial batch normalization.

    Applies the vanilla batchnorm_backward to the channels-last flattened
    view of dout, then restores the (N, C, H, W) layout.

    Inputs:
    - dout: Upstream derivatives, of shape (N, C, H, W)
    - cache: Values from the forward pass

    Returns a tuple of:
    - dx: Gradient with respect to inputs, of shape (N, C, H, W)
    - dgamma: Gradient with respect to scale parameter, of shape (C,)
    - dbeta: Gradient with respect to shift parameter, of shape (C,)
    """
    N, C, H, W = dout.shape
    # Same (0, 3, 2, 1) layout as spatial_batchnorm_forward, so the cache
    # rows produced there line up with these dout rows.
    flat = dout.transpose(0, 3, 2, 1).reshape((-1, C))
    dx_flat, dgamma, dbeta = batchnorm_backward(flat, cache)
    dx = dx_flat.reshape((N, W, H, C)).transpose(0, 3, 2, 1)
    return dx, dgamma, dbeta
def svm_loss(x, y):
    """
    Computes the loss and gradient for multiclass SVM classification with a
    margin of 1.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    rows = np.arange(num_train)
    correct = x[rows, y][:, np.newaxis]
    # Hinge margins; the correct class contributes nothing.
    margins = np.maximum(0, x - correct + 1.0)
    margins[rows, y] = 0
    loss = margins.sum() / num_train
    # Each violating class pushes its own score up and the correct class
    # score down by the number of violations in that row.
    grad = (margins > 0).astype(x.dtype)
    grad[rows, y] -= grad.sum(axis=1)
    grad /= num_train
    return loss, grad
def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth
      class for the ith input.
    - y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
      0 <= y[i] < C

    Returns a tuple of:
    - loss: Scalar giving the loss
    - dx: Gradient of the loss with respect to x
    """
    num_train = x.shape[0]
    rows = np.arange(num_train)
    # Subtract the row maximum for numerical stability before exponentiating.
    shifted = x - x.max(axis=1, keepdims=True)
    log_norm = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_probs = shifted - log_norm
    loss = -log_probs[rows, y].mean()
    # Gradient of cross-entropy wrt scores: softmax probs minus one-hot y.
    dx = np.exp(log_probs)
    dx[rows, y] -= 1
    dx /= num_train
    return loss, dx
|
gutouyu/cs231n
|
cs231n/assignment/assignment2/cs231n/layers.py
|
Python
|
mit
| 32,482
|
[
"NEURON"
] |
2e9a98b625f1f01e020e768c47f3aa276f9fcebfde924e910a3360d02d7ba173
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import numpy as np
import six
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
  @property
  def _dtypes_to_test(self):
    """Dtypes each test method loops over; subclasses may override."""
    # TODO(langmore) Test tf.float16 once tf.matrix_solve works in 16bit.
    return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
  @abc.abstractproperty
  def _shapes_to_test(self):
    """Returns list of tuples, each is one shape that will be tested."""
    # Abstract: concrete subclasses supply the (batch) matrix shapes.
    raise NotImplementedError("shapes_to_test has not been implemented.")
  @abc.abstractmethod
  def _operator_and_mat_and_feed_dict(self, shape, dtype, use_placeholder):
    """Build a batch matrix and an Operator that should have similar behavior.

    Every operator acts like a (batch) matrix.  This method returns both
    together, and is used by the test methods below to compare the operator
    against plain dense-matrix ops.

    Args:
      shape: List-like of Python integers giving full shape of operator.
      dtype: Numpy dtype.  Data type of returned array/operator.
      use_placeholder: Python bool.  If True, initialize the operator with a
        placeholder of undefined shape and correct dtype.

    Returns:
      operator: `LinearOperator` subclass instance.
      mat: `Tensor` representing operator.
      feed_dict: Dictionary.
        If use_placeholder is True, this must contain everything needed to be
        fed to sess.run calls at runtime to make the operator work.
    """
    # Create a matrix as a numpy array with desired shape/dtype.
    # Create a LinearOperator that should have the same behavior as the matrix.
    raise NotImplementedError("Not implemented yet.")
  @abc.abstractmethod
  def _make_rhs(self, operator, adjoint):
    """Make a rhs appropriate for calling operator.solve(rhs).

    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`.  If `True`, we are making a 'rhs' value for the
        adjoint operator.

    Returns:
      A `Tensor`
    """
    # Abstract: implemented by the concrete per-operator test subclasses.
    raise NotImplementedError("_make_rhs is not defined.")
  @abc.abstractmethod
  def _make_x(self, operator, adjoint):
    """Make an 'x' appropriate for calling operator.apply(x).

    Args:
      operator: A `LinearOperator`
      adjoint: Python `bool`.  If `True`, we are making an 'x' value for the
        adjoint operator.

    Returns:
      A `Tensor`
    """
    # Abstract: implemented by the concrete per-operator test subclasses.
    raise NotImplementedError("_make_x is not defined.")
  @property
  def _tests_to_skip(self):
    """List of test names to skip for this operator class."""
    # Subclasses should over-ride if they want to skip some tests.
    # To skip "test_foo", add "foo" to this list.
    return []
def _skip_if_tests_to_skip_contains(self, test_name):
"""If self._tests_to_skip contains test_name, raise SkipTest exception.
See tests below for usage.
Args:
test_name: String name corresponding to a test.
Raises:
SkipTest Exception, if test_name is in self._tests_to_skip.
"""
if test_name in self._tests_to_skip:
self.skipTest("%s skipped because it was added to self._tests_to_skip.")
  def test_to_dense(self):
    """operator.to_dense() should reproduce the backing matrix."""
    self._skip_if_tests_to_skip_contains("to_dense")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          with self.test_session(graph=ops.Graph()) as sess:
            # Fix the graph seed so the operator/matrix pair is reproducible.
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_dense = operator.to_dense()
            if not use_placeholder:
              # Static shape is only known when not fed via placeholder.
              self.assertAllEqual(shape, op_dense.get_shape())
            op_dense_v, mat_v = sess.run([op_dense, mat], feed_dict=feed_dict)
            self.assertAC(op_dense_v, mat_v)
  def test_det(self):
    """operator.determinant() should match tf.matrix_determinant(mat)."""
    self._skip_if_tests_to_skip_contains("det")
    for use_placeholder in False, True:
      for shape in self._shapes_to_test:
        for dtype in self._dtypes_to_test:
          if dtype.is_complex:
            self.skipTest(
                "tf.matrix_determinant does not work with complex, so this "
                "test is being skipped.")
          with self.test_session(graph=ops.Graph()) as sess:
            # Fix the graph seed so the operator/matrix pair is reproducible.
            sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
            operator, mat, feed_dict = self._operator_and_mat_and_feed_dict(
                shape, dtype, use_placeholder=use_placeholder)
            op_det = operator.determinant()
            if not use_placeholder:
              # Determinant drops the trailing two matrix dims, leaving the
              # batch shape.
              self.assertAllEqual(shape[:-2], op_det.get_shape())
            op_det_v, mat_det_v = sess.run(
                [op_det, linalg_ops.matrix_determinant(mat)],
                feed_dict=feed_dict)
            self.assertAC(op_det_v, mat_det_v)
def test_log_abs_det(self):
    """Compares operator.log_abs_determinant() against a dense computation."""
    self._skip_if_tests_to_skip_contains("log_abs_det")
    for use_placeholder in (False, True):
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                if dtype.is_complex:
                    self.skipTest(
                        "tf.matrix_determinant does not work with complex, so this "
                        "test is being skipped.")
                with self.test_session(graph=ops.Graph()) as sess:
                    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                    operator, mat, feed_dict = (
                        self._operator_and_mat_and_feed_dict(
                            shape, dtype, use_placeholder=use_placeholder))
                    log_abs_det = operator.log_abs_determinant()
                    expected = math_ops.log(
                        math_ops.abs(linalg_ops.matrix_determinant(mat)))
                    # Only batch dims remain after the determinant.
                    if not use_placeholder:
                        self.assertAllEqual(shape[:-2], log_abs_det.get_shape())
                    log_abs_det_v, expected_v = sess.run(
                        [log_abs_det, expected], feed_dict=feed_dict)
                    self.assertAC(log_abs_det_v, expected_v)
def test_apply(self):
    """Compares operator.apply(x) with dense matmul, including adjoint."""
    self._skip_if_tests_to_skip_contains("apply")
    for use_placeholder in (False, True):
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                for adjoint in (False, True):
                    with self.test_session(graph=ops.Graph()) as sess:
                        sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                        operator, mat, feed_dict = (
                            self._operator_and_mat_and_feed_dict(
                                shape, dtype, use_placeholder=use_placeholder))
                        x = self._make_x(operator, adjoint=adjoint)
                        applied = operator.apply(x, adjoint=adjoint)
                        expected = math_ops.matmul(mat, x, adjoint_a=adjoint)
                        if not use_placeholder:
                            self.assertAllEqual(
                                applied.get_shape(), expected.get_shape())
                        applied_v, expected_v = sess.run(
                            [applied, expected], feed_dict=feed_dict)
                        self.assertAC(applied_v, expected_v)
def test_solve(self):
    """Compares operator.solve(rhs) with tf.matrix_solve on the dense matrix."""
    self._skip_if_tests_to_skip_contains("solve")
    for use_placeholder in (False, True):
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                for adjoint in (False, True):
                    with self.test_session(graph=ops.Graph()) as sess:
                        sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                        operator, mat, feed_dict = (
                            self._operator_and_mat_and_feed_dict(
                                shape, dtype, use_placeholder=use_placeholder))
                        rhs = self._make_rhs(operator, adjoint=adjoint)
                        solved = operator.solve(rhs, adjoint=adjoint)
                        expected = linalg_ops.matrix_solve(
                            mat, rhs, adjoint=adjoint)
                        if not use_placeholder:
                            self.assertAllEqual(
                                solved.get_shape(), expected.get_shape())
                        solved_v, expected_v = sess.run(
                            [solved, expected], feed_dict=feed_dict)
                        self.assertAC(solved_v, expected_v)
def test_add_to_tensor(self):
    """operator.add_to_tensor(2 * mat) should equal 3 * mat."""
    self._skip_if_tests_to_skip_contains("add_to_tensor")
    for use_placeholder in (False, True):
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                with self.test_session(graph=ops.Graph()) as sess:
                    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                    operator, mat, feed_dict = (
                        self._operator_and_mat_and_feed_dict(
                            shape, dtype, use_placeholder=use_placeholder))
                    total = operator.add_to_tensor(2 * mat)
                    if not use_placeholder:
                        self.assertAllEqual(shape, total.get_shape())
                    total_v, mat_v = sess.run(
                        [total, mat], feed_dict=feed_dict)
                    # operator represents mat, so operator + 2*mat == 3*mat.
                    self.assertAC(total_v, 3 * mat_v)
def test_diag_part(self):
    """Compares operator.diag_part() with tf.matrix_diag_part of the matrix."""
    self._skip_if_tests_to_skip_contains("diag_part")
    for use_placeholder in (False, True):
        for shape in self._shapes_to_test:
            for dtype in self._dtypes_to_test:
                with self.test_session(graph=ops.Graph()) as sess:
                    sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
                    operator, mat, feed_dict = (
                        self._operator_and_mat_and_feed_dict(
                            shape, dtype, use_placeholder=use_placeholder))
                    diag = operator.diag_part()
                    expected_diag = array_ops.matrix_diag_part(mat)
                    if not use_placeholder:
                        self.assertAllEqual(
                            expected_diag.get_shape(), diag.get_shape())
                    diag_v, expected_diag_v = sess.run(
                        [diag, expected_diag], feed_dict=feed_dict)
                    self.assertAC(diag_v, expected_diag_v)
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
    """Base test class appropriate for square operators.

    Sub-classes must still define all abstractmethods from
    LinearOperatorDerivedClassTest that are not defined here.
    """

    @property
    def _shapes_to_test(self):
        # Non-batch (n, n) shapes as well as batched ones.
        return [(0, 0), (1, 1), (1, 3, 3), (3, 4, 4), (2, 1, 4, 4)]

    def _make_rhs(self, operator, adjoint):
        # Square operator: rhs and x have the same shape, and the adjoint
        # value cannot change that shape, but pass it through (negated) to
        # stay pedantic.
        return self._make_x(operator, adjoint=not adjoint)

    def _make_x(self, operator, adjoint):
        # adjoint is irrelevant since the operator is square.
        # Draw a random [B1,...,Bb, N, R] matrix, with R in {1, 2}.
        num_systems = self._get_num_systems(operator)
        if operator.shape.is_fully_defined():
            batch = operator.batch_shape.as_list()
            n = operator.domain_dimension.value
            x_shape = batch + [n, num_systems]
        else:
            batch = operator.batch_shape_tensor()
            n = operator.domain_dimension_tensor()
            x_shape = array_ops.concat((batch, [n, num_systems]), 0)
        return random_normal(x_shape, dtype=operator.dtype)

    def _get_num_systems(self, operator):
        """Return 1 or 2 depending on the operator's static rank parity."""
        if operator.tensor_rank is None or operator.tensor_rank % 2:
            return 1
        return 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
    """Base test class appropriate for generic rectangular operators.

    Square shapes are never tested by this class, so if you want to test your
    operator with a square shape, create two test classes, the other
    subclassing SquareLinearOperatorFullMatrixTest.

    Sub-classes must still define all abstractmethods from
    LinearOperatorDerivedClassTest that are not defined here.
    """

    @property
    def _tests_to_skip(self):
        # Determinants and solves are only defined for square operators.
        return ["solve", "det", "log_abs_det"]

    @property
    def _shapes_to_test(self):
        # Rectangular non-batch (m, n) shapes as well as batched ones.
        return [(2, 1), (1, 2), (1, 3, 2), (3, 3, 4), (2, 1, 2, 4)]

    def _make_rhs(self, operator, adjoint):
        # TODO(langmore) Add once we're testing solve_ls.
        raise NotImplementedError(
            "_make_rhs not implemented because we don't test solve")

    def _make_x(self, operator, adjoint):
        # Build the argument 'x' for operator.apply(x): a random matrix with
        # R in {1, 2} columns and row count matching what apply() expects.
        num_systems = self._get_num_systems(operator)
        if operator.shape.is_fully_defined():
            batch = operator.batch_shape.as_list()
            if adjoint:
                rows = operator.range_dimension.value
            else:
                rows = operator.domain_dimension.value
            x_shape = batch + [rows, num_systems]
        else:
            batch = operator.batch_shape_tensor()
            if adjoint:
                rows = operator.range_dimension_tensor()
            else:
                rows = operator.domain_dimension_tensor()
            x_shape = array_ops.concat((batch, [rows, num_systems]), 0)
        return random_normal(x_shape, dtype=operator.dtype)

    def _get_num_systems(self, operator):
        """Return 1 or 2 depending on the operator's static rank parity."""
        if operator.tensor_rank is None or operator.tensor_rank % 2:
            return 1
        return 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
    """[batch] positive definite matrix.

    Args:
      shape: `TensorShape` or Python list.  Shape of the returned matrix.
      dtype: `TensorFlow` `dtype` or Python dtype.
      force_well_conditioned: Python bool.  If `True`, returned matrix has
        eigenvalues with modulus in `(1, 4)`.  Otherwise, eigenvalues are
        chi-squared random variables.

    Returns:
      `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    if not contrib_tensor_util.is_tensor(shape):
        shape = tensor_shape.TensorShape(shape)
        # A static shape must describe a square matrix.
        shape[-1].assert_is_compatible_with(shape[-2])
    with ops.name_scope("random_positive_definite_matrix"):
        # L L^H is positive definite for (almost surely) nonsingular L.
        chol = random_tril_matrix(
            shape, dtype, force_well_conditioned=force_well_conditioned)
        return math_ops.matmul(chol, chol, adjoint_b=True)
def random_tril_matrix(shape,
                       dtype,
                       force_well_conditioned=False,
                       remove_upper=True):
    """[batch] lower triangular matrix.

    Args:
      shape: `TensorShape` or Python `list`.  Shape of the returned matrix.
      dtype: `TensorFlow` `dtype` or Python dtype.
      force_well_conditioned: Python `bool`.  If `True`, returned matrix will
        have eigenvalues with modulus in `(1, 2)`.  Otherwise, eigenvalues are
        unit normal random variables.
      remove_upper: Python `bool`.
        If `True`, zero out the strictly upper triangle.
        If `False`, the lower triangle of returned matrix will have desired
        properties, but will not have the strictly upper triangle zero'd out.

    Returns:
      `Tensor` with desired shape and dtype.
    """
    with ops.name_scope("random_tril_matrix"):
        # Start from an unstructured Gaussian matrix.
        mat = random_normal(shape, dtype=dtype)
        if remove_upper:
            mat = array_ops.matrix_band_part(mat, -1, 0)
        if force_well_conditioned:
            # Overwrite the diagonal with entries whose real/imag parts have
            # magnitude in [1, sqrt(2)].
            maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
            diag = random_sign_uniform(
                shape[:-1], dtype=dtype, minval=1., maxval=maxval)
            mat = array_ops.matrix_set_diag(mat, diag)
        return mat
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
    """Tensor with (possibly complex) Gaussian entries.

    Samples are distributed like

    ```
    N(mean, stddev^2), if dtype is real,
    X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
    ```

    Args:
      shape: `TensorShape` or Python list.  Shape of the returned tensor.
      mean: `Tensor` giving mean of normal to sample from.
      stddev: `Tensor` giving stdev of normal to sample from.
      dtype: `TensorFlow` `dtype` or numpy dtype.
      seed: Python integer seed for the RNG.

    Returns:
      `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_normal"):
        real_part = random_ops.random_normal(
            shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
        if not dtype.is_complex:
            return real_part
        # Shift the seed so the imaginary draw differs from the real one while
        # remaining deterministic for a fixed seed.
        if seed is not None:
            seed += 1234
        imag_part = random_ops.random_normal(
            shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
        return math_ops.complex(real_part, imag_part)
def random_uniform(shape,
                   minval=None,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None):
    """Tensor with (possibly complex) Uniform entries.

    Samples are distributed like

    ```
    Uniform[minval, maxval], if dtype is real,
    X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
    ```

    Args:
      shape: `TensorShape` or Python list.  Shape of the returned tensor.
      minval: `0-D` `Tensor` giving the minimum values.
      maxval: `0-D` `Tensor` giving the maximum values.
      dtype: `TensorFlow` `dtype` or Python dtype.
      seed: Python integer seed for the RNG.

    Returns:
      `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_uniform"):
        real_part = random_ops.random_uniform(
            shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
            seed=seed)
        if not dtype.is_complex:
            return real_part
        # Shift the seed so the imaginary draw differs from the real one while
        # remaining deterministic for a fixed seed.
        if seed is not None:
            seed += 12345
        imag_part = random_ops.random_uniform(
            shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
            seed=seed)
        return math_ops.complex(real_part, imag_part)
def random_sign_uniform(shape,
                        minval=None,
                        maxval=None,
                        dtype=dtypes.float32,
                        seed=None):
    """Tensor with (possibly complex) random entries from a "sign Uniform".

    Letting `Z` be a random variable equal to `-1` and `1` with equal
    probability, samples from this `Op` are distributed like

    ```
    Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
    Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
    ```

    Args:
      shape: `TensorShape` or Python list.  Shape of the returned tensor.
      minval: `0-D` `Tensor` giving the minimum values.
      maxval: `0-D` `Tensor` giving the maximum values.
      dtype: `TensorFlow` `dtype` or Python dtype.
      seed: Python integer seed for the RNG.

    Returns:
      `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_sign_uniform"):
        magnitudes = random_uniform(
            shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
        # Use a shifted seed for the sign draw so it is independent of the
        # magnitude draw, yet deterministic for a fixed seed.
        if seed is not None:
            seed += 12
        signs = math_ops.sign(
            random_ops.random_uniform(
                shape, minval=-1., maxval=1., seed=seed))
        return magnitudes * math_ops.cast(signs, magnitudes.dtype)
def random_normal_correlated_columns(
        shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, eps=1e-4, seed=None):
    """Batch matrix with (possibly complex) Gaussian entries and correlated cols.

    Returns random batch matrix `A` with specified element-wise `mean`,
    `stddev`, living close to an embedded hyperplane.

    Suppose `shape[-2:] = (M, N)`.

    If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian
    entries.

    If `M >= N`, then the colums of `A` will be made almost dependent as
    follows:

    ```
    L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
    B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
    G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
    E = a random normal M x N matrix, mean = 0, stddev = eps
    mu = a constant M x N matrix, equal to the argument "mean"
    A = G + E + mu
    ```

    Args:
      shape: Python list of integers.
        Shape of the returned tensor.  Must be at least length two.
      mean: `Tensor` giving mean of normal to sample from.
      stddev: `Tensor` giving stdev of normal to sample from.
      dtype: `TensorFlow` `dtype` or numpy dtype.
      eps: Distance each column is perturbed from the low-dimensional subspace.
      seed: Python integer seed for the RNG.

    Returns:
      `Tensor` with desired shape and dtype.

    Raises:
      ValueError: If `shape` is not at least length 2.
    """
    dtype = dtypes.as_dtype(dtype)
    if len(shape) < 2:
        raise ValueError(
            "Argument shape must be at least length 2. Found: %s" % shape)
    # Final shape is [..., M, N].
    shape = list(shape)
    batch_shape = shape[:-2]
    m, n = shape[-2:]
    # If there is only one column, "they" are by definition correlated.
    # NOTE(review): the docstring says iid entries are returned when M < N,
    # but this condition triggers when N < M; confirm intent before changing.
    if n < 2 or n < m:
        return random_normal(
            shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
    # A matrix with only n - 1 columns, to be embedded in the larger space.
    low_rank_shape = batch_shape + [m, n - 1]
    # Embedding matrix mapping batch matrices from [..., N-1, M] to [..., N, M].
    embedding_shape = batch_shape + [n, n - 1]
    # This stddev for the embedding ensures the final result has the correct
    # stddev.
    embedding_stddev = 1 / np.sqrt(n - 1)
    with ops.name_scope("random_normal_correlated_columns"):
        low_rank_factor = random_normal(
            low_rank_shape, mean=0.0, stddev=embedding_stddev, dtype=dtype,
            seed=seed)
        if seed is not None:
            seed += 1287
        embedding = random_normal(embedding_shape, dtype=dtype, seed=seed)
        on_hyperplane_t = math_ops.matmul(
            embedding, low_rank_factor, transpose_b=True)
        on_hyperplane = array_ops.matrix_transpose(on_hyperplane_t)
        mean_mat = array_ops.ones_like(on_hyperplane) * mean
        return (on_hyperplane
                + random_normal(shape, stddev=eps, dtype=dtype)
                + mean_mat)
|
strint/tensorflow
|
tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
|
Python
|
apache-2.0
| 24,190
|
[
"Gaussian"
] |
096d939dda47ac0d87bc84031493e095729ed14a30babe5c8fb73462b505a683
|
#!/usr/bin/env python
########################################################################
# File : dirac-admin-get-proxy
# Author : Stuart Paterson
########################################################################
"""
Retrieve a delegated proxy for the given user and group
"""
from __future__ import print_function
import os
import DIRAC
from DIRAC import gLogger
from DIRAC.Core.Base import Script
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
__RCSID__ = "$Id$"
class Params(object):
  """Holds the command-line switch values for dirac-admin-get-proxy."""

  limited = False          # request a limited proxy
  proxyPath = False        # output file; computed later if not given
  proxyLifeTime = 86400    # requested proxy validity, in seconds (24h default)
  enableVOMS = False       # add a VOMS extension to the proxy
  vomsAttr = False         # explicit VOMS attribute to require

  def setLimited(self, args):
    """Switch handler: request a limited proxy."""
    self.limited = True
    return DIRAC.S_OK()

  def setProxyLocation(self, args):
    """Switch handler: set the file the proxy will be written to."""
    self.proxyPath = args
    return DIRAC.S_OK()

  def setProxyLifeTime(self, arg):
    """Switch handler: parse a HH:MM string into a lifetime in seconds."""
    try:
      fields = [f.strip() for f in arg.split(":")]
      self.proxyLifeTime = int(fields[0]) * 3600 + int(fields[1]) * 60
    except (ValueError, IndexError):
      # Narrowed from BaseException: only parsing errors are expected here,
      # and BaseException would also swallow KeyboardInterrupt/SystemExit.
      gLogger.notice("Can't parse %s time! Is it a HH:MM?" % arg)
      return DIRAC.S_ERROR("Can't parse time argument")
    return DIRAC.S_OK()

  def automaticVOMS(self, arg):
    """Switch handler: enable a VOMS extension mapped to the DIRAC group."""
    self.enableVOMS = True
    return DIRAC.S_OK()

  def setVOMSAttr(self, arg):
    """Switch handler: enable VOMS with an explicit attribute."""
    self.enableVOMS = True
    self.vomsAttr = arg
    return DIRAC.S_OK()

  def registerCLISwitches(self):
    """Register all switches with the DIRAC Script machinery."""
    Script.registerSwitch("v:", "valid=", "Valid HH:MM for the proxy. By default is 24 hours", self.setProxyLifeTime)
    Script.registerSwitch("l", "limited", "Get a limited proxy", self.setLimited)
    Script.registerSwitch("u:", "out=", "File to write as proxy", self.setProxyLocation)
    Script.registerSwitch("a", "voms", "Get proxy with VOMS extension mapped to the DIRAC group", self.automaticVOMS)
    Script.registerSwitch("m:", "vomsAttr=", "VOMS attribute to require", self.setVOMSAttr)
# --- Script body: parse arguments, resolve the DN, download and save proxy ---
params = Params()
params.registerCLISwitches()
Script.setUsageMessage('\n'.join([__doc__.split('\n')[1],
                                  'Usage:',
                                  ' %s [option|cfgfile] ... <DN|user> group' % Script.scriptName,
                                  'Arguments:',
                                  ' DN: DN of the user',
                                  ' user: DIRAC user name (will fail if there is more than 1 DN registered)',
                                  ' group: DIRAC group name']))
Script.parseCommandLine(ignoreErrors=True)
args = Script.getPositionalArgs()
if len(args) != 2:
  Script.showHelp()

userGroup = str(args[1])
userDN = str(args[0])
userName = False
if not userDN.startswith("/"):
  # First argument is a username, not a DN: resolve it through the Registry.
  userName = userDN
  retVal = Registry.getDNForUsername(userName)
  if not retVal['OK']:
    gLogger.notice("Cannot discover DN for username %s\n\t%s" % (userName, retVal['Message']))
    DIRAC.exit(2)
  DNList = retVal['Value']
  if len(DNList) > 1:
    gLogger.notice("Username %s has more than one DN registered" % userName)
    for ind, dn in enumerate(DNList):
      gLogger.notice("%d %s" % (ind, dn))
    # Bug fix: raw_input does not exist on Python 3 (NameError); fall back
    # to input there while keeping Python 2 behavior unchanged.
    try:
      _input = raw_input  # Python 2
    except NameError:
      _input = input  # Python 3
    inp = _input("Which DN do you want to download? [default 0] ")
    inp = int(inp) if inp else 0
    userDN = DNList[inp]
  else:
    userDN = DNList[0]

if not params.proxyPath:
  # Default output path: ./proxy.<user>.<group>; requires a resolved username.
  if not userName:
    result = Registry.getUsernameForDN(userDN)
    if not result['OK']:
      gLogger.notice("DN '%s' is not registered in DIRAC" % userDN)
      DIRAC.exit(2)
    userName = result['Value']
  params.proxyPath = "%s/proxy.%s.%s" % (os.getcwd(), userName, userGroup)

if params.enableVOMS:
  result = gProxyManager.downloadVOMSProxy(userDN, userGroup, limited=params.limited,
                                           requiredTimeLeft=params.proxyLifeTime,
                                           requiredVOMSAttribute=params.vomsAttr)
else:
  result = gProxyManager.downloadProxy(userDN, userGroup, limited=params.limited,
                                       requiredTimeLeft=params.proxyLifeTime)
if not result['OK']:
  gLogger.notice('Proxy file cannot be retrieved: %s' % result['Message'])
  DIRAC.exit(2)
chain = result['Value']
result = chain.dumpAllToFile(params.proxyPath)
if not result['OK']:
  gLogger.notice('Proxy file cannot be written to %s: %s' % (params.proxyPath, result['Message']))
  DIRAC.exit(2)
gLogger.notice("Proxy downloaded to %s" % params.proxyPath)
DIRAC.exit(0)
|
chaen/DIRAC
|
FrameworkSystem/scripts/dirac-admin-get-proxy.py
|
Python
|
gpl-3.0
| 4,434
|
[
"DIRAC"
] |
eb89167f0a9cf503bcdb00f54f5b603a1173473081d55d754691b0514169dc01
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Activity analysis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import gast
from tensorflow.contrib.py2tf.pyct import anno
from tensorflow.contrib.py2tf.pyct import transformer
from tensorflow.contrib.py2tf.pyct.static_analysis.annos import NodeAnno
# TODO(mdan): Add support for PY3 (e.g. Param vs arg).
class Scope(object):
    """Encloses local symbol definition and usage information.

    This can track for instance whether a symbol is modified in the current
    scope. Note that scopes do not necessarily align with Python's scopes; for
    example, the body of an if statement may be considered a separate scope.

    Attributes:
      modified: identifiers modified in this scope
      created: identifiers created in this scope
      used: identifiers referenced in this scope
    """

    def __init__(self, parent, isolated=True):
        """Create a new scope.

        Args:
          parent: A Scope or None.
          isolated: Whether the scope is isolated, that is, whether variables
              created in this scope should be visible to the parent scope.
        """
        self.parent = parent
        self.isolated = isolated
        self.modified = set()
        self.created = set()
        self.used = set()
        self.params = set()
        self.returned = set()

    # TODO(mdan): Rename to `locals`
    @property
    def referenced(self):
        if self.parent is not None and not self.isolated:
            return self.used | self.parent.referenced
        return self.used

    def __repr__(self):
        return 'Scope{r=%s, c=%s, w=%s}' % (tuple(self.used),
                                            tuple(self.created),
                                            tuple(self.modified))

    def copy_from(self, other):
        """Overwrite this scope's sets with copies of other's."""
        self.modified = set(other.modified)
        self.created = set(other.created)
        self.used = set(other.used)
        self.params = set(other.params)
        self.returned = set(other.returned)

    def merge_from(self, other):
        """Union other's sets into this scope's sets."""
        self.modified.update(other.modified)
        self.created.update(other.created)
        self.used.update(other.used)
        self.params.update(other.params)
        self.returned.update(other.returned)

    def has(self, name):
        """Whether name is written or is a param here or in an ancestor."""
        if name in self.modified or name in self.params:
            return True
        if self.parent is not None:
            return self.parent.has(name)
        return False

    def is_modified_since_entry(self, name):
        if name in self.modified:
            return True
        if self.parent is not None and not self.isolated:
            return self.parent.is_modified_since_entry(name)
        return False

    def is_param(self, name):
        if name in self.params:
            return True
        if self.parent is not None and not self.isolated:
            return self.parent.is_param(name)
        return False

    def mark_read(self, name):
        self.used.add(name)
        # Reads of symbols not created locally are also reads in the parent.
        if self.parent is not None and name not in self.created:
            self.parent.mark_read(name)

    def mark_param(self, name):
        self.params.add(name)

    def mark_creation(self, name):
        if name.is_composite():
            parent = name.parent
            if not self.has(parent):
                raise ValueError('Unknown symbol "%s".' % parent)
            # Writing an attribute/subscript mutates the parent symbol; it is
            # not considered creation of the composite name itself.
            # TODO(mdan): Is that really so?
            return
        self.created.add(name)

    def mark_write(self, name):
        self.modified.add(name)
        if self.isolated:
            self.mark_creation(name)
        elif self.parent is None:
            self.mark_creation(name)
        else:
            # Non-isolated scope: writes propagate to the parent; creation is
            # only recorded if no ancestor already has the symbol.
            if not self.parent.has(name):
                self.mark_creation(name)
            self.parent.mark_write(name)

    def mark_returned(self, name):
        self.returned.add(name)
        if self.parent is not None and not self.isolated:
            self.parent.mark_returned(name)
class ActivityAnalizer(transformer.Base):
    """Annotates nodes with local scope information. See Scope.

    Walks the AST keeping a current Scope in self.scope; visitors for block
    statements temporarily swap in child scopes and restore the previous one,
    so the exact statement order inside each visitor is significant.
    """

    def __init__(self, context, parent_scope):
        super(ActivityAnalizer, self).__init__(context)
        # Current scope; rooted at parent_scope (which may be None).
        self.scope = Scope(parent_scope)
        # True only while visiting the children of a Return statement.
        self._in_return_statement = False

    def _track_symbol(self, node):
        # Requires the node to carry a qualified-name (QN) annotation.
        qn = anno.getanno(node, anno.Basic.QN)
        if isinstance(node.ctx, gast.Store):
            self.scope.mark_write(qn)
        elif isinstance(node.ctx, gast.Load):
            self.scope.mark_read(qn)
        elif isinstance(node.ctx, gast.Param):
            # Param contexts appear in function defs, so they have the meaning of
            # defining a variable.
            # TODO(mdan): This may be incorrect with nested functions.
            # For nested functions, we'll have to add the notion of hiding args from
            # the parent scope, not writing to them.
            self.scope.mark_creation(qn)
            self.scope.mark_param(qn)
        else:
            raise ValueError('Unknown context %s for node %s.' % (type(node.ctx), qn))
        # Annotate the symbol node with its status relative to the current scope.
        anno.setanno(node, NodeAnno.IS_LOCAL, self.scope.has(qn))
        anno.setanno(node, NodeAnno.IS_MODIFIED_SINCE_ENTRY,
                     self.scope.is_modified_since_entry(qn))
        anno.setanno(node, NodeAnno.IS_PARAM, self.scope.is_param(qn))
        if self._in_return_statement:
            self.scope.mark_returned(qn)

    def visit_Name(self, node):
        self.generic_visit(node)
        self._track_symbol(node)
        return node

    def visit_Attribute(self, node):
        self.generic_visit(node)
        self._track_symbol(node)
        return node

    def visit_Print(self, node):
        # Python 2 print statement: its values are visited in a child scope
        # that is stored under the ARGS_SCOPE annotation.
        current_scope = self.scope
        args_scope = Scope(current_scope)
        self.scope = args_scope
        for n in node.values:
            self.visit(n)
        anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
        self.scope = current_scope
        return node

    def visit_Call(self, node):
        current_scope = self.scope
        # Non-isolated: symbols referenced by the call arguments are also
        # visible to the enclosing scope.
        args_scope = Scope(current_scope, isolated=False)
        self.scope = args_scope
        for n in node.args:
            self.visit(n)
        # TODO(mdan): Account starargs, kwargs
        for n in node.keywords:
            self.visit(n)
        anno.setanno(node, NodeAnno.ARGS_SCOPE, args_scope)
        self.scope = current_scope
        # The function expression itself is visited in the enclosing scope,
        # after the argument scope has been closed.
        self.visit(node.func)
        return node

    def _process_block_node(self, node, block, scope_name):
        """Visit a statement list in its own non-isolated child scope.

        The child scope is stored on `node` under the `scope_name` annotation.
        """
        current_scope = self.scope
        block_scope = Scope(current_scope, isolated=False)
        self.scope = block_scope
        for n in block:
            self.visit(n)
        anno.setanno(node, scope_name, block_scope)
        self.scope = current_scope
        return node

    def _process_parallel_blocks(self, parent, children):
        # Because the scopes are not isolated, processing any child block
        # modifies the parent state causing the other child blocks to be
        # processed incorrectly. So we need to checkpoint the parent scope so that
        # each child sees the same context.
        before_parent = Scope(None)
        before_parent.copy_from(self.scope)
        after_children = []
        for child, scope_name in children:
            # Reset to the pre-block checkpoint before each sibling block.
            self.scope.copy_from(before_parent)
            parent = self._process_block_node(parent, child, scope_name)
            after_child = Scope(None)
            after_child.copy_from(self.scope)
            after_children.append(after_child)
        # Merge the effects of all sibling blocks back into the parent scope.
        for after_child in after_children:
            self.scope.merge_from(after_child)
        return parent

    def visit_If(self, node):
        self.visit(node.test)
        node = self._process_parallel_blocks(node,
                                             ((node.body, NodeAnno.BODY_SCOPE),
                                              (node.orelse, NodeAnno.ORELSE_SCOPE)))
        return node

    def visit_For(self, node):
        self.visit(node.target)
        self.visit(node.iter)
        node = self._process_parallel_blocks(node,
                                             ((node.body, NodeAnno.BODY_SCOPE),
                                              (node.orelse, NodeAnno.ORELSE_SCOPE)))
        return node

    def visit_While(self, node):
        self.visit(node.test)
        node = self._process_parallel_blocks(node,
                                             ((node.body, NodeAnno.BODY_SCOPE),
                                              (node.orelse, NodeAnno.ORELSE_SCOPE)))
        return node

    def visit_Return(self, node):
        # Flag symbol tracking so names visited below are marked as returned.
        self._in_return_statement = True
        node = self.generic_visit(node)
        self._in_return_statement = False
        return node
def resolve(node, context, parent_scope=None):
    """Annotate `node` (in place) with activity/scope information."""
    analyzer = ActivityAnalizer(context, parent_scope)
    return analyzer.visit(node)
|
av8ramit/tensorflow
|
tensorflow/contrib/py2tf/pyct/static_analysis/activity.py
|
Python
|
apache-2.0
| 8,821
|
[
"VisIt"
] |
0b1733e258aabd240fa0fe040a9f22a57073347a444f7606467c568208550cda
|
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.utils.translation import ugettext as _

from djangoedu.ldap.fields import LdapObjectField
try:
    import mptt
except ImportError:
    # Bug fixes: ImproperlyConfigured was referenced without being imported
    # (NameError) -- it is now imported at the top of the file -- and the
    # Python-2-only `raise Exc, "msg"` statement syntax is replaced with the
    # call form, which works on both Python 2 and 3.
    raise ImproperlyConfigured(
        "You're missing django-mptt, go get it here: "
        "http://code.google.com/p/django-mptt/")

# Grab defaults from the settings file, falling back to a standard
# three-semester year.
try:
    SEMESTER_LIST = settings.SEMESTER_LIST
except AttributeError:
    # Narrowed from a bare except: only a missing setting is expected here.
    SEMESTER_LIST = (
        ('2', _('Spring')),
        ('6', _('Summer')),
        ('9', _("Fall")),
    )
class SemesterManager(models.Manager):
    """Custom Semester Manager.

    Extra query provided:

    * ``current_semester([date])``: Returns the current semester or the next
      semester if date is in between semesters. Raises DoesNotExist
      if no matching semester is found. Default date is todays date.
    """

    def current_semester(self, date=None):
        """Returns the current semester.

        Options:

        * ``date``: (Optional) Defaults to todays date, if passed returns
          the semester for that contains the date.

        Example::

            >>> import datetime
            >>> now = datetime.datetime.now()
            >>> tomorrow = now + datetime.timedelta(days=1)
            >>> yesterday = now - datetime.timedelta(days=1)
            >>> next_week = now + datetime.timedelta(weeks=1)

            # No semesters should raise DoesNotExit
            >>> Semester.objects.current_semester()
            Traceback (most recent call last):
            ...
            DoesNotExist

            # Create a current semester
            >>> s = Semester.objects.create(year=now.year,semester='2',sdate=now,edate=tomorrow)
            >>> Semester.objects.current_semester()
            <Semester: ...>

            # Optionally specify the date to check
            >>> Semester.objects.current_semester(date=yesterday)
            <Semester: ...>
            >>> Semester.objects.current_semester(date=next_week)
            Traceback (most recent call last):
            ...
            DoesNotExist
        """
        if not date:
            import datetime
            date = datetime.datetime.now().date()
        try:
            # A semester whose [sdate, edate] interval contains the date.
            return self.get(sdate__lte=date, edate__gte=date)
        except self.model.DoesNotExist:
            # Between semesters: fall back to the next one to start.
            upcoming = self.get_query_set().filter(
                sdate__gte=date).order_by('sdate')
            if upcoming:
                return upcoming[0]
            raise self.model.DoesNotExist
class Semester(models.Model):
    """*Semesters*

    Holds the semesters information, storing the primary key in CCYYS format.
    See: http://www.unece.org/trade/untdid/d03a/tred/tred2379.htm
    This model allows us to sort and print the CCYYS better than with
    template tags.
    """
    # Primary key in CCYYS form; derived from year + semester in save().
    ccyys = models.PositiveIntegerField(primary_key=True, editable=False)
    year = models.PositiveIntegerField(_("Year"))
    semester = models.PositiveIntegerField(_("Semester"), choices=SEMESTER_LIST)
    sdate = models.DateField(_("Start of Semester"))
    edate = models.DateField(_("End of Semester"))

    objects = SemesterManager()

    def __unicode__(self):
        return u"%s %s" % (unicode(self.get_semester_display()), self.year)

    def save(self, *args, **kwargs):
        """Derive the CCYYS primary key, then save.

        Bug fix: accept and forward *args/**kwargs so Django calls like
        save(using=...) keep working.
        """
        self.ccyys = u"%s%s" % (self.year, self.semester)
        super(Semester, self).save(*args, **kwargs)

    def yys(self):
        """Returns just the last three digits of the ccyys."""
        # Bug fix: ccyys is an integer field and integers are not sliceable;
        # convert to text before slicing.
        return unicode(self.ccyys)[2:]

    class Meta:
        ordering = ['-ccyys']
        unique_together = ['year', 'semester']

    class Admin:
        list_display = ('year', 'semester', 'sdate', 'edate')
class eduPersonManager(models.Manager):
    """Custom manager to handle creation.

    Currently identical to models.Manager; kept as an extension point.
    """
    # TODO: override create* and possibly more
class eduPerson(models.Model):
    """*eduPerson*

    This model is designed to map to the LDAP eduPerson schema
    http://www.educause.edu/eduperson/ . The main purpose of this
    django model is to relate your LDAP directory to the other objects
    in django.

    Since everyones LDAP schema could be slightly different we do
    not store any of that info in this model. When a query is executed
    on this model the ``ldap`` field returns a LDAP object, sort of like a
    foreign key field. This allows you to store the info in LDAP as
    your primary source of person data. This also requires that you
    have properly set up your project ``settings.py`` file with the
    following::

        LDAP_SERVER = (required)
        LDAP_SERVER_PORT = (default 389)
        LDAP_SERVER_USER = (default None)
        LDAP_SERVER_USER_PASSWORD = (default no password)

    The eduPerson model is based on person, organizationalPerson and
    inetOrgPerson object classes as included in X.521 so any object
    class that has these same properties should work with eduPerson.
    You specify which object you wish to connect to such as::

        >>> from django.conf import settings
        >>> settings.LDAP_SERVER = 'ldap.utexas.edu'
        >>> p = eduPerson.objects.create(ldap="rm6776")
        >>> p.ldap.givenName
        'Robert'
    """
    user = models.OneToOneField(User, verbose_name=_('User'), primary_key=True, raw_id_admin=True)
    ldap = LdapObjectField(_("LDAP Person Object"), filter_attr='uid')
    active = models.BooleanField(_("Active"), default=True)

    objects = eduPersonManager()

    def _get_ou(self):
        # First organizational unit from LDAP; assumes `ou` is a non-empty
        # multi-valued attribute -- TODO confirm for your directory schema.
        return self.ldap.ou[0]
    department = property(_get_ou)

    def __unicode__(self):
        return unicode(self.user)

    def update_user(self):
        """Update django.contrib.auth User model with info from LDAP."""
        self.user.first_name = self.ldap.givenName[0]
        self.user.last_name = self.ldap.sn[0]
        self.user.email = self.ldap.mail[0]
        self.user.save()

    def save(self, *args, **kwargs):
        """Sync the related User from LDAP, then save.

        Bug fix: accept and forward *args/**kwargs so Django calls like
        save(using=...) keep working.
        """
        self.update_user()
        super(eduPerson, self).save(*args, **kwargs)

    class Admin:
        list_display = ('user', 'ldap', 'department', 'active')
class Organization(models.Model):
    """*Organization*

    A hierarchy of organizations, internal and external.  When adding an
    organization it is inserted alphabetically.  The tree structure of the
    data is maintained by the django-mptt application.  For more
    information please visit the django-mptt project page:

        http://code.google.com/p/django-mptt/

    By using mptt we can store a large number of sub groups and easily
    query only the part we are interested in.  Consider the example::

        Internal Organizations           External Organization
        ----------------------------     ---------------------------
        University of State              IEEE
         |                                |
         +---College of Liberal Arts      +---IEEE Chapter at myU
         |    |
         |    +---Dept. of English
         |    |
         |    +---Dept. of German
         |
         +---College of Science
              |
              +---Dept. of Math

    A query that returns the "College of Liberal Arts" would return only
    the subtree::

        College of Liberal Arts
         |
         +---Dept. of English
         |
         +---Dept. of German

    Resulting in less queries to the database server.
    """
    # Tree parent; NULL for root nodes.  Managed by mptt, hence not editable.
    parent = models.ForeignKey('self', null=True, editable=False,
                               related_name='children')
    name = models.CharField(_("Department Name"), max_length=255)
    abbr = models.CharField(_("Abbreviation"), max_length=25, blank=True)
    website = models.URLField(_("Web Site"), verify_exists=False, blank=True)
    logo = models.ImageField(_("Logo"), upload_to="org/logos/", blank=True, null=True)
    # Optional link to the person responsible for this organization.
    contact = models.ForeignKey(eduPerson, verbose_name=_("Contact Person"),
                                blank=True, null=True)
    # extra 'hidden' MPTT fields
    # (maintained automatically by mptt.register() below; never edit by hand)
    lft = models.PositiveIntegerField(db_index=True, editable=False)
    rght = models.PositiveIntegerField(db_index=True, editable=False)
    tree_id = models.PositiveIntegerField(db_index=True, editable=False)
    level = models.PositiveIntegerField(db_index=True, editable=False)

    def __unicode__(self):
        # Organizations are displayed by their short abbreviation.
        return self.abbr

    class Admin:
        list_display = ('abbr', 'name')
# Register Organization with django-mptt: keeps the lft/rght/tree_id/level
# columns consistent and inserts siblings alphabetically by name.
mptt.register(Organization, order_insertion_by='name')
|
tjnapster555/django-edu
|
djangoedu/core/models.py
|
Python
|
mit
| 8,493
|
[
"VisIt"
] |
12503da7a9802a674e451c4ffebc9d16db91264ff75af62c436e355670bbee5d
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wraps a function body with a `name_scope` of the function name."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import templates
class FunctionNameScopeTransformer(converter.Base):
  """Wrap a function body with a `name_scope` of the function name."""

  def _name_for_current_scope(self):
    """Returns the scope name for the innermost enclosing function.

    Methods are qualified as ``ClassName/method_name``; plain functions use
    their own name.  Leading underscores are stripped, because TensorFlow
    rejects them at the top level of a scope name.
    """
    innermost = self.enclosing_entities[-1]
    if len(self.enclosing_entities) > 1:
      parent = self.enclosing_entities[-2]
      if isinstance(parent, gast.ClassDef):
        # Methods also take the name of their class.
        name = '%s/%s' % (parent.name, innermost.name)
      else:
        name = innermost.name
    else:
      name = innermost.name
    # Sanitize the name.
    # See https://www.tensorflow.org/api_docs/python/tf/Graph#name_scope
    # TensorFlow doesn't like leading underscores at the top level.
    # lstrip also avoids the IndexError that `while name[0] == '_'` raised
    # for names consisting entirely of underscores (e.g. `def __(): ...`).
    name = name.lstrip('_')
    return name

  def visit_FunctionDef(self, node):
    node = self.generic_visit(node)
    unscoped_body = []
    scoped_body = node.body
    if scoped_body:
      first = scoped_body[0]
      if isinstance(first, gast.Expr) and isinstance(first.value, gast.Str):
        # Skip any docstring: it must stay the first statement, outside
        # the generated `with` block.
        unscoped_body = scoped_body[:1]
        scoped_body = scoped_body[1:]
    template = """
      with tf.name_scope(scope_name):
        body
    """
    scoped_body = templates.replace(
        template,
        scope_name=gast.Str(self._name_for_current_scope()),
        body=scoped_body)
    node.body = unscoped_body + scoped_body
    return node
def transform(node, ctx):
  """Applies the function name scoping transformation to `node`."""
  transformer = FunctionNameScopeTransformer(ctx)
  return transformer.visit(node)
|
kobejean/tensorflow
|
tensorflow/python/autograph/converters/name_scopes.py
|
Python
|
apache-2.0
| 2,471
|
[
"VisIt"
] |
59f38289e7219b071bb947822e427547d157ed353835197c06b8e20ee8591654
|
import csv
import numpy as np
import random
import Network as net
import ActivationFunctions as af
# Training data: 4000 rows of 400 binarized pixels + 1 bias feature.
X = np.empty(shape=(4000, 401), dtype=np.int8)
# One-hot digit labels; must be zero-initialized because loadData() only
# sets the single "hot" column per row.
Y = np.zeros(shape=(4000, 10), dtype=np.int8)
# Held-out test split (200 rows).
X_test = np.empty(shape=(200, 401), dtype=np.int8)
# Fix: was np.empty(shape=(200, 401)) — a copy-paste of X_test.  Labels
# must mirror Y: zero-initialized and shaped (rows, 10 classes), otherwise
# the one-hot rows contain uninitialized garbage in 391 extra columns.
Y_test = np.zeros(shape=(200, 10), dtype=np.int8)
def loadData():
    """Populate the module-level X/Y/X_test/Y_test arrays from train.csv.

    Each CSV row is expected to be: label, then 28*28 pixel values.
    Pixels are thresholded at 160 into {0, 1}, the 28x28 image is cropped
    to the central 20x20 window, flattened into the first 400 features,
    and feature index 400 is a constant bias of 1.
    """
    random.seed(1)
    # Scratch buffers reused for every row.
    initElement = np.empty(shape=(28,28))
    fixedElement = np.empty(shape=(401,))
    global X, Y, X_test, Y_test
    # NOTE(review): opening in "rb" for csv.reader is the Python 2 idiom;
    # on Python 3 this needs open('train.csv', newline='') instead.
    with open('train.csv', "rb") as data:
        reader = csv.reader(data)
        for row in reader:
            # line_num is 1-based, so index 0 of X/Y is never filled --
            # TODO confirm that is intended.
            lineNumber = reader.line_num
            # NOTE(review): csv rows are lists, so `row == ''` is never
            # true; `if not row` was presumably meant for blank lines.
            if row == '' or X.shape[0] + X_test.shape[0] <= lineNumber:
                break
            colNum = -1
            for column in row:
                if colNum == -1:
                    # First column is the digit label.
                    # NOTE(review): `column` is a str here; numpy requires
                    # integer indices, so this likely needs int(column).
                    if lineNumber < X.shape[0]:
                        Y[lineNumber,column] = 1
                    else:
                        Y_test[lineNumber - X.shape[0],column] = 1
                else:
                    # Remaining columns are pixels in row-major 28x28 order;
                    # binarize with a fixed threshold of 160.
                    x = colNum // 28
                    y = colNum - x * 28
                    activation = 0
                    if int(column) > 160:
                        activation = 1
                    initElement[x, y] = activation
                colNum += 1
            # Crop the central 20x20 patch (offset 4 on each axis).
            stepX = 4
            stepY = 4
            for x in range(20):
                for y in range(20):
                    fixedElement[x * 20 + y] = initElement[x + stepX, y + stepY]
            # Constant bias input.
            fixedElement[400] = 1
            if lineNumber < X.shape[0]:
                X[lineNumber] = fixedElement
            else:
                X_test[lineNumber - X.shape[0]] = fixedElement
if __name__ == "__main__":
    # Example: load the MNIST-style dataset from csv.
    loadData()

    # Pick an activation function (see ActivationFunctions.py).
    activation = af.Sigmoid()

    # Re-encode the one-hot targets for this activation's output range.
    Y = activation.format_data(Y)
    Y_test = activation.format_data(Y_test)

    # Neural network with the chosen activation and a 200-unit hidden layer.
    network = net.NeuralNetwork(activation, 200)

    # Train with (inputs, targets, learning rate, iterations); train() may
    # be called repeatedly to continue from the current weights.
    network.train(X, Y, 0.001, 10000)
    network.train(X, Y, 0.001, 100)

    # Validate on the held-out split: (inputs, expected outputs, count).
    network.validate(X_test, Y_test, 200)

    # Export the learned weights to JSON.
    network.export_network("Tahn_w_1.json", "Tahn_w_2.json")
|
evalkaz94/neural_network_py
|
Example.py
|
Python
|
mit
| 2,611
|
[
"NEURON"
] |
abccaaf10a688f26a990cb188ac6da0094e244fff603a702b055a4cc6ebea328
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
# Paths skipped by most presubmit checks: mirrored/third-party trees,
# generated files, and directories with their own conventions.
_EXCLUDED_PATHS = (
    r"^breakpad[\\\/].*",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
    r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
    r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
    r"^skia[\\\/].*",
    r"^v8[\\\/].*",
    r".*MakeFile$",
    r".+_autogen\.h$",
    r".+[\\\/]pnacl_shim\.c$",
)
# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'
# Regular expression that matches code only used for test binaries
# (best effort).  Used to exempt test code from production-only checks.
_TEST_CODE_EXCLUDED_PATHS = (
    r'.*[/\\](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_(api|browser|perf|pixel|unit|ui)?test(_[a-z]+)?%s' %
        _IMPLEMENTATION_EXTENSIONS,
    r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.*[/\\](test|tool(s)?)[/\\].*',
    # content_shell is used for running layout tests.
    r'content[/\\]shell[/\\].*',
    # At request of folks maintaining this folder.
    r'chrome[/\\]browser[/\\]automation[/\\].*',
)
# Message shown by _CheckNoProductionCodeUsingTestOnlyFunctions.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.\n'
    'Email joi@chromium.org if you have questions.')

# Message shown by _CheckIncludeOrder.
_INCLUDE_ORDER_WARNING = (
    'Your #include order seems to be broken. Send mail to\n'
    'marja@chromium.org if this is not the case.')
# Objective-C constructs banned from new code, consumed by
# _CheckNoBannedFunctions.  Each entry is:
#   (substring_to_match, explanation_lines, treat_as_error)
# treat_as_error False => presubmit warning; True => blocking error.
_BANNED_OBJC_FUNCTIONS = (
    (
      'addTrackingRect:',
      (
       'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is'
       'prohibited. Please use CrTrackingArea instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'NSTrackingArea',
      (
       'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
       'instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'convertPointFromBase:',
      (
       'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertPointToBase:',
      (
       'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectFromBase:',
      (
       'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectToBase:',
      (
       'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeFromBase:',
      (
       'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeToBase:',
      (
       'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
)
# C++ constructs banned from new code, consumed by _CheckNoBannedFunctions.
# Each entry is:
#   (substring_to_match, explanation_lines, treat_as_error, excluded_paths)
# excluded_paths is a tuple of path regexes exempt from the ban.
_BANNED_CPP_FUNCTIONS = (
    # Make sure that gtest's FRIEND_TEST() macro is not used; the
    # FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
    # used instead since that allows for FLAKY_ and DISABLED_ prefixes.
    (
      'FRIEND_TEST(',
      (
       'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
       'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
      ),
      False,
      (),
    ),
    (
      'ScopedAllowIO',
      (
       'New code should not use ScopedAllowIO. Post a task to the blocking',
       'pool or the FILE thread instead.',
      ),
      True,
      (
        r"^content[\\\/]shell[\\\/]shell_browser_main\.cc$",
        r"^net[\\\/]disk_cache[\\\/]cache_util\.cc$",
      ),
    ),
    (
      'SkRefPtr',
      (
        'The use of SkRefPtr is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoRef',
      (
        'The indirect use of SkRefPtr via SkAutoRef is prohibited. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoTUnref',
      (
        'The use of SkAutoTUnref is dangerous because it implicitly ',
        'converts to a raw pointer. Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
    (
      'SkAutoUnref',
      (
        'The indirect use of SkAutoTUnref through SkAutoUnref is dangerous ',
        'because it implicitly converts to a raw pointer. ',
        'Please use skia::RefPtr instead.'
      ),
      True,
      (),
    ),
)
# Recognized OS_* build macros; presumably consumed by
# _CheckForInvalidOSMacros (defined elsewhere in this file) -- TODO confirm.
_VALID_OS_MACROS = (
    # Please keep sorted.
    'OS_ANDROID',
    'OS_BSD',
    'OS_CAT',       # For testing.
    'OS_CHROMEOS',
    'OS_FREEBSD',
    'OS_IOS',
    'OS_LINUX',
    'OS_MACOSX',
    'OS_NACL',
    'OS_OPENBSD',
    'OS_POSIX',
    'OS_SOLARIS',
    'OS_WIN',
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # We only scan .cc files and the like, as the declaration of
  # for-testing functions in header files are hard to distinguish from
  # calls to such functions without a proper C++ parser.
  file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS
  base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  comment_pattern = input_api.re.compile(r'//.*%s' % base_function_pattern)
  # Excludes qualified names (Foo::BarForTesting) and definitions
  # ("... ForTesting(...) {"), which are declarations rather than calls.
  exclusion_pattern = input_api.re.compile(
    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
      base_function_pattern, base_function_pattern))

  def FilterFile(affected_file):
    # Restrict to implementation files that are neither excluded nor
    # recognizable as test code.
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(file_inclusion_pattern, ),
      black_list=black_list)

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    lines = input_api.ReadFile(f).splitlines()
    # Fix: line numbers were previously 0-based because the counter was
    # incremented only after appending; enumerate(..., 1) reports the
    # conventional 1-based line number.
    for line_number, line in enumerate(lines, 1):
      if (inclusion_pattern.search(line) and
          not comment_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
          '%s:%d\n %s' % (local_path, line_number, line.strip()))

  if problems:
    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
  else:
    return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [ output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST"""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith('test.cc')):
continue
allowWString = False
for line_num, line in f.ChangedContents():
if 'presubmit: allow wstring' in line:
allowWString = True
elif not allowWString and 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
allowWString = False
else:
allowWString = False
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling a cross-platform API that accepts a wstring, '
'fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
'for more information')]
return []
def _CheckNoBannedFunctions(input_api, output_api):
  """Make sure that banned functions are not used."""
  warnings = []
  errors = []

  def _Record(f, line_num, message, error):
    # "error" entries block the commit; the rest only warn.
    sink = errors if error else warnings
    sink.append(' %s:%d:' % (f.LocalPath(), line_num))
    for message_line in message:
      sink.append(' %s' % message_line)

  objc_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
  for f in input_api.AffectedFiles(file_filter=objc_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
        if func_name in line:
          _Record(f, line_num, message, error)

  cpp_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
  for f in input_api.AffectedFiles(file_filter=cpp_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
        # Some bans carry per-entry path exemptions.
        if any(input_api.re.match(item, f.LocalPath())
               for item in excluded_paths):
          continue
        if func_name in line:
          _Record(f, line_num, message, error)

  result = []
  if warnings:
    result.append(output_api.PresubmitPromptWarning(
      'Banned functions were used.\n' + '\n'.join(warnings)))
  if errors:
    result.append(output_api.PresubmitError(
      'Banned functions were used.\n' + '\n'.join(errors)))
  return result
def _CheckNoPragmaOnce(input_api, output_api):
  """Make sure that #pragma once is not used in header files.

  (The previous docstring, "Make sure that banned functions are not
  used.", was a copy/paste from _CheckNoBannedFunctions.)
  """
  files = []
  pattern = input_api.re.compile(r'^#pragma\s+once',
                                 input_api.re.MULTILINE)
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if not f.LocalPath().endswith('.h'):
      continue
    contents = input_api.ReadFile(f)
    if pattern.search(contents):
      files.append(f)
  if files:
    return [output_api.PresubmitError(
        'Do not use #pragma once in header files.\n'
        'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
        files)]
  return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
"""Checks to make sure we don't introduce use of foo ? true : false."""
problems = []
pattern = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Please consider avoiding the "? true : false" pattern if possible.\n' +
'\n'.join(problems))]
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path
  # NOTE(review): every changed line is handed to checkdeps, not only
  # #include lines; presumably CheckAddedCppIncludes filters internally.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue
    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])
  deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
  # Split violations into blocking errors (DISALLOW rules) and warnings
  # (temporarily-allowed rules).
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)
  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    results.append(output_api.PresubmitPromptOrNotify(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckFilePermissions(input_api, output_api):
  """Check that all files have their permissions properly set.

  Delegates to tools/checkperms/checkperms.py and reports anything the
  script prints as a presubmit error.
  """
  args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
          input_api.change.RepositoryRoot()]
  for f in input_api.AffectedFiles():
    args += ['--file', f.LocalPath()]
  # Fix: without stdout=PIPE, Popen.communicate() returns (None, None), so
  # the previous code could never see checkperms.py's output and the check
  # silently passed in all cases.
  checkperms = subprocess.Popen(args, stdout=subprocess.PIPE)
  errors = checkperms.communicate()[0]
  results = []
  if errors:
    results.append(output_api.PresubmitError('checkperms.py failed.',
                                             errors))
  return results
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
"""Makes sure we don't include ui/aura/window_property.h
in header files.
"""
pattern = input_api.re.compile(r'^#include\s*"ui/aura/window_property.h"')
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('.h'):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d' % (f.LocalPath(), line_num))
results = []
if errors:
results.append(output_api.PresubmitError(
'Header files should not include ui/aura/window_property.h', errors))
return results
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
"""Checks that the lines in scope occur in the right order.
1. C system files in alphabetical order
2. C++ system files in alphabetical order
3. Project's .h files
"""
c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
custom_include_pattern = input_api.re.compile(r'\s*#include ".*')
C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)
state = C_SYSTEM_INCLUDES
previous_line = ''
previous_line_num = 0
problem_linenums = []
for line_num, line in scope:
if c_system_include_pattern.match(line):
if state != C_SYSTEM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif cpp_system_include_pattern.match(line):
if state == C_SYSTEM_INCLUDES:
state = CPP_SYSTEM_INCLUDES
elif state == CUSTOM_INCLUDES:
problem_linenums.append((line_num, previous_line_num))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
elif custom_include_pattern.match(line):
if state != CUSTOM_INCLUDES:
state = CUSTOM_INCLUDES
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num))
else:
problem_linenums.append(line_num)
previous_line = line
previous_line_num = line_num
warnings = []
for (line_num, previous_line_num) in problem_linenums:
if line_num in changed_linenums or previous_line_num in changed_linenums:
warnings.append(' %s:%d' % (file_path, line_num))
return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
  """Checks the #include order for the given file f."""
  system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
  # Exclude #include <.../...> includes from the check; e.g., <sys/...> includes
  # often need to appear in a specific order.
  excluded_include_pattern = input_api.re.compile(r'\s*#include \<.*/.*')
  custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
  if_pattern = input_api.re.compile(
      r'\s*#\s*(if|elif|else|endif|define|undef).*')
  # Some files need specialized order of includes; exclude such files from this
  # check.
  uncheckable_includes_pattern = input_api.re.compile(
      r'\s*#include '
      '("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')
  contents = f.NewContents()
  warnings = []
  line_num = 0
  # Handle the special first include. If the first include file is
  # some/path/file.h, the corresponding including file can be some/path/file.cc,
  # some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
  # etc. It's also possible that no special first include exists.
  for line in contents:
    line_num += 1
    if system_include_pattern.match(line):
      # No special first include -> process the line again along with normal
      # includes.
      line_num -= 1
      break
    match = custom_include_pattern.match(line)
    if match:
      match_dict = match.groupdict()
      header_basename = input_api.os_path.basename(
          match_dict['FILE']).replace('.h', '')
      if header_basename not in input_api.os_path.basename(f.LocalPath()):
        # No special first include -> process the line again along with normal
        # includes.
        line_num -= 1
      break
  # Split into scopes: Each region between #if and #endif is its own scope.
  # (Any preprocessor conditional/define line starts a fresh scope, so
  # ordering is only enforced within one region.)
  scopes = []
  current_scope = []
  for line in contents[line_num:]:
    line_num += 1
    if uncheckable_includes_pattern.match(line):
      # This file opted out of the check entirely.
      return []
    if if_pattern.match(line):
      scopes.append(current_scope)
      current_scope = []
    elif ((system_include_pattern.match(line) or
           custom_include_pattern.match(line)) and
          not excluded_include_pattern.match(line)):
      # Only include lines participate in the ordering check.
      current_scope.append((line_num, line))
  scopes.append(current_scope)
  for scope in scopes:
    warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
                                               changed_linenums))
  return warnings
def _CheckIncludeOrder(input_api, output_api):
  """Checks that the #include order is correct.

  1. The corresponding header for source files.
  2. C system files in alphabetical order
  3. C++ system files in alphabetical order
  4. Project's .h files in alphabetical order

  Each region separated by #if, #elif, #else, #endif, #define and #undef follows
  these rules separately.
  """
  warnings = []
  for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith(('.cc', '.h')):
      continue
    changed_linenums = set(line_num for line_num, _ in f.ChangedContents())
    warnings.extend(_CheckIncludeOrderInFile(input_api, f, changed_linenums))
  if not warnings:
    return []
  return [output_api.PresubmitPromptOrNotify(_INCLUDE_ORDER_WARNING,
                                             warnings)]
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
  """Usually this is not intentional and will cause a compile failure."""
  errors = []
  for f in input_api.AffectedFiles():
    errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
  if not errors:
    return []
  return [output_api.PresubmitError(
      'Version control conflict markers found, please resolve.', errors)]
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
  def FilterFile(affected_file):
    """Filter function for use with input_api.AffectedSourceFiles,
    below. This filters out everything except non-test files from
    top-level directories that generally speaking should not hard-code
    service URLs (e.g. src/android_webview/, src/content/ and others).
    """
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
      black_list=(_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST))

  base_pattern = '"[^"]*google\.com[^"]*"'
  comment_pattern = input_api.re.compile('//.*%s' % base_pattern)
  pattern = input_api.re.compile(base_pattern)
  problems = []  # items are (filename, line_number, line)
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      # Commented-out URLs are tolerated; quoted ones in code are not.
      if not comment_pattern.search(line) and pattern.search(line):
        problems.append((f.LocalPath(), line_num, line))
  if not problems:
    return []
  return [output_api.PresubmitPromptOrNotify(
      'Most layers below src/chrome/ should not hardcode service URLs.\n'
      'Are you sure this is correct? (Contact: joi@chromium.org)',
      [' %s:%d: %s' % problem for problem in problems])]
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
"""Makes sure there are no abbreviations in the name of PNG files.
"""
pattern = input_api.re.compile(r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$')
errors = []
for f in input_api.AffectedFiles(include_deletes=False):
if pattern.match(f.LocalPath()):
errors.append(' %s' % f.LocalPath())
results = []
if errors:
results.append(output_api.PresubmitError(
'The name of PNG files should not have abbreviations. \n'
'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
'Contact oshima@chromium.org if you have questions.', errors))
return results
def _DepsFilesToCheck(re, changed_lines):
"""Helper method for _CheckAddedDepsHaveTargetApprovals. Returns
a set of DEPS entries that we should look up."""
results = set()
# This pattern grabs the path without basename in the first
# parentheses, and the basename (if present) in the second. It
# relies on the simple heuristic that if there is a basename it will
# be a header file ending in ".h".
pattern = re.compile(
r"""['"]\+([^'"]+?)(/[a-zA-Z0-9_]+\.h)?['"].*""")
for changed_line in changed_lines:
m = pattern.match(changed_line)
if m:
path = m.group(1)
if not (path.startswith('grit/') or path == 'grit'):
results.add('%s/DEPS' % m.group(1))
return results
def _CheckAddedDepsHaveTargetApprovals(input_api, output_api):
  """When a dependency prefixed with + is added to a DEPS file, we
  want to make sure that the change is reviewed by an OWNER of the
  target file or directory, to avoid layering violations from being
  introduced. This check verifies that this happens.
  """
  # Collect every changed line of every DEPS file in the change.
  changed_lines = set()
  for f in input_api.AffectedFiles():
    filename = input_api.os_path.basename(f.LocalPath())
    if filename == 'DEPS':
      changed_lines |= set(line.strip()
                           for line_num, line
                           in f.ChangedContents())
  if not changed_lines:
    return []
  # Map "+path" additions to the DEPS files whose OWNERS must approve.
  virtual_depended_on_files = _DepsFilesToCheck(input_api.re, changed_lines)
  if not virtual_depended_on_files:
    return []
  if input_api.is_committing:
    if input_api.tbr:
      return [output_api.PresubmitNotifyResult(
          '--tbr was specified, skipping OWNERS check for DEPS additions')]
    if not input_api.change.issue:
      return [output_api.PresubmitError(
          "DEPS approval by OWNERS check failed: this change has "
          "no Rietveld issue number, so we can't check it for approvals.")]
    # Missing approvals block a commit, but only notify at upload time.
    output = output_api.PresubmitError
  else:
    output = output_api.PresubmitNotifyResult
  owners_db = input_api.owners_db
  owner_email, reviewers = input_api.canned_checks._RietveldOwnerAndReviewers(
      input_api,
      owners_db.email_regexp,
      approval_needed=input_api.is_committing)
  # Fall back to the change author if Rietveld didn't report an owner.
  owner_email = owner_email or input_api.change.author_email
  reviewers_plus_owner = set(reviewers)
  if owner_email:
    reviewers_plus_owner.add(owner_email)
  missing_files = owners_db.files_not_covered_by(virtual_depended_on_files,
                                                 reviewers_plus_owner)
  # Render each unapproved target back in "+path" DEPS syntax.
  unapproved_dependencies = ["'+%s'," % path[:-len('/DEPS')]
                             for path in missing_files]
  if unapproved_dependencies:
    output_list = [
      output('Missing LGTM from OWNERS of directories added to DEPS:\n %s' %
             '\n '.join(sorted(unapproved_dependencies)))]
    if not input_api.is_committing:
      suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
      output_list.append(output(
          'Suggested missing target path OWNERS:\n %s' %
          '\n '.join(suggested_owners or [])))
    return output_list
  return []
def _CommonChecks(input_api, output_api):
    """Checks common to both upload and commit."""
    results = list(input_api.canned_checks.PanProjectChecks(
        input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
    # Project-specific checks that all share the (input_api, output_api)
    # signature, run in a fixed order so output ordering is stable.
    project_checks = (
        _CheckAuthorizedAuthor,
        _CheckNoProductionCodeUsingTestOnlyFunctions,
        _CheckNoIOStreamInHeaders,
        _CheckNoUNIT_TESTInSourceFiles,
        _CheckNoNewWStrings,
        _CheckNoDEPSGIT,
        _CheckNoBannedFunctions,
        _CheckNoPragmaOnce,
        _CheckNoTrinaryTrueFalse,
        _CheckUnwantedDependencies,
        _CheckFilePermissions,
        _CheckNoAuraWindowPropertyHInHeaders,
        _CheckIncludeOrder,
        _CheckForVersionControlConflicts,
        _CheckPatchFiles,
        _CheckHardcodedGoogleHostsInLowerLayers,
        _CheckNoAbbreviationInPngFileName,
        _CheckForInvalidOSMacros,
        _CheckAddedDepsHaveTargetApprovals,
    )
    for check in project_checks:
        results.extend(check(input_api, output_api))
    # The tab check is restricted to .grd files only.
    results.extend(input_api.canned_checks.CheckChangeHasNoTabs(
        input_api,
        output_api,
        source_file_filter=lambda x: x.LocalPath().endswith('.grd')))
    # Run the presubmit unit tests whenever PRESUBMIT.py itself is touched.
    if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
        results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
            input_api, output_api,
            input_api.PresubmitLocalPath(),
            whitelist=[r'^PRESUBMIT_test\.py$']))
    return results
def _CheckSubversionConfig(input_api, output_api):
    """Verifies the subversion config file is correctly setup.

    Checks that autoprops are enabled, returns an error otherwise.
    """
    join = input_api.os_path.join
    if input_api.platform == 'win32':
        appdata = input_api.environ.get('APPDATA', '')
        if not appdata:
            return [output_api.PresubmitError('%APPDATA% is not configured.')]
        path = join(appdata, 'Subversion', 'config')
    else:
        home = input_api.environ.get('HOME', '')
        if not home:
            return [output_api.PresubmitError('$HOME is not configured.')]
        path = join(home, '.subversion', 'config')
    error_msg = (
        'Please look at http://dev.chromium.org/developers/coding-style to\n'
        'configure your subversion configuration file. This enables automatic\n'
        'properties to simplify the project maintenance.\n'
        'Pro-tip: just download and install\n'
        'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
    try:
        # BUG FIX: read the config via a context manager; the original called
        # open() without ever closing the file handle.
        with open(path, 'r') as config_file:
            lines = config_file.read().splitlines()
        # Make sure auto-props is enabled and check for 2 Chromium standard
        # auto-prop.
        if (not '*.cc = svn:eol-style=LF' in lines or
                not '*.pdf = svn:mime-type=application/pdf' in lines or
                not 'enable-auto-props = yes' in lines):
            return [
                output_api.PresubmitNotifyResult(
                    'It looks like you have not configured your subversion config '
                    'file or it is not up-to-date.\n' + error_msg)
            ]
    except (OSError, IOError):
        return [
            output_api.PresubmitNotifyResult(
                'Can\'t find your subversion config file.\n' + error_msg)
        ]
    return []
def _CheckAuthorizedAuthor(input_api, output_api):
    """For non-googler/chromites committers, verify the author's email address is
    in AUTHORS.
    """
    # TODO(maruel): Add it to input_api?
    import fnmatch

    author = input_api.change.author_email
    if not author:
        input_api.logging.info('No author, skipping AUTHOR check')
        return []
    authors_path = input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'AUTHORS')
    # BUG FIX: read AUTHORS inside a context manager; the original iterated a
    # generator over open(authors_path) and never closed the file handle.
    with open(authors_path) as authors_file:
        valid_authors = [
            match.group(1).lower()
            for match in (input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
                          for line in authors_file)
            if match]
    if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
        input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
        return [output_api.PresubmitPromptWarning(
            ('%s is not in AUTHORS file. If you are a new contributor, please visit'
             '\n'
             'http://www.chromium.org/developers/contributing-code and read the '
             '"Legal" section\n'
             'If you are a chromite, verify the contributor signed the CLA.') %
            author)]
    return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _DidYouMeanOSMacro(bad_macro):
try:
return {'A': 'OS_ANDROID',
'B': 'OS_BSD',
'C': 'OS_CHROMEOS',
'F': 'OS_FREEBSD',
'L': 'OS_LINUX',
'M': 'OS_MACOSX',
'N': 'OS_NACL',
'O': 'OS_OPENBSD',
'P': 'OS_POSIX',
'S': 'OS_SOLARIS',
'W': 'OS_WIN'}[bad_macro[3].upper()]
except KeyError:
return ''
def _CheckForInvalidOSMacrosInFile(input_api, f):
    """Check for sensible looking, totally invalid OS macros."""
    preprocessor_re = input_api.re.compile(r'^\s*#')
    os_macro_re = input_api.re.compile(r'defined\((OS_[^)]+)\)')
    problems = []
    for line_number, line in f.ChangedContents():
        # Only preprocessor lines can legitimately test OS_* macros.
        if not preprocessor_re.search(line):
            continue
        for macro in os_macro_re.findall(line):
            if macro in _VALID_OS_MACROS:
                continue
            suggestion = _DidYouMeanOSMacro(macro)
            hint = ' (did you mean %s?)' % suggestion if suggestion else ''
            problems.append(' %s:%d %s%s' % (f.LocalPath(),
                                             line_number,
                                             macro,
                                             hint))
    return problems
def _CheckForInvalidOSMacros(input_api, output_api):
    """Check all affected files for invalid OS macros."""
    # File types that cannot contain C preprocessor directives are skipped.
    skipped_extensions = ('.py', '.js', '.html', '.css')
    bad_macros = []
    for affected in input_api.AffectedFiles():
        if affected.LocalPath().endswith(skipped_extensions):
            continue
        bad_macros.extend(_CheckForInvalidOSMacrosInFile(input_api, affected))
    if bad_macros:
        return [output_api.PresubmitError(
            'Possibly invalid OS macro[s] found. Please fix your code\n'
            'or add your macro to src/PRESUBMIT.py.', bad_macros)]
    return []
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit entry point invoked when a change is uploaded."""
    return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit entry point invoked when a change is committed."""
    results = []
    results.extend(_CommonChecks(input_api, output_api))
    # TODO(thestig) temporarily disabled, doesn't work in third_party/
    #results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
    #    input_api, output_api, sources))
    # Make sure the tree is 'open'.
    results.extend(input_api.canned_checks.CheckTreeIsOpen(
        input_api,
        output_api,
        json_url='http://chromium-status.appspot.com/current?format=json'))
    # BUG FIX: the bot tuple previously contained the single string
    # 'mac_rel, win:compile' (a missing quote); the two bots are now
    # listed separately so each is actually checked.
    results.extend(input_api.canned_checks.CheckRietveldTryJobExecution(
        input_api, output_api, 'http://codereview.chromium.org',
        ('win_rel', 'linux_rel', 'mac_rel', 'win:compile'),
        'tryserver@chromium.org'))
    results.extend(input_api.canned_checks.CheckChangeHasBugField(
        input_api, output_api))
    results.extend(input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api))
    results.extend(_CheckSubversionConfig(input_api, output_api))
    return results
def GetPreferredTrySlaves(project, change):
    """Choose the try bots to run based on which files the change touches."""
    files = change.LocalPaths()
    # Nothing to try for empty changes or OWNERS-only changes.
    if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
        return []

    # Changes confined to a single platform get that platform's bots only.
    platform_specific = (
        ('\.(m|mm)$|(^|[/_])mac[/_.]',
         ['mac_rel', 'mac:compile']),
        ('(^|[/_])win[/_.]',
         ['win_rel', 'win7_aura', 'win:compile']),
        ('(^|[/_])android[/_.]',
         ['android_aosp', 'android_dbg', 'android_clang_dbg']),
        ('^native_client_sdk',
         ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']),
        ('[/_]ios[/_.]',
         ['ios_rel_device', 'ios_dbg_simulator']),
    )
    for pattern, bots in platform_specific:
        if all(re.search(pattern, f) for f in files):
            return list(bots)

    trybots = [
        'android_clang_dbg',
        'android_dbg',
        'ios_dbg_simulator',
        'ios_rel_device',
        'linux_asan',
        'linux_aura',
        'linux_chromeos',
        'linux_clang:compile',
        'linux_rel',
        'mac_rel',
        'mac:compile',
        'win7_aura',
        'win_rel',
        'win:compile',
        'win_x64_rel:compile',
    ]

    # Match things like path/aura/file.cc and path/file_aura.cc.
    # Same for chromeos.
    if any(re.search('[/_](aura|chromeos)', f) for f in files):
        trybots += ['linux_chromeos_clang:compile', 'linux_chromeos_asan']

    # The AOSP bot doesn't build the chrome/ layer, so ignore any changes to it
    # unless they're .gyp(i) files as changes to those files can break the gyp
    # step on that bot.
    if (not all(re.search('^chrome', f) for f in files) or
            any(re.search('\.gypi?$', f) for f in files)):
        trybots += ['android_aosp']

    return trybots
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/PRESUBMIT.py
|
Python
|
apache-2.0
| 37,746
|
[
"VisIt"
] |
ac0f8d4e47f44650559157b4127f8fbc3605208f3b5110c1ad4abfe32d484dd6
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import print_function
import MDAnalysis
from MDAnalysisTests import module_not_found
from MDAnalysisTests.datafiles import GRO
from MDAnalysisTests.util import block_import
from numpy.testing import TestCase, assert_equal, dec
import numpy as np
import warnings
from mock import Mock, patch
import sys
class TestContactMatrix(TestCase):
    """Tests for MDAnalysis.analysis.distances.contact_matrix (numpy and
    sparse return types, with and without periodic boundary conditions)."""

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        # Five points in a 10x10x10 box: two close together, one isolated,
        # and two periodic images of the first point.
        self.coord = np.array([[1, 1, 1],
                               [5, 5, 5],
                               [1.1, 1.1, 1.1],
                               [11, 11, 11],  # neighboring image with pbc
                               [21, 21, 21]],  # non neighboring image with pbc
                              dtype=np.float32)
        self.box = np.array([10, 10, 10], dtype=np.float32)
        self.shape = (5, 5)
        # Expected contact matrix without periodic boundary conditions.
        self.res_no_pbc = np.array([[1, 0, 1, 0, 0],
                                    [0, 1, 0, 0, 0],
                                    [1, 0, 1, 0, 0],
                                    [0, 0, 0, 1, 0],
                                    [0, 0, 0, 0, 1]], dtype=np.bool)
        # Expected contact matrix when the box is taken into account.
        self.res_pbc = np.array([[1, 0, 1, 1, 1],
                                 [0, 1, 0, 0, 0],
                                 [1, 0, 1, 1, 1],
                                 [1, 0, 1, 1, 1],
                                 [1, 0, 1, 1, 1]], dtype=np.bool)

    def test_np(self):
        # Dense numpy output, no periodic boundaries.
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, cutoff=1, returntype="numpy")
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts, self.res_no_pbc)

    def test_sparse(self):
        # Sparse output, no periodic boundaries.
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, cutoff=1.5, returntype="sparse")
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts.toarray(), self.res_no_pbc)

    def test_box_numpy(self):
        # Dense output with periodic boundary conditions applied.
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, box=self.box, cutoff=1)
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts, self.res_pbc)

    def test_box_sparse(self):
        # Sparse output with periodic boundary conditions applied.
        contacts = MDAnalysis.analysis.distances.contact_matrix(
            self.coord, box=self.box, cutoff=1, returntype='sparse')
        assert_equal(contacts.shape, self.shape,
                     "wrong shape (should be {0})".format(self.shape))
        assert_equal(contacts.toarray(), self.res_pbc)
class TestDist(TestCase):
    '''Tests for MDAnalysis.analysis.distances.dist().

    Imports do not happen at the top level of the module
    because of the scipy dependency.'''

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        import scipy
        import scipy.spatial
        self.u = MDAnalysis.Universe(GRO)
        self.ag = self.u.atoms[:20]
        self.u2 = MDAnalysis.Universe(GRO)
        self.ag2 = self.u2.atoms[:20]
        # BUG FIX: np.random.shuffle() shuffles in place and returns None,
        # so the original `self.ag2.positions = np.random.shuffle(...)`
        # assigned None to the positions. Shuffle a coordinate array and
        # assign it back explicitly instead.
        shuffled = self.ag2.positions
        np.random.shuffle(shuffled)
        self.ag2.positions = shuffled
        # Reference: diagonal of the full pairwise distance matrix, i.e.
        # the distance between corresponding atoms of the two groups.
        self.expected = np.diag(scipy.spatial.distance.cdist(
            self.ag.positions,
            self.ag2.positions))

    def tearDown(self):
        del self.u
        del self.ag
        del self.u2
        del self.ag2
        del self.expected

    def test_pairwise_dist(self):
        '''Ensure that pairwise distances between atoms are
        correctly calculated.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2)[2]
        assert_equal(actual, self.expected)

    def test_pairwise_dist_offset_effect(self):
        '''Test that feeding in offsets to dist() doesn't alter
        pairwise distance matrix.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2,
                                                    offset=229)[2]
        assert_equal(actual, self.expected)

    def test_offset_calculation(self):
        '''Test that offsets fed to dist() are correctly calculated.'''
        actual = MDAnalysis.analysis.distances.dist(self.ag, self.ag2,
                                                    offset=33)[:2]
        assert_equal(actual, np.array([self.ag.atoms.resids + 33,
                                       self.ag2.atoms.resids + 33]))

    def test_mismatch_exception(self):
        '''A ValueError should be raised if the two atomgroups
        don't have the same number of atoms.'''
        with self.assertRaises(ValueError):
            MDAnalysis.analysis.distances.dist(self.ag[:19], self.ag2)
class TestBetween(TestCase):
    '''Tests for MDAnalysis.analysis.distances.between().

    Imports do not happen at the top level of the module
    because of the scipy dependency.'''

    @dec.skipif(module_not_found('scipy'),
                "Test skipped because scipy is not available.")
    def setUp(self):
        import MDAnalysis.analysis.distances
        import scipy
        import scipy.spatial
        self.u = MDAnalysis.Universe(GRO)
        self.ag = self.u.atoms[:10]
        self.ag2 = self.u.atoms[12:33]
        self.group = self.u.atoms[40:]
        self.distance = 5.9
        # Build the reference result independently with scipy: first keep
        # the group atoms that lie within self.distance of any atom in ag...
        self.distance_matrix_1 = scipy.spatial.distance.cdist(self.group.positions,
                                                              self.ag.positions)
        self.mask_1 = np.unique(np.where(self.distance_matrix_1 <= self.distance)[0])
        self.group_filtered = self.group[self.mask_1]
        # ...then, of those, the ones also within self.distance of ag2.
        self.distance_matrix_2 = scipy.spatial.distance.cdist(self.group_filtered.positions,
                                                              self.ag2.positions)
        self.mask_2 = np.unique(np.where(self.distance_matrix_2 <= self.distance)[0])
        self.expected = self.group_filtered[self.mask_2].indices

    def tearDown(self):
        del self.u
        del self.ag
        del self.ag2
        del self.group
        del self.distance
        del self.distance_matrix_1
        del self.distance_matrix_2
        del self.mask_1
        del self.mask_2
        del self.group_filtered
        del self.expected

    def test_between_simple_case_indices_only(self):
        '''Test MDAnalysis.analysis.distances.between() for
        a simple input case. Checks the sorted atom indices
        of returned AtomGroup against sorted expected index
        values.'''
        actual = sorted(MDAnalysis.analysis.distances.between(self.group,
                                                              self.ag,
                                                              self.ag2,
                                                              self.distance).indices)
        assert_equal(actual, self.expected)
class TestImportWarnings(TestCase):
    """Tests of the warnings emitted by MDAnalysis.analysis.distances when
    scipy is or is not importable."""
    # see unit testing for warnings:
    # http://stackoverflow.com/a/3892301

    def setUp(self):
        # Drop any cached import so module-level warnings fire again.
        sys.modules.pop('MDAnalysis.analysis.distances', None)

    @block_import('scipy')
    def test_warning_raised_no_scipy_module_level(self):
        # an appropriate warning rather than an exception should be
        # raised if scipy is absent when importing
        # MDAnalysis.analysis.distances
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            import MDAnalysis.analysis.distances
            assert issubclass(w[-1].category, ImportWarning)

    def test_silent_success_scipy_present_module_level(self):
        # if scipy is present no module level ImportWarning should be
        # raised when importing MDAnalysis.analysis.distances
        mock = Mock()  # mock presence of scipy
        with patch.dict('sys.modules', {'scipy':mock}):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                import MDAnalysis.analysis.distances
                assert w == []

    @block_import('scipy')
    def test_import_error_contact_matrix_no_scipy(self):
        # contact_matrix should raise an ImportError if returntype is
        # "sparse" and scipy is not available
        with self.assertRaises(ImportError):
            np.random.seed(321)
            points = np.random.random_sample((10, 3))
            import MDAnalysis.analysis.distances
            MDAnalysis.analysis.distances.contact_matrix(points,
                                                         returntype="sparse")
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_distances.py
|
Python
|
gpl-2.0
| 9,822
|
[
"MDAnalysis"
] |
18c8625479df2586eafa067da163b44354c0c0da7e1c483129f1245cd4944f5d
|
import vtk
def setup():
    """Create a renderer, a render window and an interactor, wired together.

    Returns the tuple (renderer, render_window, interactor).
    """
    renderer = vtk.vtkRenderer()
    window = vtk.vtkRenderWindow()
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)
    return renderer, window, interactor
def create_sphere(ren=None, r=5.0, center=(0, 0, 0)):
    """Add a sphere actor to a renderer and return its vtkSphereSource.

    Parameters:
        ren: renderer to add the actor to; a new one is created via setup()
             when None.
        r: sphere radius.
        center: (x, y, z) sphere center.
    """
    if ren is None:
        ren, renWin, iren = setup()
    # create source
    source = vtk.vtkSphereSource()
    # BUG FIX: honour the caller-supplied geometry; the original hard-coded
    # SetCenter(0, 0, 0) and SetRadius(5.0), silently ignoring r and center.
    source.SetCenter(*center)
    source.SetRadius(r)
    # mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(source.GetOutput())
    # actor
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # assign actor to the renderer
    ren.AddActor(actor)
    return source
|
martindurant/misc
|
vtk_simple.py
|
Python
|
mit
| 772
|
[
"VTK"
] |
08e0f636954a1399fc45eae00dc89e85c9dc34241deaac9ae678615ee89b51cb
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 15:37:51 2015
@author: Anton O Lindahl
"""
import h5py
import argparse
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import sys
import lmfit
import warnings
from aolPyModules import wiener, wavelet_filter
import time_to_energy_conversion as tof_to_energy
from aolPyModules import plotting as aol_plotting
import area_fill
# ROI on the raw time scale containing the prompt peak (units of the raw
# time scale -- presumably microseconds; TODO confirm).
prompt_roi = [1.508, 1.535]
# ROI on the raw time scale where the streaked signal is expected.
streak_time_roi = [1.57, 1.66]
# Threshold used for wavelet filtering of spectra.
wt_th = 0.03
# Energy axis (eV) used for the TOF -> energy conversion.
energy_scale_eV = np.linspace(40, 160, 2**9)
# Name of the hdf5 attribute that stores modification times.
time_stamp = 'time_stamp'
# Locations of the data and derived-data files.
data_dir = 'h5_files'
h5_file_name_template = data_dir + '/run{}_all.h5'
response_file_name = data_dir + '/response.h5'
nois_file_name = data_dir + '/noise.h5'
tof_to_energy_conversion_file_name = data_dir + '/time_to_energy.h5'
def h5_file_name_funk(run):
    """Return the hdf5 file name corresponding to the given run number."""
    file_name = h5_file_name_template.format(run)
    return file_name
def update_progress(i_evt, n_events, verbose=True):
    """Print an in-place text progress bar for event i_evt of n_events."""
    # Only refresh at (roughly) whole-percent steps and for the last event.
    if (verbose and
            ((i_evt % (n_events / 100) == 0) or (i_evt == n_events-1))):
        progress = (100 * i_evt) / (n_events - 1)
        num_squares = 40
        # '\r' returns the cursor so the bar overwrites itself in place.
        base_string = '\r[{:' + str(num_squares) + '}] {}%'
        print base_string.format('#'*(progress * num_squares / 100), progress),
        sys.stdout.flush()
def list_hdf5_content(group, indent=' '):
    """Recursively print the tree of groups and datasets under *group*."""
    for k, v in group.iteritems():
        print '{}"{}"'.format(indent, k),
        if isinstance(v, h5py.Group):
            # Recurse into subgroups with increased indentation.
            print 'group with members:'
            list_hdf5_content(v, indent=indent + ' ')
        elif isinstance(v, h5py.Dataset):
            print '\t{} {}'.format(v.shape, v.dtype)
def make_dataset(h5, name, shape, dtype=np.float):
    """Return dataset *name* in *h5* with the given shape and dtype.

    An existing compatible dataset is reused; an incompatible one is
    deleted and recreated. A 'time_stamp' attribute (initialised to 0)
    is added when missing, so staleness checks via older() always work.
    """
    try:
        dset = h5.require_dataset(name, shape=shape,
                                  dtype=dtype, exact=True)
    except TypeError:
        # Existing object is incompatible: replace it. BUG FIX: recreate
        # with the requested dtype -- the original hard-coded np.float
        # here, ignoring the dtype argument.
        del h5[name]
        dset = h5.create_dataset(name, shape=shape, dtype=dtype)
    if time_stamp not in dset.attrs.keys():
        dset.attrs.create(time_stamp, 0)
    return dset
def make_group(h5, name):
    """Return group *name* in *h5*, replacing an incompatible object.

    A 'time_stamp' attribute (initialised to 0) is added when missing.
    """
    try:
        group = h5.require_group(name)
    except TypeError:
        # An incompatible object exists under this name; replace it.
        del h5[name]
        group = h5.create_group(name)
    if time_stamp not in group.attrs.keys():
        group.attrs.create(time_stamp, 0)
    return group
def older(dset, dset_list):
    """Return True if *dset* is older than anything in *dset_list*.

    *dset_list* may be a single h5py Dataset/Group or an iterable of
    objects; ages are compared through the 'time_stamp' attribute.
    """
    if (isinstance(dset_list, h5py.Dataset) or
            isinstance(dset_list, h5py.Group)):
        return dset.attrs[time_stamp] < dset_list.attrs[time_stamp]
    # Iterable case: older than any single member counts as older.
    return np.any([dset.attrs[time_stamp] < d.attrs[time_stamp] for
                   d in dset_list])
class Timer_object:
    """Minimal stand-in for an h5py object in older() comparisons.

    Only exposes the .attrs mapping with a 'time_stamp' entry.
    """

    def __init__(self, t):
        # Mirror the h5py attrs interface used by older().
        self.attrs = {'time_stamp': t}
class Tims_stamp_warning(Warning):
    """Warning category for time-stamping problems."""
    pass
def time_stamp_object(h5_object):
    """Set the 'time_stamp' attribute of *h5_object* to the current time.

    Failures are reported as a warning instead of raising.
    """
    try:
        h5_object.attrs['time_stamp'] = time.time()
    # BUG FIX: catch Exception rather than a bare except, which would also
    # swallow KeyboardInterrupt and SystemExit.
    except Exception:
        warnings.warn('Could not time stamp the object {}.'.format(
            repr(h5_object)))
def get_response(plot=False, verbose=0):
    """Return (response, time_stamp), building the response file if absent."""
    try:
        with h5py.File(response_file_name, 'r') as f:
            response = f['signal'].value
            t = f['signal'].attrs[time_stamp]
    except IOError:
        # No cached response file yet; construct it from the Kr runs.
        if verbose > 0:
            print 'Could not open response file. Trying to make it.'
        response, t = construct_response(verbose=verbose)
    if plot:
        with h5py.File(response_file_name, 'r') as f:
            time_scale = f['time_scale'].value
        plt.figure('response')
        plt.clf()
        plt.plot(time_scale, response)
    return response, t
def construct_response(plot=False, verbose=0):
    """Build and store the average detector response from the Kr runs.

    Returns the (response, time_stamp) pair via get_response().
    """
    # The Kr runs
    runs = [132, 133, 134, 135, 136]
    if verbose > 0:
        print 'Loading Kr files for prompt determination.'
    h5_file_names = [h5_file_name_template.format(run) for run in runs]
    h5_list = []
    for file_name in h5_file_names:
        update_run_contained_derived_data(file_name, verbose=verbose)
        h5_list.append(h5py.File(file_name, 'r+'))
    time_scale = h5_list[0]['raw/time_scale'].value
    response = np.zeros_like(time_scale)
    n_shots = 0
    # Accumulate the raw signal over all shots, but only inside the
    # prompt-peak ROI.
    sl = slice(time_scale.searchsorted(prompt_roi[0]),
               time_scale.searchsorted(prompt_roi[1], side='right'))
    for h5 in h5_list:
        response[sl] += h5['raw/time_signal'][:, sl].sum(0)
        n_shots += h5['raw/event_time_s'].shape[0]
    response /= n_shots
    # Smooth the ROI edges and normalise the response to unit area.
    response[sl] = wiener.edgeSmoothing(response[sl], smoothPoints=15)
    response /= response.sum()
    with h5py.File(response_file_name, 'w') as res_file:
        dset = res_file.create_dataset('signal', data=response)
        dset.attrs.create(time_stamp, time.time())
        res_file.create_dataset('time_scale', data=time_scale)
    return get_response(plot=plot, verbose=verbose)
def get_file_names_for_noise_spectrum():
    """List the 'run*_all.h5' file paths found in the data directory."""
    file_names = []
    for entry in os.listdir(data_dir):
        if entry.startswith('run') and entry.endswith('_all.h5'):
            file_names.append('/'.join([data_dir, entry]))
    return file_names
def get_nois_spectrum(plot=False, verbose=0):
    """Return (noise_spectrum, time_stamp), rebuilding the cache if stale.

    The cached spectrum is regenerated when the noise file is missing or
    older than any of the raw-data files it was derived from.
    """
    try:
        # Probe whether the noise file exists at all.
        with h5py.File(nois_file_name, 'r') as f:
            pass
        new_noise = False
    except IOError:
        if verbose > 0:
            print 'Could not open response file. Trying to make it.',
            print 'In "get_nois_spectrum()".'
        construct_nois_spectrum(plot=plot, verbose=verbose)
        new_noise = True

    if not new_noise:
        make_new_noise = False
        with h5py.File(nois_file_name, 'r') as f:
            noise = f['noise']
            h5_file_names = get_file_names_for_noise_spectrum()
            # Rebuild when any raw file is newer than the stored noise.
            for h5_name in h5_file_names:
                with h5py.File(h5_name, 'r') as h5:
                    if older(noise, h5['raw']):
                        make_new_noise = True
                        if verbose > 0:
                            print 'Noise was made earlier than the raw data',
                            print 'in the file', h5_name, 'Make new noise.'
                        break
                    elif False:
                        # Disabled debug branch kept from the original.
                        print 'Noise was made later than the raw data in',
                        print 'the file', h5_name

        if make_new_noise:
            construct_nois_spectrum(plot=plot, verbose=verbose)

    with h5py.File(nois_file_name, 'r') as f:
        noise = f['noise']
        return noise.value, noise.attrs['time_stamp']
def construct_nois_spectrum(plot=False, verbose=0):
    """Build and store the noise power spectrum from x-ray-free shots.

    Shots whose maximum TOF signal is below 0.04 are treated as empty
    and their averaged FFT power spectrum, wavelet-filtered, is written
    to the noise file. Returns via get_nois_spectrum().
    """
    h5_file_names = get_file_names_for_noise_spectrum()
    # Make sure the per-run derived data (max_signal, ...) is up to date.
    for file_name in h5_file_names:
        update_run_contained_derived_data(file_name)
    empty_shots = []
    for i, h5_name in enumerate(h5_file_names):
        with h5py.File(h5_name, 'r') as h5:
            time_signal_dset = h5['raw/time_signal']
            try:
                max_signal = h5['max_signal'].value
            except KeyError:
                max_signal = np.max(time_signal_dset.value, axis=1)
            # Threshold under which a shot is considered x-ray free.
            no_x_rays = max_signal < 0.04
            if no_x_rays.sum() > 0:
                empty_shots.extend(time_signal_dset[no_x_rays, :])
            if i == 0:
                time_scale = h5['raw/time_scale'].value
            if verbose > 0:
                print h5_name, 'has', no_x_rays.sum(), 'empty shots'

    empty_shots = np.array(empty_shots)
    # print len(empty_shots)
    # plt.figure('snr')
    # plt.clf()
    # for shot in empty_shots[:]:
    #     plt.plot(time_scale, shot)

    freq = (np.linspace(0., 1., len(time_scale)) *
            1e-3/(time_scale[1] - time_scale[0]))
    fft_empty_shots = np.fft.fft(empty_shots, axis=1)
    amp = np.mean(np.abs(fft_empty_shots)**2, axis=0)
    # Wavelet-filter the averaged spectrum and symmetrise it.
    wt_amp = amp[:]
    wt_amp = wavelet_filter.wavelet_filt(amp[1:], thresh=wt_th)
    wt_amp[1:] = (wt_amp[1:] + wt_amp[-1:0:-1]) / 2
    # plt.figure('fft')
    # plt.clf()
    # plt.plot(freq, amp)
    # plt.plot(freq, wt_amp, 'r')

    with h5py.File(nois_file_name, 'w') as f:
        dset = f.create_dataset('noise', data=wt_amp)
        dset.attrs.create('time_stamp', time.time())
        f.create_dataset('freq', data=freq)
    return get_nois_spectrum()
def construct_snr_spectrum(h5, plot=False):
    """Compute the signal-to-noise-ratio spectrum for one run file *h5*."""
    noise, t = get_nois_spectrum()
    sig_spec = h5['fft_spectrum_mean'].value
    freq = h5['fft_freq_axis'].value
    # Wavelet-smooth the average signal spectrum and symmetrise it.
    wt_spec = wavelet_filter.wavelet_filt(sig_spec, thresh=wt_th)
    wt_spec[1:] = (wt_spec[1:] + wt_spec[-1:0:-1]) / 2
    # SNR = (signal - noise) / noise, per frequency bin.
    snr = (wt_spec - noise) / noise
    if plot:
        plt.figure('signal and noise')
        plt.clf()
        plt.semilogy(freq, sig_spec, label='signal')
        plt.semilogy(freq, noise, label='noise')
        plt.semilogy(freq, wt_spec, label='wt signal')
        plt.semilogy(freq, snr, label='snr')
        plt.legend(loc='best')
    return snr
def check_tof_to_energy_conversion_matrix(plot=False, verbose=0):
    """Ensure the TOF-to-energy conversion matrix exists and is up to date."""
    try:
        # Probe whether the conversion file exists.
        with h5py.File(tof_to_energy_conversion_file_name, 'r'):
            pass
    except IOError:
        if verbose > 0:
            print 'Could not open the file. Making the conversion matrix.'
        construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
    _, h5_dict, _ = tof_to_energy.load_tof_to_energy_data(verbose=verbose)
    with h5py.File(tof_to_energy_conversion_file_name, 'r') as trans_h5:
        # The Timer_object provides a fixed reference time so matrices
        # created before that moment are always considered stale.
        if not older(
                trans_h5['matrix'],
                [h5['streak_peak_integral'] for h5 in h5_dict.itervalues()] +
                [Timer_object(1437117486)]):
            return
    if verbose > 0:
        print 'Conversion to old, remaking it.'
    construc_tof_to_energy_conversion_matrix(plot=plot, verbose=verbose)
def construc_tof_to_energy_conversion_matrix(plot=False, verbose=0):
    """Compute and store the TOF-to-energy matrix and fit parameters."""
    M, t, E, time_to_energy_params, tof_prediction_params = \
        tof_to_energy.make_tof_to_energy_matrix(
            energy_scale_eV=energy_scale_eV, plot=plot, verbose=verbose)
    with h5py.File(tof_to_energy_conversion_file_name, 'w') as h5:
        dset = h5.create_dataset('matrix', data=M)
        dset.attrs.create('time_stamp', time.time())
        dset = h5.create_dataset('time_scale', data=t)
        dset.attrs.create('time_stamp', time.time())
        dset = h5.create_dataset('energy_scale_eV', data=E)
        dset.attrs.create('time_stamp', time.time())
        # Store each fit parameter as a scalar dataset with its own stamp.
        for k in time_to_energy_params:
            dset = h5.create_dataset(k, data=time_to_energy_params[k].value)
            dset.attrs.create('time_stamp', time.time())
        for k in tof_prediction_params:
            dset = h5.require_dataset(k, (), np.float)
            dset[()] = tof_prediction_params[k].value
            dset.attrs.create('time_stamp', time.time())
def open_hdf5_file(file_name, plot=False, verbose=0):
    """Open *file_name* read/write; return the h5py File or -1 on failure.

    Note: callers must check for the -1 sentinel; plot and verbose are
    accepted for interface symmetry but unused here.
    """
    try:
        # Open the file
        h5 = h5py.File(file_name, 'r+')
    except BaseException as e:
        print 'Could not open the specified hdf5 file "{}".'.format(
            file_name)
        print 'Message was: {}'.format(e.message)
        return -1
    return h5
def get_com(x, y):
    """Center of mass of y(x) over the region where y stays above zero."""
    low, high = fwxm(x, y, 0.0, return_data='idx')
    region = slice(low, high)
    weights = y[region]
    return (x[region] * weights).sum() / weights.sum()
def fwxm(x, y, fraction=0.5, return_data=''):
    """Full width of y(x) at a given fraction of its maximum.

    Depending on *return_data*, returns the bounding indices ('idx'),
    the x limits ('limits'), or the (center, width) pair by default.
    """
    peak_idx = y.argmax()
    level = y.max() * fraction

    # Walk left from the peak until the signal drops below the level;
    # if it never does, the bound stays at the peak itself.
    idx_low = peak_idx
    for i in range(peak_idx, -1, -1):
        if y[i] < level:
            idx_low = i
            break

    # Walk right from the peak likewise.
    idx_high = peak_idx
    for i in range(peak_idx, len(x)):
        if y[i] < level:
            idx_high = i
            break

    if return_data == 'idx':
        return idx_low, idx_high
    if return_data == 'limits':
        return x[idx_low], x[idx_high]
    return (x[idx_low] + x[idx_high]) / 2, x[idx_high] - x[idx_low]
def get_trace_bounds(x, y,
                     threshold=0.0, min_width=2,
                     energy_offset=0,
                     useRel=False, threshold_rel=0.5,
                     roi=slice(None)):
    """Locate the first and last threshold crossings of the trace y(x).

    Returns (val_min, val_max, threshold_V): linearly interpolated x
    positions of the rising and falling crossings, plus the threshold
    actually used. Returns [nan, nan, nan] when no region of at least
    min_width (in x units) stays above the threshold. With useRel=True
    the threshold is threshold_rel times the finite maximum of the trace
    (rejected if below *threshold*). Note: energy_offset is accepted but
    unused in this function.
    """
    amp = y[roi]
    scale = x[roi]
    dx = np.mean(np.diff(x))
    if useRel:
        threshold_temp = threshold_rel * np.max(amp[np.isfinite(amp)])
        if threshold_temp < threshold:
            return [np.nan] * 3
        else:
            threshold_V = threshold_temp
    else:
        threshold_V = threshold
    # Minimum number of consecutive samples that must exceed the threshold.
    nPoints = np.round(min_width/dx)
    # Scan forward for the first run of nPoints samples above threshold;
    # the for/else falls through to nan when no such run exists.
    i_min = 0
    for i in range(1, amp.size):
        if amp[i] < threshold_V:
            i_min = i
            continue
        if i-i_min >= nPoints:
            break
    else:
        return [np.nan] * 3
    # Scan backwards for the last such run.
    i_max = amp.size - 1
    for i in range(amp.size-1, -1, -1):
        if amp[i] < threshold_V:
            i_max = i
            continue
        if i_max-i >= nPoints:
            break
    else:
        return [np.nan] * 3
    # Whole trace above threshold: no crossings to interpolate.
    if i_min == 0 and i_max == amp.size - 1:
        return [np.nan] * 3
    # print 'min =', min, 'max =', max
    # Linear interpolation of the exact crossing positions.
    val_max = (scale[i_max] + (threshold_V - amp[i_max]) *
               (scale[i_max] - scale[i_max - 1]) /
               (amp[i_max] - amp[i_max - 1]))
    val_min = (scale[i_min] + (threshold_V - amp[i_min]) *
               (scale[i_min + 1] - scale[i_min]) /
               (amp[i_min + 1] - amp[i_min]))
    return val_min, val_max, threshold_V
def update_run_contained_derived_data(file_name, plot=False, verbose=0):
    """Update derived data based on information only in given file.

    Add some derived datasetd to the hdf5 file based on the raw data in the
    file. The added datasets are:
    - Mean of the FEE gas detectors for each shot: fee_mean
    - Maximum TOF waveform signal for each shot: max_signal
    - Frequency spectrum averaged over all shots: fft_spectrum_mean
    - The corresponding frequency axis: fft_freq_axis
    - BC2 energy calculated from the beam position: energy_BC2_MeV
    - L3 energy corrected based on the BC2 energy: energy_L3_corrected_MeV
    """
    if verbose > 0:
        print 'Entering "update_run_contained_derived_data()" ',
        print 'with file_name={}'.format(file_name)

    h5 = open_hdf5_file(file_name, plot, verbose)
    raw_group = h5['raw']
    n_events = raw_group['event_time_s'].shape[0]

    # Make the fee data set
    raw_fee_dset = raw_group['FEE_energy_mJ']
    fee_mean_dset = make_dataset(h5, 'fee_mean', (n_events,))
    if older(fee_mean_dset, raw_group):
        if verbose > 0:
            print 'Updating fee mean dataset'
        # Average the first four FEE gas detector readings per shot.
        fee_mean_dset[:] = raw_fee_dset[:, 0: 4].mean(1)
        fee_mean_dset.attrs[time_stamp] = time.time()

    # Make max signal dataset
    time_signal_dset = raw_group['time_signal']
    max_sig_dset = make_dataset(h5, 'max_signal', (n_events,))
    if older(max_sig_dset, raw_group):
        if verbose > 0:
            print 'Get the maximum signal for each shot.'
        max_sig_dset[:] = np.max(time_signal_dset, axis=1)
        max_sig_dset.attrs['time_stamp'] = time.time()

    # Make the frequency spectrum
    time_scale = raw_group['time_scale'].value
    spectrum_dset = make_dataset(h5, 'fft_spectrum_mean', time_scale.shape)
    if older(spectrum_dset, [raw_group, max_sig_dset]):
        if verbose > 0:
            print 'Compute the frequency spectrum of the data.'
        max_signal = max_sig_dset.value
        # Average only over the 500 strongest shots.
        use = max_signal > np.sort(max_signal)[-500:][0]
        signal = time_signal_dset[use, :]
        spectrum_dset[:] = np.mean(np.abs(np.fft.fft(signal, axis=1))**2,
                                   axis=0)
        spectrum_dset.attrs['time_stamp'] = time.time()
    freq_axis_dset = make_dataset(h5, 'fft_freq_axis', time_scale.shape)
    if older(freq_axis_dset, raw_group):
        if verbose > 0:
            print 'Updating the frequency axis.'
        freq_axis_dset[:] = (np.linspace(0., 1e-3, len(time_scale)) /
                             (time_scale[1] - time_scale[0]))
        freq_axis_dset.attrs['time_stamp'] = time.time()

    # Calculate the BC2 energy
    bc2_energy_dset = make_dataset(h5, 'energy_BC2_MeV', (n_events, ))
    if older(bc2_energy_dset, raw_group):
        if verbose > 0:
            print 'Calculating BC2 energy for the bpm reading.'
        # Values comes from a mail from Timothy Maxwell
        # The nominal BC2 energy is 5 GeV (was at least when this data was
        # recorded). The measurement is the relative offset of the beam
        # position in a BPM. The dispersion value is -364.7 mm.
        bc2_energy_dset[:] = 5e3 * (1. - raw_group['position_BC2_mm'][:] /
                                    364.7)
        bc2_energy_dset.attrs['time_stamp'] = time.time()

    # Calculate the corrected L3 energy
    l3_energy_cor_dset = make_dataset(h5, 'energy_L3_corrected_MeV',
                                      (n_events, ))
    if older(l3_energy_cor_dset, [raw_group, bc2_energy_dset,
                                  Timer_object(1434096408)]):
        if verbose > 0:
            print 'Calculating corrected L3 energy.'
        # Subtract the shot-to-shot BC2 deviation from the nominal 5 GeV.
        l3_energy_cor_dset[:] = (raw_group['energy_L3_MeV'][:] -
                                 (bc2_energy_dset[:] - 5000))
        l3_energy_cor_dset.attrs['time_stamp'] = time.time()

    # Make the phase cavity time filter
    pct_filter_dset = make_dataset(h5, 'pct_filter', (n_events, ),
                                   dtype=bool)
    if older(pct_filter_dset, [raw_group, Timer_object(0)]):
        print h5.filename
        pct0 = raw_group['phase_cavity_times'][:, 0]
        # Accept shots whose first phase-cavity time lies in (0.4, 1.2).
        pct_filter_dset[:] = (0.4 < pct0) & (pct0 < 1.2)
        pct_filter_dset.attrs[time_stamp] = time.time()

    h5.close()
def update_with_noise_and_response(file_name, plot=False, verbose=0):
    """Update derived data that depends on the noise and response spectra.

    The noise spectrum and the detector response are determined from many
    runs.  With these spectra a number of new datasets are derived and
    stored in the hdf5 file:

    - snr_spectrum: signal to noise ratio spectrum based on the given noise
      spectrum and the average spectrum of the current run.
    - filtered_time_signal: Wiener deconvolution of the time signal based
      on the signal to noise ratio and the detector response function.
    - streak_peak_center: center of the streaking peak, taken from a
      Gaussian fit to the deconvoluted signal inside a fixed time ROI.
    - streak_peak_integral: photoline intensity, taken as the fitted
      Gaussian amplitude of the same fit.

    Each dataset is only recomputed when `older()` says it is out of date
    with respect to its inputs (or a hard coded code-change time stamp).
    """
    # Make sure that the run contained information is up to date.
    update_run_contained_derived_data(file_name, plot, verbose-1)
    # Open the file.
    h5 = open_hdf5_file(file_name, plot, verbose)
    raw_group = h5['raw']
    n_events = raw_group['event_time_s'].shape[0]
    time_scale = raw_group['time_scale'].value
    # Make signal to noise ratio.
    snr_dset = make_dataset(h5, 'snr_spectrum', time_scale.shape)
    spectrum_dset = h5['fft_spectrum_mean']
    if older(snr_dset, [spectrum_dset, raw_group, Timer_object(1434015914)]):
        if verbose > 0:
            print 'Updating the signal to noise ratio.',
            print ' In "update_with_noise_and_response()"',
            print ' with file_name={}'.format(file_name)
        snr_dset[:] = construct_snr_spectrum(h5, plot=plot)
        snr_dset.attrs['time_stamp'] = time.time()
    # Deconvolute the response function
    time_signal_dset = raw_group['time_signal']
    deconv_time_signal_dset = make_dataset(h5, 'filtered_time_signal',
                                           time_signal_dset.shape)
    if older(deconv_time_signal_dset, [raw_group, snr_dset]):
        response, t_response = get_response(plot=plot, verbose=verbose-1)
        if verbose > 0:
            print 'Deconvolving traces.'
            print ' In "update_with_noise_and_response()"',
            print ' with file_name={}'.format(file_name),
            print ' {} events to process.'.format(n_events)
        # NOTE(review): 'Deconcolver' looks like a misspelling of
        # 'Deconvolver' but must match the name actually exported by the
        # wiener module -- confirm there before renaming.
        deconvolver = wiener.Deconcolver(snr_dset.value, response)
        for i_evt in range(n_events):
            deconv_time_signal_dset[i_evt, :] = deconvolver.deconvolve(
                time_signal_dset[i_evt, :])
            update_progress(i_evt, n_events, verbose)
        print ''
        deconv_time_signal_dset.attrs['time_stamp'] = time.time()
    # Calculate the center of mass of the streak peak
    time_com_dset = make_dataset(h5, 'streak_peak_center', (n_events, ))
    photo_line_intensity_dset = make_dataset(h5, 'streak_peak_integral',
                                             (n_events, ))
    if older(time_com_dset, [deconv_time_signal_dset,
                             Timer_object(1443006988)]):
        if verbose > 0:
            print 'Calculating streak peak center in time.',
            print ' In "update_with_noise_and_response()"',
            print ' with file_name={}'.format(file_name)
        # Slice of the time scale covering the streaking-peak ROI
        # (streak_time_roi is presumably a module level constant).
        streak_sl = slice(np.searchsorted(time_scale, streak_time_roi[0]),
                          np.searchsorted(time_scale, streak_time_roi[1],
                                          side='right'))
        time_scale_streak = time_scale[streak_sl]
        ####
        # Center of mass calculation (kept for reference; superseded by
        # the Gaussian fit below).
        # for i_evt in range(n_events):
        #     time_com_dset[i_evt] = get_com(
        #         time_scale_streak,
        #         deconv_time_signal_dset[i_evt, streak_sl])
        #     update_progress(i_evt, n_events, verbose)
        ####
        # Fit of Gaussian
        deconv_time_signal = deconv_time_signal_dset.value
        time_com = np.zeros(time_com_dset.shape)
        photo_line_intensity = np.zeros(photo_line_intensity_dset.shape)
        mean_signal = deconv_time_signal[:, streak_sl].mean(axis=0)
        mod = lmfit.models.GaussianModel()
        params = lmfit.Parameters()
        # add_many tuples: (name, value, vary, min[, max]).
        params.add_many(('amplitude', 1, True, 0),
                        ('center', time_scale_streak[np.argmax(mean_signal)],
                         True, min(time_scale_streak), max(time_scale_streak)),
                        ('sigma', 1e-3, True, 0))
        # fit to mean in order to get start parameters for the shot fits
        out = mod.fit(mean_signal, x=time_scale_streak, params=params)
        for k in params:
            params[k].value = out.params[k].value
        # Fit each shot separately, starting from the mean-fit parameters.
        for i_evt in range(n_events):
            out = mod.fit(deconv_time_signal[i_evt, streak_sl],
                          params, x=time_scale_streak)
            time_com[i_evt] = out.params['center'].value
            photo_line_intensity[i_evt] = out.params['amplitude'].value
            update_progress(i_evt, n_events, verbose)
        if plot:
            # Show the mean signal together with the last per-shot fit.
            time_scale_streak = time_scale[streak_sl]
            plt.figure('peak finding time domain')
            plt.clf()
            plt.plot(time_scale_streak, mean_signal)
            plt.plot(time_scale_streak, out.best_fit)
        if verbose > 0:
            print ''
        time_com_dset[:] = time_com
        time_com_dset.attrs['time_stamp'] = time.time()
        photo_line_intensity_dset[:] = photo_line_intensity
        photo_line_intensity_dset.attrs['time_stamp'] = time.time()
    h5.close()
def update_with_time_to_energy_conversion(file_name, plot=False, verbose=0):
    """Make derived data based on the time to energy conversion.

    Converts the deconvoluted time traces to energy spectra using the
    conversion matrix stored in `tof_to_energy_conversion_file_name`, then
    derives per-shot spectral properties (center, width, threshold,
    Gaussian center, main photoline area) and fits the electron energy
    prediction parameters.

    NOTE(review): this function does not close `h5` before returning, and
    returns early (around the empty-selection check below) without closing
    it either -- confirm whether leaving the file open is intentional.
    """
    update_with_noise_and_response(file_name, plot, verbose)
    h5 = open_hdf5_file(file_name, plot, verbose)
    raw_group = h5['raw']
    n_events = raw_group['event_time_s'].shape[0]
    deconv_time_signal_dset = h5['filtered_time_signal']
    # energy_scale_eV is presumably a module level constant -- confirm.
    energy_scale_dset = make_dataset(h5, 'energy_scale_eV',
                                     energy_scale_eV.shape)
    energy_trace_dset = make_dataset(h5, 'energy_signal',
                                     (n_events, len(energy_scale_eV)))
    check_tof_to_energy_conversion_matrix(verbose=verbose)
    with h5py.File(tof_to_energy_conversion_file_name, 'r') as tof_to_e_h5:
        if older(energy_scale_dset, [tof_to_e_h5['matrix'],
                                     deconv_time_signal_dset,
                                     Timer_object(1443190000)]):
            if verbose > 0:
                print 'Updating time to energy conversion.',
                print ' In "update_with_time_to_energy_conversion()"',
                print ' with {}'.format(file_name)
            # Get the transformation matrix from file
            M = tof_to_e_h5['matrix'].value
            # Update the energy scale
            energy_scale_dset[:] = tof_to_e_h5['energy_scale_eV'].value
            energy_scale_dset.attrs['time_stamp'] = time.time()
            # Get the photon energy prediction parameters
            params = (tof_to_energy.photon_energy_params() +
                      tof_to_energy.tof_prediction_params())
            for k in params:
                params[k].value = tof_to_e_h5[k].value
            if verbose > 0:
                print 'Computing energy spectra.'
            for i_evt in range(n_events):
                # Energy spectra: matrix multiplication maps the time
                # trace onto the energy scale.
                energy_trace_dset[i_evt, :] = M.dot(
                    deconv_time_signal_dset[i_evt, :])
                update_progress(i_evt, n_events, verbose)
            if verbose > 0:
                print ''
            energy_trace_dset.attrs['time_stamp'] = time.time()
    # Calculate energy trace properties
    spectral_properties_group = h5.require_group('spectral_properties')
    spectral_center_dset = make_dataset(spectral_properties_group,
                                        'center_eV', (n_events, ))
    spectral_width_dset = make_dataset(spectral_properties_group,
                                       'width_eV', (n_events, ))
    spectral_threshold_dset = make_dataset(spectral_properties_group,
                                           'threshold', (n_events, ))
    spectral_gaussian_center_dset = make_dataset(spectral_properties_group,
                                                 'gaussian_center',
                                                 (n_events,))
    if older(spectral_center_dset, [energy_trace_dset,
                                    Timer_object(1443421560)]):
        # Restrict the analysis to the 75-125 eV region of the spectrum.
        energy_scale = energy_scale_dset[:]
        sl = slice(np.searchsorted(energy_scale, 75),
                   np.searchsorted(energy_scale, 125))
        energy_scale = energy_scale[sl]
        model = lmfit.models.GaussianModel()
        if verbose > 0:
            print 'Calculating spectral center and width:',
            print 'In "update_with_time_to_energy_conversion()"',
            print 'with {}'.format(file_name)
        for i_evt in range(n_events):
            energy_trace = energy_trace_dset[i_evt, sl]
            t_start, t_end, spectral_threshold_dset[i_evt] = \
                get_trace_bounds(energy_scale,
                                 energy_trace,
                                 threshold=8e-5,
                                 min_width=3,
                                 # useRel=True,
                                 # threshold_rel=0.3
                                 )
            # Peak center and width from the above-threshold interval.
            center = (t_start + t_end) / 2
            spectral_center_dset[i_evt] = center
            width = t_end - t_start
            spectral_width_dset[i_evt] = width
            # Calculate center of mass
            peak_sl = slice(energy_scale.searchsorted(t_start - width/2),
                            energy_scale.searchsorted(t_end + width/2,
                                                      side='right'))
            peak_trace = energy_trace[peak_sl]
            peak_scale = energy_scale[peak_sl]
            # spectral_com_dset[i_evt] = (np.sum(peak_scale * peak_trace) /
            #                             np.sum(peak_trace))
            # Gaussian fit only when enough points are inside the peak.
            if len(peak_trace) > 3:
                out = model.fit(peak_trace, x=peak_scale,
                                center=center, sigma=width/4,
                                amplitude=peak_trace.max() * width / 2)
                spectral_gaussian_center_dset[i_evt] = out.values['center']
            else:
                spectral_gaussian_center_dset[i_evt] = np.nan
            update_progress(i_evt, n_events, verbose)
        spectral_center_dset.attrs['time_stamp'] = time.time()
        spectral_width_dset.attrs['time_stamp'] = time.time()
        spectral_threshold_dset.attrs['time_stamp'] = time.time()
        spectral_gaussian_center_dset.attrs['time_stamp'] = time.time()
    if plot:
        # Show a 4x4 grid of example shots with the detected peak bounds.
        selected_shots = list(np.linspace(0, n_events, 16, endpoint=False))
        plt.figure('peak properties')
        plt.clf()
        _, ax_list = plt.subplots(4, 4, sharex=True, sharey=True,
                                  num='peak properties')
        energy_scale = energy_scale_dset[:]
        sl = slice(np.searchsorted(energy_scale, 75),
                   np.searchsorted(energy_scale, 130))
        energy_scale = energy_scale[sl]
        for i, shot in enumerate(selected_shots):
            energy_trace = energy_trace_dset[shot, :]
            ax = ax_list.flatten()[i]
            # plt.plot(energy_scale - pe_energy_prediction_dset[shot],
            ax.plot(energy_scale, energy_trace[sl])
            c = spectral_center_dset[shot]
            w = spectral_width_dset[shot]
            th = spectral_threshold_dset[shot]
            ax.plot([c-w/2, c+w/2], [th] * 2)
    # Calculate main photoline area
    main_photoline_area = make_dataset(spectral_properties_group,
                                       'main_photoline_area', (n_events, ))
    if older(main_photoline_area, energy_trace_dset):
        if verbose:
            print 'Computing photoline area'
        e_scale = energy_scale_dset.value
        dE = np.mean(np.diff(e_scale))
        # Everything above 55 eV counts as the main photoline.
        e_slice = slice(np.searchsorted(e_scale, 55), None)
        for i_evt in range(n_events):
            raw_A, _ = area_fill.zero_crossing_area(
                energy_trace_dset[i_evt, e_slice])
            # Scale the raw (per-bin) area to eV units.
            main_photoline_area[i_evt] = raw_A * dE
            update_progress(i_evt, n_events, verbose)
        time_stamp_object(main_photoline_area)
    ##########
    # Calculate electron energy prediction
    e_energy_prediction_params_group = make_group(h5,
                                                  'e_energy_prediction_params')
    if older(e_energy_prediction_params_group, [spectral_gaussian_center_dset,
                                                Timer_object(1444931900)]):
        if verbose > 0:
            print 'Fit the electron energy prediction parameters.',
            print 'In "update_with_time_to_energy_conversion()"',
            print 'with {}'.format(file_name)
        # Use only shots where the Gaussian fit produced a finite center.
        selection = np.isfinite(spectral_gaussian_center_dset.value)
        #             &
        #             (0.4 < raw_group['phase_cavity_times'][:, 0]) &
        #             (raw_group['phase_cavity_times'][:, 0] < 1.1))
        spectral_gaussian_center = spectral_gaussian_center_dset[selection]
        if len(spectral_gaussian_center) == 0:
            # Nothing to fit; bail out (h5 is left open, see docstring).
            return
        var_dict = {
            'l3_energy': raw_group['energy_L3_MeV'][selection],
            'bc2_energy': h5['energy_BC2_MeV'][selection],
            # 'fee': h5['fee_mean'][selection],
            'e_energy': spectral_gaussian_center
            }
        prediction_params = \
            tof_to_energy.e_energy_prediction_model_start_params(**var_dict)
        try:
            res = lmfit.minimize(tof_to_energy.e_energy_prediction_model,
                                 prediction_params,
                                 kws=var_dict)
            fit_worked = True
        # NOTE(review): bare except silently swallows every failure mode
        # of the fit -- consider narrowing to the expected exceptions.
        except:
            fit_worked = False
        if verbose > 0 and fit_worked:
            print '\nPrediction params:'
            lmfit.report_fit(res)
        # Create or update the parameters from the fit in the group
        for k, v in prediction_params.iteritems():
            d = e_energy_prediction_params_group.require_dataset(
                k, (), np.float)
            d[()] = v.value if fit_worked else np.nan
        # Remove old parameters that should not be there
        for k in set(e_energy_prediction_params_group.keys()).difference(
                set(prediction_params.keys())):
            del e_energy_prediction_params_group[k]
        # 'time_stamp' without quotes: presumably a module level constant
        # holding the attribute name -- TODO confirm.
        e_energy_prediction_params_group.attrs[time_stamp] = time.time()
        if plot:
            # Diagnostics of the prediction residuals.
            deviation = tof_to_energy.e_energy_prediction_model(
                prediction_params, **var_dict)
            plt.figure('e energy prediction {}'.format(
                h5.filename.split('/')[-1]))
            plt.clf()
            plt.subplot(221)
            # plt.plot(spectral_gaussian_center, deviation, '.')
            plt.scatter(spectral_gaussian_center, deviation,
                        s=4, c=h5['energy_BC2_MeV'][selection],
                        linewidths=(0,), alpha=1)
            plt.xlabel('electron energy (eV)')
            plt.ylabel('prediction residual (eV)')
            x_range = plt.xlim()
            y_range = plt.ylim()
            img, _, _ = np.histogram2d(spectral_gaussian_center, deviation,
                                       bins=2**7, range=[x_range, y_range])
            img = img.T
            plt.subplot(222)
            plt.imshow(img, aspect='auto', interpolation='none',
                       origin='lower', extent=x_range + y_range)
            hist, hist_edges = np.histogram(deviation,
                                            bins=2**5, range=(-3, 3))
            hist_centers = (hist_edges[: -1] + hist_edges[1:])/2
            plt.subplot(223)
            gauss_model = lmfit.models.GaussianModel()
            fit_out = gauss_model.fit(hist, x=hist_centers)
            lmfit.report_fit(fit_out)
            plt.bar(hist_edges[:-1], hist, width=np.diff(hist_edges))
            plt.plot(hist_centers, fit_out.best_fit, 'r', linewidth=2)
            plt.subplot(224)
            plt.plot(spectral_gaussian_center, h5['energy_BC2_MeV'][selection],
                     '.')
def update_with_energy_prediction(file_name, plot=False, verbose=0):
update_with_time_to_energy_conversion(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
prediction_map = {'117': 'h5_files/run118_all.h5',
'114': 'h5_files/run115_all.h5',
'113': 'h5_files/run112_all.h5',
'108': 'h5_files/run109_all.h5',
'101': 'h5_files/run100_all.h5',
'102': 'h5_files/run100_all.h5'}
pe_energy_prediction_dset = make_dataset(
h5, 'photoelectron_energy_prediction_eV', (n_events,))
spectral_properties_group = h5['spectral_properties']
# spectral_gaussian_center_dset = spectral_properties_group[
# 'gaussian_center']
fee_dset = h5['fee_mean']
energy_BC2_dset = h5['energy_BC2_MeV']
energy_L3_dset = raw_group['energy_L3_MeV']
for k, v in prediction_map.iteritems():
if k in file_name:
update_with_time_to_energy_conversion(v, plot=False,
verbose=verbose-1)
ref_h5 = open_hdf5_file(file_name)
e_energy_prediction_params_group = \
ref_h5['e_energy_prediction_params']
break
else:
e_energy_prediction_params_group = h5['e_energy_prediction_params']
if older(pe_energy_prediction_dset, [e_energy_prediction_params_group,
fee_dset,
energy_BC2_dset,
raw_group,
Timer_object(1444981500)]):
if verbose > 0:
print 'Updating energy prediction.',
print ' In "update_with_energy_prediction()" with {}'.format(
file_name)
prediction_params = lmfit.Parameters()
for k in e_energy_prediction_params_group:
prediction_params.add(k, e_energy_prediction_params_group[k][()])
var_dict = {
'l3_energy': energy_L3_dset.value,
'bc2_energy': energy_BC2_dset.value,
'fee': fee_dset.value
}
try:
pe_energy_prediction_dset[:] = \
tof_to_energy.e_energy_prediction_model(prediction_params,
**var_dict)
except:
pe_energy_prediction_dset[:] = np.nan
pe_energy_prediction_dset.attrs[time_stamp] = time.time()
##########
# Make the christmas three histogram
n_spectral_center_bins = 2**7
n_spectral_width_bins = 2**7
spectral_center_axis_dset = make_dataset(spectral_properties_group,
'center_axis_eV',
(n_spectral_center_bins, ))
spectral_width_axis_dset = make_dataset(spectral_properties_group,
'width_axis_eV',
(n_spectral_width_bins, ))
spectral_histogram_dset = make_dataset(spectral_properties_group,
'histogram',
(n_spectral_width_bins,
n_spectral_center_bins))
spectral_center_dset = spectral_properties_group['center_eV']
spectral_width_dset = spectral_properties_group['width_eV']
pct_filter_dset = h5['pct_filter']
if older(spectral_histogram_dset, [spectral_center_dset,
spectral_width_dset,
pe_energy_prediction_dset,
pct_filter_dset,
Timer_object(2444203160)]):
if verbose > 0:
print 'Making the christmas tree plot.',
print ' In "update_with_energy_prediction()"',
print ' with {}'.format(file_name)
spectral_width_axis_dset[:] = np.linspace(0, 35, n_spectral_width_bins)
spectral_width_axis_dset.attrs['time_stamp'] = time.time()
spectral_center_axis_dset[:] = np.linspace(-20, 20,
n_spectral_center_bins)
spectral_center_axis_dset.attrs['time_stamp'] = time.time()
# I = (pct_filter_dset.value &
# (-0.1 < raw_group['phase_cavity_times'][:, 1]) &
## (raw_group['phase_cavity_times'][:, 1] < 0.05) &
## (0.75 < raw_group['phase_cavity_times'][:, 0]) &
## (raw_group['phase_cavity_times'][:, 0] < 0.85) &
# (0.065 < raw_group['power_meter_V'].value) &
# (raw_group['power_meter_V'].value < 0.1))
I = np.ones(pct_filter_dset.shape, dtype=bool)
hist = aol_plotting.center_histogram_2d(
spectral_center_dset[I] - pe_energy_prediction_dset[I],
spectral_width_dset[I],
spectral_center_axis_dset[:],
spectral_width_axis_dset[:])
hist[hist == 0] = np.nan
spectral_histogram_dset[:] = hist
spectral_histogram_dset.attrs['time_stamp'] = time.time()
if plot:
plt.figure('christmas tree {}'.format(h5.filename.split('/')[-1]))
plt.clf()
plt.imshow(spectral_histogram_dset[:], aspect='auto',
interpolation='none', origin='lower',
extent=(np.min(spectral_center_axis_dset),
np.max(spectral_center_axis_dset),
np.min(spectral_width_axis_dset),
np.max(spectral_width_axis_dset)))
plt.xlabel('center (eV)')
plt.ylabel('width (eV)')
plt.colorbar()
plt.savefig('figures/christmas_tree_{}.png'.format(
h5.filename.split('/')[-1].split('.')[0]))
h5.close()
def load_file(file_name, plot=False, verbose=0):
""" Load file and make sure it is up to date."""
# if verbose > 0:
# print 'Entering "load_file()" with file_name={}'.format(file_name)
update_with_energy_prediction(file_name, plot, verbose)
h5 = open_hdf5_file(file_name, plot, verbose)
raw_group = h5['raw']
n_events = raw_group['event_time_s'].shape[0]
if verbose > 0:
print 'File {} processed.'.format(h5.file)
print 'It contains', n_events, 'events.'
if verbose > 1:
list_hdf5_content(h5)
return h5
def touch_all_files(verbose=2):
    """Run the full processing chain on every run file found in data_dir."""
    for entry in os.listdir(data_dir):
        # Only the combined run files are of interest here.
        if entry.startswith('run') and entry.endswith('_all.h5'):
            load_file('/'.join([data_dir, entry]), verbose=verbose)
if __name__ == '__main__':
    # Parse the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--hdf5_file', type=str,
                        default='h5_files/run108_all.h5',
                        help='Path to hdf5 file to process')
    parser.add_argument('--plot', action='store_true',
                        help='Display plots. Default: no plots.')
    parser.add_argument('-v', '--verbose', action='count',
                        help='increase output verbosity')
    args = parser.parse_args()
    # Unpack the parser arguments.
    hdf5_file = args.hdf5_file
    plot = args.plot
    # NOTE(review): with action='count' and no default, verbose is None
    # when -v is not given; 'None > 0' evaluates False in Python 2, so the
    # checks below still work -- this would raise under Python 3.
    verbose = args.verbose
    # If plotting is requested, run pyplot in the interactive mode.
    if plot:
        plt.ion()
    if verbose > 0:
        print 'Get the noise spectrum just to make sure it is up to date.'
    get_nois_spectrum(plot=plot, verbose=verbose)
    # Load the given file.
    if verbose > 0:
        print 'Load the requested file: {}'.format(hdf5_file)
    h5 = load_file(hdf5_file, verbose=verbose, plot=plot)
    # Get the raw group of the file.
    raw_group = h5['raw']
    # Number of events in the file.
    n_events = len(raw_group['event_time_s'])
    # Time trace related information.
    raw_time = raw_group['time_scale'].value
    raw_traces_dset = raw_group['time_signal']
    filtered_traces = h5['filtered_time_signal']
    # Pulse energy
    raw_fee_dset = raw_group['FEE_energy_mJ']
    n_fee = raw_fee_dset.shape[1]
    # frequency domain
    freq_axis = h5['fft_freq_axis'].value
    fft_mean = h5['fft_spectrum_mean'].value
    snr = h5['snr_spectrum'].value
    # Fee correlation plots (currently disabled by the 'and False').
    if plot and False:
        if verbose > 0:
            print 'Plotting fee correlations.'
        plt.figure('fee')
        plt.clf()
        ax = None
        for i in range(n_fee):
            for k in range(n_fee):
                # Scatter plot of every fee gauge against every other.
                ax = plt.subplot(n_fee, n_fee, i + k*n_fee + 1,
                                 sharex=ax, sharey=ax)
                ax.plot(raw_fee_dset[:, i], raw_fee_dset[:, k], '.')
                if i > 0:
                    plt.setp(ax.get_yticklabels(), visible=False)
                if k < n_fee-1:
                    plt.setp(ax.get_xticklabels(), visible=False)
        plt.xlim(xmin=0)
        plt.ylim(ymin=0)
        if verbose > 0:
            print 'Plotting fee histogram.'
        plt.figure('fee histogram')
        plt.clf()
        plt.hist(h5['fee_mean'].value, bins=100)
    if plot:
        if verbose > 0:
            print 'Plot signal maximium histogram.'
        plt.figure('signal hist')
        plt.clf()
        plt.hist(h5['max_signal'], bins=100)
    if plot:
        if verbose > 0:
            print 'Plot spectr'
        plt.figure('fft')
        plt.clf()
        plt.semilogy(freq_axis, fft_mean, label='average spectrum')
        plt.semilogy(freq_axis, snr, label='snr')
        plt.legend(loc='best')
    # Plot some traces
    if plot:
        if verbose > 0:
            print 'Plotting traces'
        trace_fig = plt.figure('traces {}'.format(hdf5_file))
        trace_fig.clf()
        raw_mean_tr = raw_traces_dset.value.mean(0)
        deconv_mean_tr = filtered_traces.value.mean(0)
        # Pick one random shot to show next to the averages.
        rand_event = np.random.randint(n_events)
        response, _ = get_response(plot=False, verbose=verbose)
        plt.plot(raw_time, raw_traces_dset[rand_event, :],
                 label='single trace')
        plt.plot(raw_time, filtered_traces[rand_event, :],
                 label='Deconv single trace')
        plt.plot(raw_time, raw_mean_tr, label='mean trace')
        plt.plot(raw_time, deconv_mean_tr,
                 label='Deconv mean')
        plt.legend(loc='best')
        # Plot the phase cavity times
        pct = raw_group['phase_cavity_times']
        plt.figure('Phase cavity times')
        plt.clf()
        # pc_selection = (np.isfinite(np.sum(pct, axis=1)) &
        #                 (pct[:, 0] > -2) & (pct[:, 0] < 2) &
        #                 (pct[:, 1] > -2) & (pct[:, 1] < 2))
        #                 (pct[:, 0] > -50) & (pct[:, 0] < 50))
        pc_selection = h5['pct_filter'].value
        for i in range(2):
            plt.subplot(1, 3, i+1)
            plt.title('Time {}'.format(i))
            hist, hist_edges = np.histogram(pct[pc_selection, i], bins=100)
            plt.bar(hist_edges[: -1], hist, width=np.diff(hist_edges))
        plt.subplot(133)
        plt.plot(pct[pc_selection, 0], pct[pc_selection, 1], '.')
        # Plot energy traces and photon energy diagnostics
        pe_energy_dset = h5['photoelectron_energy_prediction_eV']
        energy_scale = h5['energy_scale_eV'][:]
        energy_signal_dset = h5['energy_signal']
        selected_shots = np.linspace(0, n_events, 100, endpoint=False,
                                     dtype=int)
        plt.figure('Energy spectra')
        plt.clf()
        ax1 = plt.subplot(121)
        ax2 = plt.subplot(122)
        dy = 1e-5
        for i, shot in enumerate(selected_shots):
            # Stack the spectra with a constant vertical offset per shot;
            # right panel is shifted by the per-shot energy prediction.
            ax1.plot(energy_scale, energy_signal_dset[shot, :] + dy * i)
            ax2.plot(energy_scale - pe_energy_dset[shot],
                     energy_signal_dset[shot, :] + dy * i)
        ax2.set_xlim(-20, 25)
        # %%
        # Plot the photoline area
        plt.figure('photoline area')
        plt.clf()
        spectral_properties_group = h5['spectral_properties']
        main_photoline_area = spectral_properties_group[
            'main_photoline_area'].value
        fee = h5['fee_mean'].value
        I = np.isfinite(main_photoline_area) & np.isfinite(fee)
        # Quadratic fit of photoline area vs. pulse energy.
        p = np.polyfit(fee[I], main_photoline_area[I], 2)
        fee_ax = np.linspace(min(fee[I]), max(fee[I]), 2**5)
        plt.subplot(121)
        plt.plot(fee, main_photoline_area, '.')
        plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
        plt.subplot(122)
        plt.hist2d(fee[I], main_photoline_area[I], bins=2**7)
        plt.plot(fee_ax, np.polyval(p, fee_ax), 'r')
|
aolindahl/streaking
|
process_hdf5.py
|
Python
|
gpl-2.0
| 46,151
|
[
"Gaussian"
] |
6512bab1ab20d638fc5708b11ef844c7a0bc01614e6d062d94904deae0c22aca
|
"""
Python implementation of the fast ICA algorithms.
Reference: Tables 8.3 and 8.4 page 196 in the book:
Independent Component Analysis, by Hyvarinen et al.
"""
# Authors: Pierre Lafaye de Micheaux, Stefan van der Walt, Gael Varoquaux,
# Bertrand Thirion, Alexandre Gramfort, Denis A. Engemann
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..exceptions import ConvergenceWarning
from ..externals import six
from ..externals.six import moves
from ..externals.six import string_types
from ..utils import check_array, as_float_array, check_random_state
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
__all__ = ['fastica', 'FastICA']
def _gs_decorrelation(w, W, j):
"""
Orthonormalize w wrt the first j rows of W
Parameters
----------
w : ndarray of shape(n)
Array to be orthogonalized
W : ndarray of shape(p, n)
Null space definition
j : int < p
The no of (from the first) rows of Null space W wrt which w is
orthogonalized.
Notes
-----
Assumes that W is orthogonal
w changed in place
"""
w -= np.dot(np.dot(w, W[:j].T), W[:j])
return w
def _sym_decorrelation(W):
""" Symmetric decorrelation
i.e. W <- (W * W.T) ^{-1/2} * W
"""
s, u = linalg.eigh(np.dot(W, W.T))
# u (resp. s) contains the eigenvectors (resp. square roots of
# the eigenvalues) of W * W.T
return np.dot(np.dot(u * (1. / np.sqrt(s)), u.T), W)
def _ica_def(X, tol, g, fun_args, max_iter, w_init):
    """Deflationary FastICA using fun approx to neg-entropy function

    Used internally by FastICA. Components are extracted one at a time;
    each new direction is kept orthogonal to the previously found ones.
    """
    n_components = w_init.shape[0]
    W = np.zeros((n_components, n_components), dtype=X.dtype)
    iteration_counts = []

    # j is the index of the component being extracted.
    for j in range(n_components):
        # Start from the normalized initial guess for this component.
        w = w_init[j, :].copy()
        w /= np.sqrt(np.sum(w ** 2))

        for it in moves.xrange(max_iter):
            gwtx, g_wtx = g(np.dot(w.T, X), fun_args)
            w_new = (X * gwtx).mean(axis=1) - g_wtx.mean() * w
            _gs_decorrelation(w_new, W, j)
            w_new /= np.sqrt(np.sum(w_new ** 2))
            # Converged when the direction stops changing (up to sign).
            delta = np.abs(np.abs(np.sum(w_new * w)) - 1)
            w = w_new
            if delta < tol:
                break

        iteration_counts.append(it + 1)
        W[j, :] = w

    return W, max(iteration_counts)
def _ica_par(X, tol, g, fun_args, max_iter, w_init):
    """Parallel FastICA.

    Used internally by FastICA --main loop. All components are updated
    simultaneously and symmetrically re-decorrelated on every iteration.
    """
    W = _sym_decorrelation(w_init)
    del w_init
    n_samples = float(X.shape[1])
    converged = False
    for ii in moves.xrange(max_iter):
        gwtx, g_wtx = g(np.dot(W, X), fun_args)
        W_next = _sym_decorrelation(
            np.dot(gwtx, X.T) / n_samples - g_wtx[:, np.newaxis] * W)
        # Free the intermediates before the next iteration.
        del gwtx, g_wtx
        # Largest deviation of the diagonal from one; builtin max/abs are
        # faster than the numpy counterparts here.
        lim = max(abs(abs(np.diag(np.dot(W_next, W.T))) - 1))
        W = W_next
        if lim < tol:
            converged = True
            break
    if not converged:
        warnings.warn('FastICA did not converge. Consider increasing '
                      'tolerance or the maximum number of iterations.',
                      ConvergenceWarning)

    return W, ii + 1
# Some standard non-linear functions.
# XXX: these should be optimized, as they can be a bottleneck.
def _logcosh(x, fun_args=None):
alpha = fun_args.get('alpha', 1.0) # comment it out?
x *= alpha
gx = np.tanh(x, x) # apply the tanh inplace
g_x = np.empty(x.shape[0])
# XXX compute in chunks to avoid extra allocation
for i, gx_i in enumerate(gx): # please don't vectorize.
g_x[i] = (alpha * (1 - gx_i ** 2)).mean()
return gx, g_x
def _exp(x, fun_args):
exp = np.exp(-(x ** 2) / 2)
gx = x * exp
g_x = (1 - x ** 2) * exp
return gx, g_x.mean(axis=-1)
def _cube(x, fun_args):
return x ** 3, (3 * x ** 2).mean(axis=-1)
def fastica(X, n_components=None, algorithm="parallel", whiten=True,
            fun="logcosh", fun_args=None, max_iter=200, tol=1e-04, w_init=None,
            random_state=None, return_X_mean=False, compute_sources=True,
            return_n_iter=False):
    """Perform Fast Independent Component Analysis.

    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    n_components : int, optional
        Number of components to extract. If None no dimension reduction
        is performed.

    algorithm : {'parallel', 'deflation'}, optional
        Apply a parallel or deflational FASTICA algorithm.

    whiten : boolean, optional
        If True perform an initial whitening of the data.
        If False, the data is assumed to have already been
        preprocessed: it should be centered, normed and white.
        Otherwise you will get incorrect results.
        In this case the parameter n_components will be ignored.

    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. The derivative should be averaged along its last dimension.
        Example:

        def my_g(x):
            return x ** 3, np.mean(3 * x ** 2, axis=-1)

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty or None and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}

    max_iter : int, optional
        Maximum number of iterations to perform.

    tol : float, optional
        A positive scalar giving the tolerance at which the
        un-mixing matrix is considered to have converged.

    w_init : (n_components, n_components) array, optional
        Initial un-mixing array of dimension (n.comp,n.comp).
        If None (default) then an array of normal r.v.'s is used.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    return_X_mean : bool, optional
        If True, X_mean is returned too.

    compute_sources : bool, optional
        If False, sources are not computed, but only the rotation matrix.
        This can save memory when working with big data. Defaults to True.

    return_n_iter : bool, optional
        Whether or not to return the number of iterations.

    Returns
    -------
    K : array, shape (n_components, n_features) | None.
        If whiten is 'True', K is the pre-whitening matrix that projects data
        onto the first n_components principal components. If whiten is 'False',
        K is 'None'.

    W : array, shape (n_components, n_components)
        Estimated un-mixing matrix.
        The mixing matrix can be obtained by::

            w = np.dot(W, K.T)
            A = w.T * (w * w.T).I

    S : array, shape (n_samples, n_components) | None
        Estimated source matrix

    X_mean : array, shape (n_features, )
        The mean over features. Returned only if return_X_mean is True.

    n_iter : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge. This is
        returned only when return_n_iter is set to `True`.

    Notes
    -----
    The data matrix X is considered to be a linear combination of
    non-Gaussian (independent) components i.e. X = AS where columns of S
    contain the independent components and A is a linear mixing
    matrix. In short ICA attempts to `un-mix' the data by estimating an
    un-mixing matrix W where ``S = W K X.``

    This implementation was originally made for data of shape
    [n_features, n_samples]. Now the input is transposed
    before the algorithm is applied. This makes it slightly
    faster for Fortran-ordered input.

    Implemented using FastICA:
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """
    random_state = check_random_state(random_state)
    fun_args = {} if fun_args is None else fun_args
    # make interface compatible with other decompositions
    # a copy is required only for non whitened data
    X = check_array(X, copy=whiten, dtype=FLOAT_DTYPES,
                    ensure_min_samples=2).T

    alpha = fun_args.get('alpha', 1.0)
    if not 1 <= alpha <= 2:
        raise ValueError('alpha must be in [1,2]')

    # Resolve the non-linearity: a named built-in or a user callable.
    if fun == 'logcosh':
        g = _logcosh
    elif fun == 'exp':
        g = _exp
    elif fun == 'cube':
        g = _cube
    elif callable(fun):
        def g(x, fun_args):
            return fun(x, **fun_args)
    else:
        exc = ValueError if isinstance(fun, six.string_types) else TypeError
        raise exc("Unknown function %r;"
                  " should be one of 'logcosh', 'exp', 'cube' or callable"
                  % fun)

    # After the transpose above: n features (rows), p samples (columns).
    n, p = X.shape

    if not whiten and n_components is not None:
        n_components = None
        warnings.warn('Ignoring n_components with whiten=False.')

    # Clamp n_components to the feasible range.
    if n_components is None:
        n_components = min(n, p)
    if (n_components > min(n, p)):
        n_components = min(n, p)
        warnings.warn('n_components is too large: it will be set to %s' % n_components)

    if whiten:
        # Centering the columns (ie the variables)
        X_mean = X.mean(axis=-1)
        X -= X_mean[:, np.newaxis]

        # Whitening and preprocessing by PCA
        u, d, _ = linalg.svd(X, full_matrices=False)

        del _
        K = (u / d).T[:n_components]  # see (6.33) p.140
        del u, d
        X1 = np.dot(K, X)
        # see (13.6) p.267 Here X1 is white and data
        # in X has been projected onto a subspace by PCA
        X1 *= np.sqrt(p)
    else:
        # X must be casted to floats to avoid typing issues with numpy
        # 2.0 and the line below
        X1 = as_float_array(X, copy=False)  # copy has been taken care of

    # Initial un-mixing matrix: random normal unless supplied by caller.
    if w_init is None:
        w_init = np.asarray(random_state.normal(size=(n_components,
                            n_components)), dtype=X1.dtype)

    else:
        w_init = np.asarray(w_init)
        if w_init.shape != (n_components, n_components):
            raise ValueError('w_init has invalid shape -- should be %(shape)s'
                             % {'shape': (n_components, n_components)})

    kwargs = {'tol': tol,
              'g': g,
              'fun_args': fun_args,
              'max_iter': max_iter,
              'w_init': w_init}

    if algorithm == 'parallel':
        W, n_iter = _ica_par(X1, **kwargs)
    elif algorithm == 'deflation':
        W, n_iter = _ica_def(X1, **kwargs)
    else:
        raise ValueError('Invalid algorithm: must be either `parallel` or'
                         ' `deflation`.')
    del X1

    # Assemble the return tuple according to the requested extras.
    if whiten:
        if compute_sources:
            S = np.dot(np.dot(W, K), X).T
        else:
            S = None

        if return_X_mean:
            if return_n_iter:
                return K, W, S, X_mean, n_iter
            else:
                return K, W, S, X_mean
        else:
            if return_n_iter:
                return K, W, S, n_iter
            else:
                return K, W, S

    else:
        if compute_sources:
            S = np.dot(W, X).T
        else:
            S = None

        if return_X_mean:
            if return_n_iter:
                return None, W, S, None, n_iter
            else:
                return None, W, S, None
        else:
            if return_n_iter:
                return None, W, S, n_iter
            else:
                return None, W, S
class FastICA(BaseEstimator, TransformerMixin):
    """FastICA: a fast algorithm for Independent Component Analysis.

    Read more in the :ref:`User Guide <ICA>`.

    Parameters
    ----------
    n_components : int, optional
        Number of components to use. If none is passed, all are used.
    algorithm : {'parallel', 'deflation'}
        Apply parallel or deflational algorithm for FastICA.
    whiten : boolean, optional
        If whiten is false, the data is already considered to be
        whitened, and no whitening is performed.
    fun : string or function, optional. Default: 'logcosh'
        The functional form of the G function used in the
        approximation to neg-entropy. Could be either 'logcosh', 'exp',
        or 'cube'.
        You can also provide your own function. It should return a tuple
        containing the value of the function, and of its derivative, in the
        point. Example::

            def my_g(x):
                return x ** 3, 3 * x ** 2

    fun_args : dictionary, optional
        Arguments to send to the functional form.
        If empty and if fun='logcosh', fun_args will take value
        {'alpha' : 1.0}.
    max_iter : int, optional
        Maximum number of iterations during fit.
    tol : float, optional
        Tolerance on update at each iteration.
    w_init : None or an (n_components, n_components) ndarray
        The mixing matrix to be used to initialize the algorithm.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    components_ : 2D array, shape (n_components, n_features)
        The unmixing matrix.
    mixing_ : array, shape (n_features, n_components)
        The mixing matrix.
    n_iter_ : int
        If the algorithm is "deflation", n_iter is the
        maximum number of iterations run across all components. Else
        they are just the number of iterations taken to converge.

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import FastICA
    >>> X, _ = load_digits(return_X_y=True)
    >>> transformer = FastICA(n_components=7,
    ...         random_state=0)
    >>> X_transformed = transformer.fit_transform(X)
    >>> X_transformed.shape
    (1797, 7)

    Notes
    -----
    Implementation based on
    `A. Hyvarinen and E. Oja, Independent Component Analysis:
    Algorithms and Applications, Neural Networks, 13(4-5), 2000,
    pp. 411-430`
    """

    def __init__(self, n_components=None, algorithm='parallel', whiten=True,
                 fun='logcosh', fun_args=None, max_iter=200, tol=1e-4,
                 w_init=None, random_state=None):
        super(FastICA, self).__init__()
        # Validate eagerly so an invalid setting fails at construction time.
        if max_iter < 1:
            raise ValueError("max_iter should be greater than 1, got "
                             "(max_iter={})".format(max_iter))
        # Parameters are stored unmodified: the scikit-learn estimator
        # contract (get_params/set_params/clone) relies on these names.
        self.n_components = n_components
        self.algorithm = algorithm
        self.whiten = whiten
        self.fun = fun
        self.fun_args = fun_args
        self.max_iter = max_iter
        self.tol = tol
        self.w_init = w_init
        self.random_state = random_state

    def _fit(self, X, compute_sources=False):
        """Fit the model

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        compute_sources : bool
            If False, sources are not computes but only the rotation matrix.
            This can save memory when working with big data. Defaults to False.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        fun_args = {} if self.fun_args is None else self.fun_args
        # Delegate the estimation to the functional fastica() helper, which
        # returns the whitening matrix (or None when whiten=False), the
        # unmixing matrix, the sources (or None), the per-feature mean
        # (or None) and the iteration count.
        whitening, unmixing, sources, X_mean, self.n_iter_ = fastica(
            X=X, n_components=self.n_components, algorithm=self.algorithm,
            whiten=self.whiten, fun=self.fun, fun_args=fun_args,
            max_iter=self.max_iter, tol=self.tol, w_init=self.w_init,
            random_state=self.random_state, return_X_mean=True,
            compute_sources=compute_sources, return_n_iter=True)

        if self.whiten:
            # Fold the whitening step into components_ so that transform()
            # reduces to centering plus a single matrix product.
            self.components_ = np.dot(unmixing, whitening)
            self.mean_ = X_mean
            self.whitening_ = whitening
        else:
            self.components_ = unmixing

        self.mixing_ = linalg.pinv(self.components_)

        if compute_sources:
            self.__sources = sources

        return sources

    def fit_transform(self, X, y=None):
        """Fit the model and recover the sources from X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : Ignored

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        return self._fit(X, compute_sources=True)

    def fit(self, X, y=None):
        """Fit the model to X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : Ignored

        Returns
        -------
        self
        """
        self._fit(X, compute_sources=False)
        return self

    def transform(self, X, y='deprecated', copy=True):
        """Recover the sources from X (apply the unmixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Data to transform, where n_samples is the number of samples
            and n_features is the number of features.
        y : (ignored)
            .. deprecated:: 0.19
               This parameter will be removed in 0.21.
        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        # 'y' is accepted only for backward compatibility; any other value
        # triggers the deprecation warning.
        if not isinstance(y, string_types) or y != 'deprecated':
            warnings.warn("The parameter y on transform() is "
                          "deprecated since 0.19 and will be removed in 0.21",
                          DeprecationWarning)

        check_is_fitted(self, 'mixing_')

        X = check_array(X, copy=copy, dtype=FLOAT_DTYPES)
        if self.whiten:
            # Remove the training mean before unmixing.  With copy=False
            # this subtraction modifies the caller's array in place.
            X -= self.mean_

        return np.dot(X, self.components_.T)

    def inverse_transform(self, X, copy=True):
        """Transform the sources back to the mixed data (apply mixing matrix).

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            Sources, where n_samples is the number of samples
            and n_components is the number of components.
        copy : bool (optional)
            If False, data passed to fit are overwritten. Defaults to True.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mixing_')

        # A copy is only needed when the mean will be added back in place.
        X = check_array(X, copy=(copy and self.whiten), dtype=FLOAT_DTYPES)
        X = np.dot(X, self.mixing_.T)
        if self.whiten:
            X += self.mean_

        return X
|
vortex-ape/scikit-learn
|
sklearn/decomposition/fastica_.py
|
Python
|
bsd-3-clause
| 19,834
|
[
"Gaussian"
] |
76a0f5876ea09c1882ac8000050c4d7cc885a5aa659e6b8cf5ed647a54b24694
|
#Standard imports
import os
import inspect
#Non-standard imports
import catmap
from catmap import ReactionModelWrapper
from catmap.model import ReactionModel
from ase.atoms import string2symbols
class ParserBase(ReactionModelWrapper):
    def __init__(self, reaction_model=None):
        """Class for `parsing' information from raw data
        (databases, spreadsheets, text files, trajectories, etc.) into a
        structure which is useful to the microkinetic model. This class acts
        as a base class to be inherited by other parser classes, but it is
        not functional on its own.

        input_file: defines the file path or object to get data from

        A functional derived parser class must also contain the methods:

        parse(input_file): a function to parse the input_file file/object and
        return properly formatted data. The parse function should save all
        necessary attributes to the Parser class. After parsing the parent
        microkinetic model class will update itself from the Parser attributes.
        """
        # Create the default ReactionModel lazily.  A mutable default
        # argument (reaction_model=ReactionModel()) would be evaluated once
        # at class-definition time and silently shared between every parser
        # constructed without an explicit model.
        if reaction_model is None:
            reaction_model = ReactionModel()
        self._rxm = reaction_model
        self._required = {}  # No user-defined attributes are required.

    def _baseparse(self):
        """Fill in and normalize ``self.species_definitions``.

        For every gas, adsorbate and transition-state species this records
        its name, site, type, site count and (when it can be parsed from the
        species name) its atomic composition.  It then creates/updates a
        definition entry for each site, and finally builds
        ``self.atomic_reservoir_list`` (candidate gas-phase reference sets,
        one gas per element) if one was not supplied.
        """
        # Python 2/3 compatible string type for the isinstance check below
        # (the original code used the py2-only `basestring`).
        try:
            string_types = basestring  # noqa: F821 - Python 2
        except NameError:
            string_types = str  # Python 3
        #Make dictionary of useful information about species in model
        if not self.species_definitions:
            self.species_definitions = {}
        for species in (self.gas_names+self.adsorbate_names+
                self.transition_state_names):
            ads_info = {}
            if '_' in species:
                #Site label is appended after the last underscore, e.g. "CO_s".
                name,site = species.rsplit('_',1)
            else:
                name = species
                site = self._default_site
            ads_info['name'] = name
            ads_info['site'] = site
            if species in self.gas_names:
                ads_info['type'] = 'gas'
                ads_info['n_sites'] = 0
            elif species in self.adsorbate_names:
                ads_info['type'] = 'adsorbate'
                ads_info['n_sites'] = 1
            elif species in self.transition_state_names:
                ads_info['type'] = 'transition_state'
                ads_info['n_sites'] = 1
            else:
                ads_info['type'] = 'unknown'
            composition = {}
            try:
                #"-" marks a transition state; strip it before parsing atoms.
                symbs = string2symbols(name.replace('-',''))
                for a in set(symbs):
                    composition[a] = symbs.count(a)
            except ValueError:
                pass
            ads_info['composition'] = composition
            if species in self.species_definitions:
                #User-supplied definitions override the parsed defaults.
                ads_info.update(self.species_definitions[species])
            if not ads_info['composition']:
                raise ValueError('Could not determine composition for '+species)
            self.species_definitions[species] = ads_info
        #set site definitions.  Iterate over a snapshot of the keys because
        #the loop body inserts new (site) entries into species_definitions,
        #which would raise RuntimeError on Python 3 otherwise.
        for species in list(self.species_definitions.keys()):
            site = self.species_definitions[species].get('site',None)
            if site:
                ads_info = {}
                ads_info['type'] = 'site'
                ads_info['site'] = site
                ads_info['formation_energy'] = 0
                if site not in self._gas_sites:
                    ads_info['n_sites'] = 1
                else:
                    ads_info['n_sites'] = 0
                    ads_info['site_names'] = ['gas']
                ads_info['total'] = 0
                ads_info['composition'] = {}
                if self.site_definitions: #Deprecate later...
                    site_names = self.site_definitions[site]
                    if isinstance(site_names, string_types):
                        site_names = [site_names]
                    ads_info['site_names'] = site_names
                if self.site_totals: #Deprecate later...
                    ads_info['total'] = self.site_totals[site]
                if site in self.species_definitions:
                    ads_info.update(self.species_definitions[site])
                self.species_definitions[site] = self.species_definitions['*_'+site] \
                        = ads_info
        if not self.atomic_reservoir_list:
            #Make list of valid reference sets for e.g. boltzmann coverages
            cart_product = []
            all_atoms = []
            composition_dict = {}
            dummy_dict = {}
            for sp in self.gas_names:
                composition_dict[sp] = self.species_definitions[sp]['composition']
                dummy_dict[sp] = 0 #dummy dict of energies
                for key in composition_dict[sp].keys():
                    if key not in all_atoms:
                        all_atoms.append(key)
            #For each element, list the gasses that contain it.
            for key in all_atoms:
                possibles = []
                for sp in self.gas_names:
                    if composition_dict[sp].get(key,None):
                        possibles.append(sp)
                cart_product.append(possibles)
            ref_sets = []
            for prod in catmap.functions.cartesian_product(*cart_product):
                refdict = {}
                for ai,pi in zip(all_atoms,prod):
                    refdict[ai] = pi
                #Keep only sets using distinct gasses and not seen before.
                if (sorted(list(refdict.values())) ==
                        sorted(list(set(refdict.values()))) and
                        sorted(list(refdict.values())) not in
                        [sorted(list(rs.values())) for rs in ref_sets]):
                    if refdict and dummy_dict and composition_dict:
                        try:
                            #Reference set is valid only if the conversion of
                            #formation energies is solvable for it.
                            self.convert_formation_energies(dummy_dict,
                                    refdict,composition_dict)
                            ref_sets.append(refdict)
                        except ValueError:
                            pass
            self.atomic_reservoir_list = ref_sets
|
starry99/catmap
|
catmap/parsers/parser_base.py
|
Python
|
gpl-3.0
| 5,798
|
[
"ASE"
] |
09f34aea03ffc285f34cd5624e7ae5badaf6dbdd834d3588cad0cd1adcf0f1b5
|
"""
A first test for the ELBO on the catalysis problem.
The target is consisted of an uninformative prior and a Gaussian likelihood.
The approximating mixture has two components.
Author:
Panagiotis Tsilifis
Date:
6/6/2014
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import cPickle as pickle
from scipy.stats.distributions import norm
import math
from vuq import UncertaintyPropagationLikelihood
from vuq import FlatPDF
from vuq import MultivariateNormal
from vuq import PDFCollection
from vuq import Joint
from vuq import MixturePDF
from vuq import MixtureOfMultivariateNormals
from vuq import FirstOrderEntropyApproximation
from vuq import ThirdOrderExpectationFunctional
from vuq import EvidenceLowerBound
from vuq import Optimizer
from demos import TestModel0
# The number of components to use for the mixture
num_comp = 1
# The model
model = TestModel0()
# The prior
log_p_x = MultivariateNormal(mu=[0])
log_p_z_fake = FlatPDF(model.num_output)
log_p_x_ext = PDFCollection([log_p_x, log_p_z_fake])
# The isotropic Likelihood
log_p_z_given_x = UncertaintyPropagationLikelihood(model, alpha=100.)
# The joint
log_p = Joint(log_p_z_given_x, log_p_x_ext)
# The approximating distribution
log_q = MixtureOfMultivariateNormals.create(log_p.num_dim, num_comp)
# Build the ELBO
# Pick an entropy approximation
entropy = FirstOrderEntropyApproximation()
# Pick an approximation for the expectation of the joint
expectation_functional = ThirdOrderExpectationFunctional(log_p)
# Build the ELBO
elbo = EvidenceLowerBound(entropy, expectation_functional)
print 'ELBO:'
print str(elbo)
# Optimize the elbo
optimizer = Optimizer(elbo)
C_bounds = tuple((1e-32, None) for i in xrange(log_q.num_comp * log_q.num_dim))
L = optimizer.optimize(log_q, max_it=10, C_bounds=C_bounds)
print 'Result:'
print log_q
print 'The right answer is:'
print 'mu:', 0.
print 'sigma:', 1.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(L, linewidth=2)
ax.set_xlabel('Iteration', fontsize=16)
ax.set_ylabel('ELBO', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'test_up_1_elbo.png')
print 'Writing:', png_file
plt.savefig(png_file)
quit()
for i in xrange(log_q.num_dim):
mu = log_q.comp[0].mu[i]
s = math.sqrt(log_q.comp[0].C[i, i])
if i < 5:
name = 'kappa_{%s}' % (i+1)
else:
name = 'sigma^2'
print name, '=', mu, '+-', s
# Plot the calibration result
t = np.array([0.0, 30., 60., 90., 120., 150., 180.])
fig = plt.figure()
ax = fig.add_subplot(111)
m_state = catal_model(log_q.comp[0].mu[:5])
f = m_state['f']
Y = f.reshape(t.shape[0], f.shape[1] / t.shape[0])
styles = ['b', 'r', 'g', 'k', 'm']
for i in xrange(5):
ax.plot(t, Y[:, i], styles[i], linewidth=2)
ax.plot(t, data[:, 1:][:, i], '+' + styles[i], markersize=10, markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'catalysis_1_cali_output.png')
print 'Writing:', png_file
plt.savefig(png_file)
# Do an uncertainty propagation test.
uq_file = os.path.join('demos', 'catalysis_1_cali_uq.pcl')
if os.path.exists(uq_file):
with open(uq_file, 'rb') as fd:
uq_results = pickle.load(fd)
Y_m = uq_results['Y_m']
Y_p05 = uq_results['Y_p05']
Y_p95 = uq_results['Y_p95']
else:
num_mcmc = 100
Y_s = []
for i in xrange(num_mcmc):
print 'taking sample', i + 1
omega = log_q.sample().flatten()
x = omega[:5]
sigma = omega[5]
y = catal_model(x)['f']
Y_s.append(y + sigma * np.random.randn(*y.shape))
Y_s = np.vstack(Y_s)
Y_m = np.percentile(Y_s, 50, axis=0).reshape(Y.shape)
Y_p05 = np.percentile(Y_s, 5, axis=0).reshape(Y.shape)
Y_p95 = np.percentile(Y_s, 95, axis=0).reshape(Y.shape)
uq_results = {}
uq_results['Y_m'] = Y_m
uq_results['Y_p05'] = Y_p05
uq_results['Y_p95'] = Y_p95
with open(uq_file, 'wb') as fd:
pickle.dump(uq_results, fd)
fig = plt.figure()
ax = fig.add_subplot(111)
for i in xrange(5):
ax.plot(t, Y_m[:, i], styles[i], linewidth=2)
ax.fill_between(t, Y_p05[:, i], Y_p95[:, i], color=styles[i], alpha=0.5)
ax.plot(t, data[:, 1:][:, i], '+' + styles[i], markersize=10,
markeredgewidth=2)
ax.set_xlabel('Time (t)', fontsize=16)
ax.set_ylabel('Concentration', fontsize=16)
plt.setp(ax.get_xticklabels(), fontsize=16)
plt.setp(ax.get_yticklabels(), fontsize=16)
png_file = os.path.join('figures', 'catalysis_1_cali_uq.png')
print 'Writing:', png_file
plt.savefig(png_file)
|
ebilionis/variational-reformulation-of-inverse-problems
|
unittests/test_up_0.py
|
Python
|
gpl-2.0
| 4,715
|
[
"Gaussian"
] |
e9507deb223c14b868f907bccfcc0525eabf8d569daf1df1499f2681a870c7ad
|
## Program to simulate short-tE event observing sequence.
from astropy.time import Time, TimeDelta
import mulens_class
from astropy import constants
import matplotlib.pyplot as plt
from sys import argv, exit
from os import path
import numpy as np
import copy
def sim_short_te( params ):
    """Main driver function to simulate observing sequences for short-tE events

    Parameters are supplied as a dict (see read_event_file).  Keys are set
    directly as attributes on the MicrolensingEvent instance; in addition the
    keys I_base, t_obs_start, exposure_sequences, visit_intervals and
    plot_file are used here.  Produces a two-panel figure (lightcurve plus
    its time derivative) written to params['plot_file'].
    """
    # Create event and configure its parameters:
    event = mulens_class.MicrolensingEvent()
    for key, value in params.items():
        setattr(event,key,value)
    # Compute the microlensing lightcurve:
    event.calc_D_lens_source()
    event.calc_einstein_radius()
    event.gen_event_timeline()
    event.calc_pspl_curve()
    mag = params['I_base'] - 2.5 * np.log10( event.A_t_pspl )
    # Simulate photometry from observing sequence.
    # This is done by taking a copy of the event, then resetting the timestamps array,
    # and re-calculating the lightcurve for just those timestamps:
    I_event = copy.copy(event)
    time_start = params['t_obs_start']
    time_end = time_start + TimeDelta( (I_event.t_E.value / 2.0), format='sec' )
    visit_duration = TimeDelta( (30.0*60.0), format='sec' )
    # NOTE(review): visit_cadence is assigned but never used; visit gaps come
    # from params['visit_intervals'] -- confirm it can be removed.
    visit_cadence = TimeDelta( 355.0, format='sec' )
    I_event.t = gen_time_sequence( time_start, time_end, params['exposure_sequences'], params['visit_intervals'] )
    I_event.calc_pspl_curve()
    I_mag = params['I_base'] - 2.5 * np.log10( I_event.A_t_pspl )
    # The V-band sequence starts one visit_duration after the I-band one.
    V_event = copy.copy(event)
    time_start = params['t_obs_start'] + visit_duration
    time_end = time_start + TimeDelta( (I_event.t_E.value / 2.0), format='sec' )
    visit_duration = TimeDelta( (30.0*60.0), format='sec' )
    visit_cadence = TimeDelta( 355.0, format='sec' )
    V_event.t = gen_time_sequence( time_start, time_end, params['exposure_sequences'], params['visit_intervals'] )
    V_event.calc_pspl_curve()
    # NOTE(review): the V lightcurve is zero-pointed with I_base; if a
    # separate V-band baseline is intended this should use a V_base key.
    V_mag = params['I_base'] - 2.5 * np.log10( V_event.A_t_pspl )
    # Plot event lightcurve:
    def select_plot_data( time_stamps, data, t_min, t_max ):
        """Function to select from the arrays given those datapoints within the time range.
        Timestamps should have been corrected for any plotting offsets before
        the function is called (e.g. ts-2450000.0).
        """
        i = np.where( time_stamps >= t_min )
        j = np.where( time_stamps <= t_max )
        idx = np.intersect1d( i, j )
        return time_stamps[idx], data[idx]
    font = 22
    fig = plt.figure(1,(12,12))
    ax = fig.add_axes([0.15, 0.55, 0.775, 0.35]) # [left, bottom, width, height]
    # Plot only +/- dt days around the time of peak magnification.
    dt = 1.0
    t_min = event.t_o.jd - 2450000.0 - dt
    t_max = event.t_o.jd - 2450000.0 + dt
    (I_xplot, I_yplot) = select_plot_data( I_event.t-2450000.0, I_mag, t_min, t_max )
    (V_xplot, V_yplot) = select_plot_data( V_event.t-2450000.0, V_mag, t_min, t_max )
    (model_xplot, model_yplot) = select_plot_data( event.t-2450000.0, mag, t_min, t_max )
    plt.plot( model_xplot, model_yplot, 'k-', label='PSPL' )
    plt.plot( I_xplot, I_yplot, 'rd',label='I data' )
    plt.plot( V_xplot, V_yplot, 'bv',label='V data' )
    (xmin,xmax,ymin,ymax) = plt.axis()
    plt.xlabel('JD-2450000.0', fontsize=font)
    plt.ylabel('Magnitude', fontsize=font)
    plt.title('Simulated lightcurve of a $t_{E}$=' + \
            str( round( ( event.t_E.value/(24.0*60.0*60.0) ), 1) ) + 'd event, ', fontsize=font)
    plt.legend(loc='upper right',frameon=False, numpoints=1)
    ax.tick_params(labelsize=font)
    # Invert the y-axis so brighter (smaller) magnitudes plot upwards.
    plt.axis([xmin,xmax,ymax,ymin])
    # Plot time differential of event lightcurve
    # Note the gradient of the lightcurve is calculated this way and not in normal Python array difference
    # style because the interval between visits is variable. Normal array subtraction ends up differencing
    # datapoints from very different points in the lightcurve, and stepping over the array would be complicated
    # due to the variable visit duration.
    dt = 1.0
    def calc_lc_gradient( event, mag, interval ):
        """Finite-difference gradient using only pairs of consecutive
        datapoints closer together than `interval` (presumably pairs taken
        within a single visit -- confirm against the observing setup)."""
        grad = []
        ts = []
        for i in range( 0, len( event.t )-2, 2 ):
            if event.t[i] - event.t[i+1] < interval:
                grad.append( ( mag[i] - mag[i+1] ) / ( event.t[i] - event.t[i+1] ) )
                ts.append( event.t[i] )
        return np.array( grad ), np.array( ts )
    ( grad_I, ts_I ) = calc_lc_gradient(I_event, I_mag, params['visit_intervals'][0].value)
    ( grad_V, ts_V ) = calc_lc_gradient(V_event, V_mag, params['visit_intervals'][0].value)
    ax = fig.add_axes([0.15, 0.1, 0.775, 0.35]) # [left, bottom, width, height]
    plt.plot( ts_I-2450000.0, grad_I, 'rd')
    plt.plot( ts_V-2450000.0, grad_V, 'bv')
    plt.xlabel('JD-2450000.0', fontsize=font)
    plt.ylabel('Gradient [mag/d]', fontsize=font)
    plt.title('Lightcurve rate of change', fontsize=font)
    ax.tick_params(labelsize=font)
    (xmin,xmax,ymin,ymax) = plt.axis()
    xmin = I_event.t_o.jd - 2450000.0 - dt
    xmax = I_event.t_o.jd - 2450000.0 + dt
    plt.axis([xmin,xmax,ymax,ymin])
    plt.savefig(params['plot_file'])
def read_event_file( file_path ):
    """Function to read the event parameters from a file

    The file contains one "key = value" pair per line.  Scalar physical
    parameters are converted to standard units (t_E days -> TimeDelta in
    seconds, M_L -> M_sun multiples, distances -> pc multiples, phi ->
    radians), timestamps are parsed as ISO UTC Times, and the
    visit_intervals / exposure_sequences lists are converted to TimeDelta
    objects.  Returns the parameter dictionary.
    """
    if path.isfile( file_path ) == False:
        # NOTE(review): execution continues after this message, so the
        # open() below still raises IOError for a missing file -- consider
        # raising or returning here instead.
        print 'Error: Cannot find input file ' + file_path
    file_lines = open( file_path, 'r' ).readlines()
    # Parse parameters to convert to standard units:
    print 'Input parameters:'
    params = {}
    for line in file_lines:
        ( key, value ) = line.replace('\n','').split( '=' )
        key = key.lstrip().rstrip()
        value = str( value ).lstrip().rstrip()
        if key in [ 'u_o', 't_E', 'M_L', 'D_L', 'D_S', 'phi', 'I_base' ]:
            value = float(value)
            if key == 't_E':
                # Einstein crossing time is given in days; store as seconds.
                value = TimeDelta((value * 24.0 * 3600.0),format='sec')
            elif key == 'M_L':
                value = constants.M_sun * value
            elif key == 'D_L' or key == 'D_S':
                value = value * constants.pc
            elif key == 'phi':
                value = ( value * np.pi ) / 180.0
        elif key in [ 't_o', 't_obs_start' ]:
            value = Time( value , format='isot', scale='utc' )
        elif key == 'visit_intervals':
            # Comma-separated list of gaps between visits, given in hours.
            tlist = value.split(',')
            value = []
            for entry in tlist: value.append( TimeDelta( (float( entry )*3600.0), format='sec' ) )
        elif key == 'exposure_sequences':
            # Comma-separated visits, each a colon-separated list of
            # exposure times in seconds.
            tlist1 = value.split(',')
            value = []
            for entry in tlist1:
                tlist2 = entry.split(':')
                sequence = []
                for exp in tlist2:
                    sequence.append( TimeDelta( float( exp ), format='sec' ) )
                value.append( sequence )
        params[key] = value
        print key, value
    return params
def gen_time_sequence(time_start, time_end, exposure_sequences, visit_intervals ):
    """Generate the JD timestamps of a simulated observing campaign.

    Visits are taken back-to-back starting at ``time_start``: visit ``v``
    uses ``exposure_sequences[v]`` (the last entry is reused once the list
    is exhausted) and is followed by a gap of ``visit_intervals[v]`` (same
    reuse rule).  One timestamp (``.jd``) is recorded at the end of each
    exposure, and the loop stops once the running time exceeds
    ``time_end``.  Returns a numpy array of JD values.
    """
    timestamps = []
    current = time_start
    visit_idx = -1
    while current <= time_end:
        # One visit is a set of sequential exposures; reuse the final
        # sequence definition once we run past the configured list.
        visit_idx += 1
        seq = exposure_sequences[min(visit_idx, len(exposure_sequences) - 1)]
        for exposure in seq:
            current = current + exposure
            timestamps.append(current.jd)
        # Gap before the next visit, with the same "reuse last" rule.
        gap = visit_intervals[min(visit_idx, len(visit_intervals) - 1)]
        current = current + gap
    return np.array(timestamps)
#################################################
if __name__ == '__main__':
    # Command-line entry point: expects the path to an event parameter file.
    help_text = """Simulator for short-tE event observations.
Useage:
> python observe_shortte.py path_parameter_file
"""
    if len(argv) > 1:
        file_path = argv[1]
        params = read_event_file( file_path )
        sim_short_te(params)
    else:
        print help_text
|
rachel3834/mulens_modeler
|
trunk/scripts/observe_shortte.py
|
Python
|
gpl-2.0
| 7,738
|
[
"VisIt"
] |
6306e66fa9f2987b7a8087430f85293f2e3854dfeb0bcac491decb901f6b7245
|
from __future__ import division
import numpy as np
from numpy import dot
from dipy.core.geometry import sphere2cart
from dipy.core.geometry import vec2vec_rotmat
from dipy.reconst.utils import dki_design_matrix
# Diffusion coefficients for white matter tracts, in mm^2/s
#
# Based roughly on values from:
#
# Pierpaoli, Basser, "Towards a Quantitative Assessment of Diffusion
# Anisotropy", Magnetic Resonance in Medicine, 1996; 36(6):893-906.
#
diffusion_evals = np.array([1500e-6, 400e-6, 400e-6])
def _check_directions(angles):
"""
Helper function to check if direction ground truth have the right format
and are in cartesian coordinates
Parameters
-----------
angles : array (K,2) or (K, 3)
List of K polar angles (in degrees) for the sticks or array of K
sticks as unit vectors.
Returns
--------
sticks : (K,3)
Sticks in cartesian coordinates.
"""
angles = np.array(angles)
if angles.shape[-1] == 3:
sticks = angles
else:
sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
for pair in angles]
sticks = np.array(sticks)
return sticks
def _add_gaussian(sig, noise1, noise2):
"""
Helper function to add_noise
This one simply adds one of the Gaussians to the sig and ignores the other
one.
"""
return sig + noise1
def _add_rician(sig, noise1, noise2):
"""
Helper function to add_noise.
This does the same as abs(sig + complex(noise1, noise2))
"""
return np.sqrt((sig + noise1) ** 2 + noise2 ** 2)
def _add_rayleigh(sig, noise1, noise2):
"""
Helper function to add_noise
The Rayleigh distribution is $\sqrt\{Gauss_1^2 + Gauss_2^2}$.
"""
return sig + np.sqrt(noise1 ** 2 + noise2 ** 2)
def add_noise(signal, snr, S0, noise_type='rician'):
    r""" Add noise of specified distribution to the signal from a single voxel.

    Parameters
    -----------
    signal : 1-d ndarray
        The signal in the voxel.
    snr : float
        The desired signal-to-noise ratio. (See notes below.)
        If `snr` is None, return the signal as-is.
    S0 : float
        Reference signal for specifying `snr`.
    noise_type : string, optional
        The distribution of noise added. Can be either 'gaussian' for Gaussian
        distributed noise, 'rician' for Rice-distributed noise (default) or
        'rayleigh' for a Rayleigh distribution.

    Returns
    --------
    signal : array, same shape as the input
        Signal with added noise.

    Raises
    ------
    ValueError
        If `noise_type` is not one of 'gaussian', 'rician' or 'rayleigh'.

    Notes
    -----
    SNR is defined here, following [1]_, as ``S0 / sigma``, where ``sigma`` is
    the standard deviation of the two Gaussian distributions forming the real
    and imaginary components of the Rician noise distribution (see [2]_).

    References
    ----------
    .. [1] Descoteaux, Angelino, Fitzgibbons and Deriche (2007) Regularized,
           fast and robust q-ball imaging. MRM, 58: 497-510
    .. [2] Gudbjartson and Patz (2008). The Rician distribution of noisy MRI
           data. MRM 34: 910-914.

    Examples
    --------
    >>> signal = np.arange(800).reshape(2, 2, 2, 100)
    >>> signal_w_noise = add_noise(signal, 10., 100., noise_type='rician')
    """
    if snr is None:
        return signal

    # Fail fast on an unsupported noise model.  Previously an unknown
    # noise_type consumed two random draws and then surfaced as a bare
    # KeyError from the dispatch dict.
    if noise_type not in ('gaussian', 'rician', 'rayleigh'):
        raise ValueError("noise_type must be 'gaussian', 'rician' or "
                         "'rayleigh', got %r" % (noise_type,))

    sigma = S0 / snr

    noise_adder = {'gaussian': _add_gaussian,
                   'rician': _add_rician,
                   'rayleigh': _add_rayleigh}

    noise1 = np.random.normal(0, sigma, size=signal.shape)

    # Gaussian noise uses a single draw; the other models combine two
    # independent draws (real/imaginary channels).
    if noise_type == 'gaussian':
        noise2 = None
    else:
        noise2 = np.random.normal(0, sigma, size=signal.shape)

    return noise_adder[noise_type](signal, noise1, noise2)
def sticks_and_ball(gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
                    fractions=[35, 35], snr=20):
    """ Simulate the signal for a Sticks & Ball model.

    Parameters
    -----------
    gtab : GradientTable
        Signal measurement directions.
    d : float
        Diffusivity value.
    S0 : float
        Unweighted signal value.
    angles : array (K,2) or (K, 3)
        List of K polar angles (in degrees) for the sticks or array of K
        sticks as unit vectors.
    fractions : float
        Percentage of each stick.  Remainder to 100 specifies isotropic
        component.
    snr : float
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    References
    ----------
    .. [1] Behrens et al., "Probabilistic diffusion
           tractography with multiple fiber orientations: what can we gain?",
           Neuroimage, 2007.
    """
    # Convert percentages to volume fractions; whatever is left over is the
    # isotropic "ball" compartment.
    stick_fracs = [f / 100. for f in fractions]
    ball_frac = 1 - np.sum(stick_fracs)

    sticks = _check_directions(angles)
    S = np.zeros(len(gtab.bvals))

    # Skip the first measurement here; every b0 entry is reset to S0 below
    # via gtab.b0s_mask anyway.
    for idx, grad in enumerate(gtab.bvecs[1:], start=1):
        attenuation = ball_frac * np.exp(-gtab.bvals[idx] * d)
        attenuation += np.sum(
            [stick_fracs[j] * np.exp(-gtab.bvals[idx] * d *
                                     np.dot(stick, grad) ** 2)
             for j, stick in enumerate(sticks)])
        S[idx] = S0 * attenuation

    S[gtab.b0s_mask] = S0
    S = add_noise(S, snr, S0)
    return S, sticks
def single_tensor(gtab, S0=1, evals=None, evecs=None, snr=None):
    """ Simulated Q-space signal with a single tensor.

    Parameters
    -----------
    gtab : GradientTable
        Measurement directions.
    S0 : double,
        Strength of signal in the presence of no diffusion gradient (also
        called the ``b=0`` value).
    evals : (3,) ndarray
        Eigenvalues of the diffusion tensor.  By default, values typical for
        prolate white matter are used.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor.  You can also think of this as a rotation
        matrix that transforms the direction of the tensor.  The eigenvectors
        need to be column wise.
    snr : float
        Signal to noise ratio, assuming Rician noise.  None implies no noise.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal: ``S(q, tau) = S_0 e^(-b g^T R D R.T g)``.

    References
    ----------
    .. [1] M. Descoteaux, "High Angular Resolution Diffusion MRI: from Local
           Estimation to Segmentation and Tractography", PhD thesis,
           University of Nice-Sophia Antipolis, p. 42, 2008.
    .. [2] E. Stejskal and J. Tanner, "Spin diffusion measurements: spin echos
           in the presence of a time-dependent field gradient", Journal of
           Chemical Physics, nr. 42, pp. 288--292, 1965.
    """
    if evals is None:
        evals = diffusion_evals
    if evecs is None:
        evecs = np.eye(3)

    # Flatten any leading gradient-array dimensions; restore them on return.
    out_shape = gtab.bvecs.shape[:gtab.bvecs.ndim - 1]
    gradients = gtab.bvecs.reshape(-1, 3)

    # Rotate the eigenvalue matrix into the measurement frame.
    rotation = np.asarray(evecs)
    tensor = np.dot(np.dot(rotation, np.diag(evals)), rotation.T)

    S = np.array([S0 * np.exp(-b * np.dot(np.dot(g, tensor), g))
                  for b, g in zip(gtab.bvals, gradients)])

    S = add_noise(S, snr, S0)
    return S.reshape(out_shape)
def multi_tensor(gtab, mevals, S0=100, angles=[(0, 0), (90, 0)],
                 fractions=[50, 50], snr=20):
    r""" Simulate a Multi-Tensor signal.

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        each tensor's eigenvalues in each row
    S0 : float
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3)
        List of K tensor directions in polar angles (in degrees) or unit
        vectors
    fractions : float
        Percentage of the contribution of each tensor.  The sum of fractions
        should be equal to 100%.
    snr : float
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    Raises
    ------
    ValueError
        If `fractions` does not sum to 100 (within two decimals).
    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')

    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)

    # Accumulate the noise-free signal of each compartment; noise is added
    # once at the end so the components stay exactly weighted.
    S = np.zeros(len(gtab.bvals))
    for comp in range(len(weights)):
        S = S + weights[comp] * single_tensor(
            gtab, S0=S0, evals=mevals[comp],
            evecs=all_tensor_evecs(sticks[comp]), snr=None)

    return add_noise(S, snr, S0), sticks
def multi_tensor_dki(gtab, mevals, S0=100, angles=[(90., 0.), (90., 0.)],
                     fractions=[50, 50], snr=20):
    r""" Simulate the diffusion-weight signal, diffusion and kurtosis tensors
    based on the DKI model

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        eigenvalues of the diffusion tensor for each individual compartment
    S0 : float (optional)
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3) (optional)
        List of K tensor directions of the diffusion tensor of each
        compartment in polar angles (in degrees) or unit vectors
    fractions : float (K,) (optional)
        Percentage of the contribution of each tensor.  The sum of fractions
        should be equal to 100%.
    snr : float (optional)
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal based on the DKI model.
    dt : (6,)
        elements of the diffusion tensor.
    kt : (15,)
        elements of the kurtosis tensor.

    Notes
    -----
    Simulations are based on multicompartmental models which assume that
    tissue is well described by impermeable diffusion compartments
    characterized by their only diffusion tensor.  Since simulations are
    based on the DKI model, coefficients larger than the fourth order of the
    signal's taylor expansion approximation are neglected.

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')

    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)

    # Diffusion tensor of each compartment, rotated onto its stick direction.
    D_comps = np.zeros((len(weights), 3, 3))
    for c in range(len(weights)):
        R = all_tensor_evecs(sticks[c])
        D_comps[c] = np.dot(np.dot(R, np.diag(mevals[c])), R.T)

    # Voxel-level diffusion tensor: fraction-weighted average of components.
    DT = np.zeros((3, 3))
    for c in range(len(weights)):
        DT = DT + weights[c] * D_comps[c]
    upper_triangle = [(0, 0), (0, 1), (1, 1), (0, 2), (1, 2), (2, 2)]
    dt = np.array([DT[i][j] for i, j in upper_triangle])

    # Voxel-level mean diffusivity.
    MD = (DT[0][0] + DT[1][1] + DT[2][2]) / 3

    # The 15 independent kurtosis tensor elements, in the same order the
    # original element-by-element assignments used.
    kt_indices = [(0, 0, 0, 0), (1, 1, 1, 1), (2, 2, 2, 2),
                  (0, 0, 0, 1), (0, 0, 0, 2), (0, 1, 1, 1),
                  (1, 1, 1, 2), (0, 2, 2, 2), (1, 2, 2, 2),
                  (0, 0, 1, 1), (0, 0, 2, 2), (1, 1, 2, 2),
                  (0, 0, 1, 2), (0, 1, 1, 2), (0, 1, 2, 2)]
    kt = np.array([kurtosis_element(D_comps, weights, i, j, k, l, DT, MD)
                   for (i, j, k, l) in kt_indices])

    # Synthesize the signal from the voxel-level DT and KT.
    S = DKI_signal(gtab, dt, kt, S0, snr)

    return S, dt, kt
def kurtosis_element(D_comps, frac, ind_i, ind_j, ind_k, ind_l, DT=None,
                     MD=None):
    r""" Computes the diffusion kurtosis tensor element (with indexes i, j,
    k and l) of a multicompartmental model from the diffusion tensors of
    its individual compartments.

    Parameters
    -----------
    D_comps : (K,3,3) ndarray
        Diffusion tensors for all K individual compartments of the
        multicompartmental model.
    frac : sequence of floats
        Contribution weight of each compartment (as used by
        ``multi_tensor_dki``, fractions normalized to sum to 1).
    ind_i : int
        Element's index i (0 for x, 1 for y, 2 for z)
    ind_j : int
        Element's index j (0 for x, 1 for y, 2 for z)
    ind_k : int
        Element's index k (0 for x, 1 for y, 2 for z)
    ind_l : int
        Element's index l (0 for x, 1 for y, 2 for z)
    DT : (3,3) ndarray (optional)
        Voxel's global diffusion tensor; recomputed from ``D_comps`` and
        ``frac`` when not given.
    MD : float (optional)
        Voxel's global mean diffusivity; recomputed from ``DT`` when not
        given.

    Returns
    --------
    wijkl : float
        kurtosis tensor element of index i, j, k, l

    Notes
    --------
    wijkl is calculated using equation 8 given in [1]_.

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    if DT is None:
        DT = np.zeros((3, 3))
        for weight, tensor in zip(frac, D_comps):
            DT = DT + weight * tensor
    if MD is None:
        MD = (DT[0][0] + DT[1][1] + DT[2][2]) / 3
    # weighted second-order moment of the compartment tensors (first term
    # of eq. 8 in [1]_)
    moment = 0
    for weight, tensor in zip(frac, D_comps):
        moment += weight * (
            tensor[ind_i][ind_j] * tensor[ind_k][ind_l] +
            tensor[ind_i][ind_k] * tensor[ind_j][ind_l] +
            tensor[ind_i][ind_l] * tensor[ind_j][ind_k])
    # subtract the symmetrized products of the global tensor and normalize
    # by the squared mean diffusivity
    wijkl = (moment -
             DT[ind_i][ind_j] * DT[ind_k][ind_l] -
             DT[ind_i][ind_k] * DT[ind_j][ind_l] -
             DT[ind_i][ind_l] * DT[ind_j][ind_k]) / (MD ** 2)
    return wijkl
def DKI_signal(gtab, dt, kt, S0=150, snr=None):
    r""" Simulated signal of a single voxel under the diffusion kurtosis
    imaging (DKI) model.

    Parameters
    -----------
    gtab : GradientTable
        Measurement directions.
    dt : (6,) ndarray
        Elements of the diffusion tensor.
    kt : (15, ) ndarray
        Elements of the diffusion kurtosis tensor.
    S0 : float (optional)
        Strength of signal in the presence of no diffusion gradient.
    snr : float (optional)
        Signal to noise ratio, assuming Rician noise. None implies no noise.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal based on the DKI model:

    .. math::

        S=S_{0}e^{-bD+\frac{1}{6}b^{2}D^{2}K}

    References
    ----------
    .. [1] R. Neto Henriques et al., "Exploring the 3D geometry of the
           diffusion kurtosis tensor - Impact on the development of robust
           tractography procedures and novel biomarkers", NeuroImage (2015)
           111, 85-99.
    """
    dt = np.array(dt)
    kt = np.array(kt)
    # design matrix mapping the DKI parameter vector to log-signals
    A = dki_design_matrix(gtab)
    # mean diffusivity from the three diagonal elements of dt
    md = (dt[0] + dt[2] + dt[5]) / 3
    # DKI parameter vector: [dt, kt * MD^2, log(S0)]
    params = np.concatenate((dt, kt * md * md, np.array([np.log(S0)])),
                            axis=0)
    signal = np.exp(dot(A, params))
    # optionally corrupt the noiseless signal with Rician noise
    return add_noise(signal, snr, S0)
def single_tensor_odf(r, evals=None, evecs=None):
    """ Simulated ODF with a single tensor.

    Parameters
    ----------
    r : (N,3) or (M,N,3) ndarray
        Measurement positions in (x, y, z), either as a list or on a grid.
    evals : (3,)
        Eigenvalues of diffusion tensor. By default, use values typical for
        prolate white matter.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor; equivalently, the rotation matrix that
        determines the orientation of the diffusion tensor.

    Returns
    -------
    ODF : (N,) ndarray
        The orientation distribution function evaluated at the directions
        in ``r``.

    References
    ----------
    .. [1] Aganj et al., "Reconstruction of the Orientation Distribution
           Function in Single- and Multiple-Shell q-Ball Imaging Within
           Constant Solid Angle", Magnetic Resonance in Medicine, nr. 64,
           pp. 554--566, 2010.
    """
    if evals is None:
        evals = diffusion_evals
    if evecs is None:
        evecs = np.eye(3)
    # all axes but the last index separate sampling directions
    out_shape = r.shape[:r.ndim - 1]
    rotation = np.asarray(evecs)
    tensor = np.dot(np.dot(rotation, np.diag(evals)), rotation.T)
    tensor_inv = np.linalg.inv(tensor)
    directions = r.reshape(-1, 3)
    quad = np.zeros(len(directions))
    for idx, u in enumerate(directions):
        # (u^T D^-1 u)^(3/2) for each sampling direction
        quad[idx] = np.dot(np.dot(u.T, tensor_inv), u) ** 1.5
    return (1 / (4 * np.pi * np.prod(evals) ** 0.5 * quad)).reshape(out_shape)
def all_tensor_evecs(e0):
    """Given the principal tensor axis, return the array of all tensor
    eigenvectors column-wise (equivalently, the rotation matrix that
    orientates the tensor).

    Parameters
    ----------
    e0 : (3,) ndarray
        Principal tensor axis.

    Returns
    -------
    evecs : (3,3) ndarray
        Tensor eigenvectors, one per column.
    """
    # rotate the canonical basis so that its first axis maps onto e0
    basis = np.eye(3)
    rotation = vec2vec_rotmat(basis[0], e0)
    e1 = np.dot(rotation, basis[1])
    e2 = np.dot(rotation, basis[2])
    # stack as rows, then transpose so each eigenvector becomes a column
    return np.array([e0, e1, e2]).T
def multi_tensor_odf(odf_verts, mevals, angles, fractions):
    r'''Simulate a Multi-Tensor ODF.

    Parameters
    ----------
    odf_verts : (N,3) ndarray
        Vertices of the reconstruction sphere.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.
    angles : sequence of 2d tuples,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.

    Returns
    -------
    ODF : (N,) ndarray
        Orientation distribution function.

    Examples
    --------
    Simulate a MultiTensor ODF with two peaks and calculate its exact ODF.

    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor_odf, all_tensor_evecs
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> vertices, faces = sphere.vertices, sphere.faces
    >>> mevals = np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> angles = [(0, 0), (90, 0)]
    >>> odf = multi_tensor_odf(vertices, mevals, angles, [50, 50])
    '''
    # convert percentages to normalized weights
    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)
    odf = np.zeros(len(odf_verts))
    # accumulate the weighted single-tensor ODF of each compartment
    for weight, evals, stick in zip(weights, mevals, sticks):
        odf += weight * single_tensor_odf(odf_verts, evals=evals,
                                          evecs=all_tensor_evecs(stick))
    return odf
def single_tensor_rtop(evals=None, tau=1.0 / (4 * np.pi ** 2)):
    r'''Simulate the return-to-origin probability (rtop) of a single tensor.

    Parameters
    ----------
    evals : 1D arrays,
        Eigen-values for the tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    rtop : float,
        Return to origin probability.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if evals is None:
        evals = diffusion_evals
    # value at the origin of the anisotropic Gaussian propagator
    gaussian_norm = (4 * np.pi * tau) ** 3 * np.prod(evals)
    return 1.0 / np.sqrt(gaussian_norm)
def multi_tensor_rtop(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate a Multi-Tensor return-to-origin probability (rtop).

    Parameters
    ----------
    mf : sequence of floats, bounded [0,1]
        Percentages of the fractions for each tensor.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    rtop : float,
        Return to origin probability.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if mevals is None:
        # fall through to each compartment's default eigenvalues
        mevals = [None] * len(mf)
    rtop = 0
    for weight, evals in zip(mf, mevals):
        rtop += weight * single_tensor_rtop(evals, tau=tau)
    return rtop
def single_tensor_pdf(r, evals=None, evecs=None, tau=1 / (4 * np.pi ** 2)):
    """Simulated diffusion propagator (PDF) of a single tensor.

    Parameters
    ----------
    r : (N,3) or (M,N,3) ndarray
        Measurement positions in (x, y, z), either as a list or on a grid.
    evals : (3,)
        Eigenvalues of diffusion tensor. By default, use values typical for
        prolate white matter.
    evecs : (3, 3) ndarray
        Eigenvectors of the tensor; equivalently, the rotation matrix that
        determines the orientation of the diffusion tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray
        The diffusion probability at ``r`` after time ``tau``.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    """
    if evals is None:
        evals = diffusion_evals
    if evecs is None:
        evecs = np.eye(3)
    # all axes but the last index separate displacement positions
    out_shape = r.shape[:r.ndim - 1]
    rotation = np.asarray(evecs)
    tensor = np.dot(np.dot(rotation, np.diag(evals)), rotation.T)
    tensor_inv = np.linalg.inv(tensor)
    positions = r.reshape(-1, 3)
    exponent = np.zeros(len(positions))
    for idx, u in enumerate(positions):
        # Gaussian propagator exponent: -(u^T D^-1 u) / (4 tau)
        exponent[idx] = (-np.dot(np.dot(u.T, tensor_inv), u)) / (4 * tau)
    norm = np.sqrt((4 * np.pi * tau) ** 3 * np.prod(evals))
    return ((1 / norm) * np.exp(exponent)).reshape(out_shape)
def multi_tensor_pdf(pdf_points, mevals, angles, fractions,
                     tau=1 / (4 * np.pi ** 2)):
    r'''Simulate a Multi-Tensor water displacement PDF.

    Parameters
    ----------
    pdf_points : (N, 3) ndarray
        Points to evaluate the PDF.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.
    angles : sequence,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray,
        Probability density function of the water displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    # convert percentages to normalized weights
    weights = [f / 100. for f in fractions]
    sticks = _check_directions(angles)
    pdf = np.zeros(len(pdf_points))
    # weighted sum of the Gaussian propagators of the compartments
    for weight, evals, stick in zip(weights, mevals, sticks):
        pdf += weight * single_tensor_pdf(pdf_points, evals=evals,
                                          evecs=all_tensor_evecs(stick),
                                          tau=tau)
    return pdf
def single_tensor_msd(evals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate the mean square displacement (MSD) of a single tensor.

    Parameters
    ----------
    evals : 1D arrays,
        Eigen-values for the tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    msd : float,
        Mean square displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if evals is None:
        evals = diffusion_evals
    # MSD of a Gaussian propagator: 2 * tau * trace(D)
    return 2 * tau * np.sum(evals)
def multi_tensor_msd(mf, mevals=None, tau=1 / (4 * np.pi ** 2)):
    r'''Simulate a Multi-Tensor mean square displacement (MSD).

    Parameters
    ----------
    mf : sequence of floats, bounded [0,1]
        Percentages of the fractions for each tensor.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor. By default, values typical for prolate
        white matter are used.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    msd : float,
        Mean square displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and Its Features in Diffusion MRI", PhD Thesis, 2012.
    '''
    if mevals is None:
        # fall through to each compartment's default eigenvalues
        mevals = [None] * len(mf)
    msd = 0
    for weight, evals in zip(mf, mevals):
        msd += weight * single_tensor_msd(evals, tau=tau)
    return msd
# Deprecated CamelCase aliases kept for backward compatibility with older
# client code; new code should call the snake_case functions directly.
SticksAndBall = sticks_and_ball
SingleTensor = single_tensor
MultiTensor = multi_tensor
|
oesteban/dipy
|
dipy/sims/voxel.py
|
Python
|
bsd-3-clause
| 26,641
|
[
"Gaussian"
] |
ff796a9ad41d97c7b5bb9b312f9a3404349011a5df9a39524c5015068dd84676
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import absolute_import
import six
from six.moves import range, cStringIO, StringIO
import numpy as np
from numpy.testing import (TestCase, dec,
assert_equal, assert_almost_equal,
assert_array_almost_equal,
)
import MDAnalysis
import MDAnalysis.lib.util as util
import MDAnalysis.tests.datafiles as datafiles
from MDAnalysisTests.coordinates.reference import RefAdKSmall
from MDAnalysisTests.plugins.knownfailure import knownfailure
from MDAnalysisTests import tempdir
import os
class TestIsstream(TestCase):
    """Tests for ``util.isstream`` (and the ``util.hasmethod`` helper)."""

    def test_hasmethod(self):
        candidate = "random string"
        assert_equal(util.hasmethod(candidate, "rfind"), True)
        assert_equal(util.hasmethod(candidate, "bogusXXX"), False)

    def test_string(self):
        # a plain filename string is not a stream
        assert_equal(util.isstream(datafiles.PSF), False)

    def test_list(self):
        assert_equal(util.isstream([1, 2, 3]), False)

    def test_iterator(self):
        gen = (i for i in range(3))
        assert_equal(util.isstream(gen), False)

    def test_file(self):
        with open(datafiles.PSF) as handle:
            assert_equal(util.isstream(handle), True)

    def test_cStringIO_read(self):
        with open(datafiles.PSF, "r") as src:
            stream = cStringIO(src.read())
        assert_equal(util.isstream(stream), True)
        stream.close()

    def test_cStringIO_write(self):
        stream = cStringIO()
        assert_equal(util.isstream(stream), True)
        stream.close()

    def test_StringIO_read(self):
        with open(datafiles.PSF, "r") as src:
            stream = StringIO(src)
        assert_equal(util.isstream(stream), True)
        stream.close()

    def test_StringIO_write(self):
        stream = StringIO()
        assert_equal(util.isstream(stream), True)
        stream.close()
class TestNamedStream(TestCase):
    """NamedStream wrappers around in-memory buffers and real files:
    naming, closing semantics, and read/write round-trips."""

    def setUp(self):
        # real topology file used for file-backed streams
        self.filename = datafiles.PSF
        self.numlines = 12326 # len(open(self.filename).readlines())
        self.text = [
            "The Jabberwock, with eyes of flame,\n",
            "Came whiffling through the tulgey wood,\n",
            "And burbled as it came!"]
        self.textname = "jabberwock.txt"
        self.numtextlines = len(self.text)
    def test_closing(self):
        # close=True: a single close() really closes the underlying stream
        obj = cStringIO("".join(self.text))
        ns = util.NamedStream(obj, self.textname, close=True)
        assert_equal(ns.closed, False)
        ns.close()
        assert_equal(ns.closed, True)
    def test_closing_force(self):
        # default close=False: plain close() is a no-op, force=True closes
        obj = cStringIO("".join(self.text))
        ns = util.NamedStream(obj, self.textname)
        assert_equal(ns.closed, False)
        ns.close()
        assert_equal(ns.closed, False)
        ns.close(force=True)
        assert_equal(ns.closed, True)
    def test_cStringIO_read(self):
        obj = cStringIO("".join(self.text))
        ns = util.NamedStream(obj, self.textname)
        assert_equal(ns.name, self.textname)
        assert_equal(str(ns), self.textname)
        assert_equal(len(ns.readlines()), self.numtextlines)
        # reset() rewinds so the stream can be read a second time
        ns.reset()
        assert_equal(len(ns.readlines()), self.numtextlines)
        ns.close(force=True)
    def test_File_read(self):
        obj = open(self.filename, 'r')
        ns = util.NamedStream(obj, self.filename)
        assert_equal(ns.name, self.filename)
        assert_equal(str(ns), self.filename)
        assert_equal(len(ns.readlines()), self.numlines)
        ns.reset()
        assert_equal(len(ns.readlines()), self.numlines)
        ns.close(force=True)
    def test_cStringIO_write(self):
        obj = cStringIO()
        ns = util.NamedStream(obj, self.textname)
        ns.writelines(self.text)
        assert_equal(ns.name, self.textname)
        assert_equal(str(ns), self.textname)
        ns.reset()
        assert_equal(len(ns.readlines()), len(self.text))
        ns.reset()
        assert_equal(ns.read(20), "".join(self.text)[:20])
        ns.close(force=True)
    def test_File_write(self):
        with tempdir.in_tempdir():
            outfile = "lookingglas.txt"
            try:
                obj = open(outfile, "w")
                ns = util.NamedStream(obj, outfile, close=True)
                ns.writelines(self.text)
                ns.close()
                # re-read the file to verify the written content
                text = open(outfile).readlines()
                assert_equal(ns.name, outfile)
                assert_equal(str(ns), outfile)
                assert_equal(len(text), len(self.text))
                assert_equal("".join(text), "".join(self.text))
            finally:
                ns.close()
                obj.close()
class TestNamedStream_filename_behavior(object):
    """Check that os.path functions treat a NamedStream like its filename."""

    textname = "~/stories/jabberwock.txt" # with tilde ~ to test regular expanduser()
    # note: no setUp() because classes with generators would run it
    # *for each generated test* and we need it for the generator method
    def create_NamedStream(self, name=None):
        # helper: wrap an empty cStringIO buffer under the given name
        if name is None:
            name = self.textname
        obj = cStringIO()
        return util.NamedStream(obj, name)
    def test_ospath_funcs(self):
        # nose test generator: yields one sub-test per os.path function
        ns = self.create_NamedStream()
        # - "expandvars" gave Segmentation fault (OS X 10.6, Python 2.7.11 -- orbeckst)
        # - "expanduser" will either return a string if it carried out interpolation
        #   or "will do nothing" and return the NamedStream (see extra test below).
        #   On systems without a user or HOME, it will also do nothing and the test
        #   below will fail.
        funcs = ("abspath", "basename", "dirname", "expanduser",
                 "normpath", "relpath", "split", "splitext")
        def _test_func(funcname, fn=self.textname, ns=ns):
            # each os.path function must give the same result for the
            # NamedStream as for the plain filename
            func = getattr(os.path, funcname)
            reference = func(fn)
            value = func(ns)
            assert_equal(value, reference,
                         err_msg=("os.path.{0}() does not work with "
                                  "NamedStream").format(funcname))
        # join not included because of different call signature
        # but added first argument for the sake of it showing up in the verbose
        # nose output
        def _test_join(funcname="join", fn=self.textname, ns=ns, path="/tmp/MDAnalysisTests"):
            reference = os.path.join(path, fn)
            value = os.path.join(path, ns)
            assert_equal(value, reference,
                         err_msg=("os.path.{0}() does not work with "
                                  "NamedStream").format(funcname))
        for func in funcs:
            yield _test_func, func
        yield _test_join, "join"
    # Segmentation fault when run as a test on Mac OS X 10.6, Py 2.7.11 [orbeckst]
    @dec.skipif(True)
    def test_expanduser_noexpansion_returns_NamedStream(self):
        ns = self.create_NamedStream("de/zipferlack.txt") # no tilde ~ in name!
        reference = ns
        value = os.path.expanduser(ns)
        assert_equal(value, reference,
                     err_msg=("os.path.expanduser() without '~' did not "
                              "return NamedStream --- weird!!"))
    # expandvars(NamedStream) does not work interactively, so it is a knownfailure
    # Segmentation fault when run as a test on Mac OS X 10.6, Py 2.7.11 [orbeckst]
    @dec.skipif(True)
    @dec.skipif("HOME" not in os.environ)
    @knownfailure
    def test_expandvars(self):
        name = "${HOME}/stories/jabberwock.txt"
        ns = self.create_NamedStream(name)
        reference = os.path.expandvars(name)
        value = os.path.expandvars(ns)
        assert_equal(value, reference,
                     err_msg="os.path.expandvars() did not expand HOME")
    # Segmentation fault when run as a test on Mac OS X 10.6, Py 2.7.11 [orbeckst]
    @dec.skipif(True)
    def test_expandvars_noexpansion_returns_NamedStream(self):
        ns = self.create_NamedStream() # no $VAR constructs
        reference = ns
        value = os.path.expandvars(ns)
        assert_equal(value, reference,
                     err_msg=("os.path.expandvars() without '$VARS' did not "
                              "return NamedStream --- weird!!"))
    def test_add(self):
        # NamedStream must behave like a string under concatenation
        ns = self.create_NamedStream()
        try:
            assert_equal(ns + "foo", self.textname + "foo")
        except TypeError:
            raise AssertionError("NamedStream does not support "
                                 "string concatenation, NamedStream + str")
    def test_radd(self):
        ns = self.create_NamedStream()
        try:
            assert_equal("foo" + ns, "foo" + self.textname)
        except TypeError:
            raise AssertionError("NamedStream does not support right "
                                 "string concatenation, str + NamedStream")
class _StreamData(object):
    """Data for StreamIO functions.

    Reads several reference topology/coordinate files into memory and also
    provides a small in-memory PSF/XYZ fixture pair; all buffers can be
    obtained wrapped as StringIO, cStringIO or NamedStream objects.
    """
    filenames = {
        'PSF': datafiles.PSF,
        'CRD': datafiles.CRD,
        'PDB': datafiles.PDB_small,
        'PQR': datafiles.PQR,
        'GRO': datafiles.GRO_velocity,
        'MOL2': datafiles.mol2_molecules,
        'PDBQT': datafiles.PDBQT_input,
    }
    def __init__(self):
        # slurp every reference file into an in-memory string buffer
        self.buffers = {name: "".join(open(fn).readlines())
                        for name, fn in six.iteritems(self.filenames)}
        # minimal 8-atom PSF/XYZ pair that exists only in memory; the bogus
        # paths are never opened, they just give the streams a name
        self.filenames['XYZ_PSF'] = u"bogus/path/mini.psf"
        self.buffers['XYZ_PSF'] = u"""\
PSF CMAP
       1 !NTITLE
 Mini PSF for in memory XYZ
       8 !NATOM
       1 A    380  THR  N    NH1   -0.470000       14.0070           0
       2 A    380  THR  HN   H     0.310000        1.0080           0
       3 A    380  THR  CA   CT1   0.070000       12.0110           0
       4 A    380  THR  CB   CT1   0.140000       12.0110           0
       5 A    380  THR  OG1  OH1  -0.660000       15.9990           0
       6 A    380  THR  CG2  CT3  -0.270000       12.0110           0
       7 A    380  THR  C    C     0.510000       12.0110           0
       8 A    380  THR  O    O    -0.510000       15.9990           0
"""
        self.filenames['XYZ'] = "bogus/path/mini.xyz"
        self.buffers['XYZ'] = """\
8
frame 1
 N     0.93100   17.31800   16.42300
 HN    1.86100   17.06500   16.17100
 CA    0.48600   18.66500   16.14300
 CB    1.65900   19.66600   15.88700
 OG1   2.53100   19.43000   14.75700
 CG2   2.56700   19.70400   17.04500
 C    -0.38500   18.72400   14.93500
 O    -0.22300   17.81000   14.13400
8
frame 2
 N     1.00200   17.11400   16.52100
 HN    1.85100   16.93900   16.02800
 CA    0.45600   18.48700   16.26500
 CB    1.49700   19.58900   16.08900
 OG1   2.38300   19.42200   14.96500
 CG2   2.47300   19.54600   17.26500
 C    -0.31500   18.63800   14.99300
 O    -0.23100   17.83800   14.10800
8
frame 3
 N     0.94000   16.97600   16.44500
 HN    1.85800   16.71700   16.15500
 CA    0.53300   18.34800   16.17400
 CB    1.79500   19.24700   15.93000
 OG1   2.61400   18.84000   14.91900
 CG2   2.54700   19.25800   17.26500
 C    -0.27300   18.58100   14.94400
 O    -0.23800   17.82300   13.97300
"""
    def as_StringIO(self, name):
        return StringIO(self.buffers[name])
    def as_cStringIO(self, name):
        return cStringIO(self.buffers[name])
    def as_NamedStream(self, name):
        # stream that also carries a filename (NOTE(review): presumably so
        # that readers can detect the format from the extension -- confirm)
        return util.NamedStream(self.as_cStringIO(name), self.filenames[name])
# Module-level fixture singleton; the class itself is deleted so the buffers
# are built (and the reference files read) exactly once.
streamData = _StreamData()
del _StreamData
# possibly add tests to individual readers instead?
class TestStreamIO(TestCase, RefAdKSmall):
    """Each coordinate reader / topology parser must accept a NamedStream
    in place of a filename and yield the expected Universe."""

    def test_PrimitivePDBReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('PDB'))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
    def test_PDBReader(self):
        try:
            u = MDAnalysis.Universe(streamData.as_NamedStream('PDB'))
        except Exception as err:
            raise AssertionError("StreamIO not supported:\n>>>>> {0}".format(err))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
    def test_CRDReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('CRD'))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
    def test_PSFParser(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('PSF'))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
    def test_PSF_CRD(self):
        # topology and coordinates both supplied as streams
        u = MDAnalysis.Universe(streamData.as_NamedStream('PSF'),
                                streamData.as_NamedStream('CRD'))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
    def test_PQRReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('PQR'))
        assert_equal(u.atoms.n_atoms, self.ref_n_atoms)
        assert_almost_equal(u.atoms.total_charge(), self.ref_charmm_totalcharge, 3,
                            "Total charge (in CHARMM) does not match expected value.")
        assert_almost_equal(u.atoms.H.charges, self.ref_charmm_Hcharges, 3,
                            "Charges for H atoms do not match.")
    def test_PDBQTReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('PDBQT'))
        sel = u.select_atoms('backbone')
        assert_equal(sel.n_atoms, 796)
        sel = u.select_atoms('segid A')
        assert_equal(sel.n_atoms, 909, "failed to select segment A")
        sel = u.select_atoms('segid B')
        assert_equal(sel.n_atoms, 896, "failed to select segment B")
    def test_GROReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('GRO'))
        assert_equal(u.atoms.n_atoms, 6)
        assert_almost_equal(u.atoms[3].position,
                            10. * np.array([1.275, 0.053, 0.622]), 3, # manually convert nm -> A
                            err_msg="wrong coordinates for water 2 OW")
        assert_almost_equal(u.atoms[3].velocity,
                            10. * np.array([0.2519, 0.3140, -0.1734]), 3, # manually convert nm/ps -> A/ps
                            err_msg="wrong velocity for water 2 OW")
    def test_MOL2Reader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('MOL2'))
        assert_equal(len(u.atoms), 49)
        assert_equal(u.trajectory.n_frames, 200)
        # jump to the last frame before checking coordinates
        u.trajectory[199]
        assert_array_almost_equal(u.atoms.positions[0], [1.7240, 11.2730, 14.1200])
    def test_XYZReader(self):
        u = MDAnalysis.Universe(streamData.as_NamedStream('XYZ_PSF'),
                                streamData.as_NamedStream('XYZ'))
        assert_equal(len(u.atoms), 8)
        assert_equal(u.trajectory.n_frames, 3)
        assert_equal(u.trajectory.frame, 0) # weird, something odd with XYZ reader
        u.trajectory.next() # (should really only need one next()... )
        assert_equal(u.trajectory.frame, 1) # !!!! ???
        u.trajectory.next() # frame 2
        assert_equal(u.trajectory.frame, 2)
        assert_almost_equal(u.atoms[2].position, np.array([0.45600, 18.48700, 16.26500]), 3,
                            err_msg="wrong coordinates for atom CA at frame 2")
|
kain88-de/mdanalysis
|
testsuite/MDAnalysisTests/test_streamio.py
|
Python
|
gpl-2.0
| 16,041
|
[
"CHARMM",
"MDAnalysis"
] |
49957ce8080bd032a4f4a08036a56c47c3e5e5cdde7cf0f0ff113028fa1f7878
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
import El
import time
# Problem dimensions: m-by-n operator (under-determined, n > m)
m = 2000
n = 4000
# Regularization weights forwarded to El.EN below
# (NOTE(review): presumably the l1/l2 penalties of the elastic-net solver
# -- confirm against the Elemental EN documentation)
lambda1 = 3
lambda2 = 4
# Toggle graphical display of matrices/vectors
display = True
worldRank = El.mpi.WorldRank()
# Make a banded sparse matrix (5 nonzero diagonals) whose last column is dense
def Rectang(height,width):
    """Build and return a distributed sparse height-by-width matrix.

    Entries lie on the main diagonal and the +/-1 and +/-height
    off-diagonals, plus a dense last column with value -5/height.
    """
    A = El.DistSparseMatrix()
    A.Resize(height,width)
    firstLocalRow = A.FirstLocalRow()
    localHeight = A.LocalHeight()
    # Each local row queues at most 6 entries (5 banded + the dense last
    # column); the original reserved only 5 per row.
    A.Reserve(6*localHeight)
    for sLoc in xrange(localHeight):
        s = firstLocalRow + sLoc
        if s < width:
            A.QueueLocalUpdate( sLoc, s, 11 )
        if s >= 1 and s-1 < width:
            A.QueueLocalUpdate( sLoc, s-1, -1 )
        if s+1 < width:
            A.QueueLocalUpdate( sLoc, s+1, 2 )
        if s >= height and s-height < width:
            A.QueueLocalUpdate( sLoc, s-height, -3 )
        if s+height < width:
            A.QueueLocalUpdate( sLoc, s+height, 4 )
        # The dense last column. BUG FIX: use float division -- this is a
        # Python 2 script (print statements/xrange), so -5/height
        # floor-divides to -1 instead of the intended -5/height.
        A.QueueLocalUpdate( sLoc, width-1, -5.0/height )
    A.MakeConsistent()
    return A
# Build the sparse operator and a Gaussian random right-hand side
A = Rectang(m,n)
b = El.DistMultiVec()
El.Gaussian( b, m, 1 )
if display:
    El.Display( A, "A" )
    El.Display( b, "b" )
# Solver controls for the affine QP formulation; print per-iteration progress
ctrl = El.QPAffineCtrl_d()
ctrl.mehrotraCtrl.progress = True
if worldRank == 0:
    print "lambda1 =", lambda1, "lambda2 =", lambda2
# Time the solve (NOTE(review): time.clock() measures CPU time on POSIX;
# wall-clock time may differ -- this is a Python 2 script)
startEN = time.clock()
x = El.EN( A, b, lambda1, lambda2, ctrl )
endEN = time.clock()
if worldRank == 0:
    print "EN time: ", endEN-startEN
if display:
    El.Display( x, "x" )
# Report solution norms and the residual norm || A x - b ||_2
xOneNorm = El.EntrywiseNorm( x, 1 )
xTwoNorm = El.Nrm2( x )
e = El.DistMultiVec()
El.Copy( b, e )
# e := b - A x
El.SparseMultiply( El.NORMAL, -1., A, x, 1., e )
if display:
    El.Display( e, "e" )
eTwoNorm = El.Nrm2( e )
if worldRank == 0:
    print "|| x ||_1 =", xOneNorm
    print "|| x ||_2 =", xTwoNorm
    print "|| A x - b ||_2 =", eTwoNorm
# Require the user to press a button before the figures are closed
commSize = El.mpi.Size( El.mpi.COMM_WORLD() )
El.Finalize()
if commSize == 1:
    raw_input('Press Enter to exit')
|
sg0/Elemental
|
examples/interface/EN.py
|
Python
|
bsd-3-clause
| 2,102
|
[
"Gaussian"
] |
95e7dd2bfe1fcd39b1af98d03b7f89a1fadbec7112a4bf93cad791b2556f26c7
|
"""Data structure for genomic intervals and their annotation."""
import pandas as pd
import numpy as np
from natsort import natsorted
import pyranges as pr
from pyranges.tostring2 import tostring
from pyranges.methods.intersection import _intersection, _overlap
from pyranges.multithreaded import pyrange_apply, pyrange_apply_single, pyrange_apply_chunks, _extend, _tes, _tss
__all__ = ["PyRanges"]
def fill_kwargs(kwargs):
    """Return the default option set updated with *kwargs*.

    Keys present in *kwargs* override the defaults; unknown keys are
    passed through unchanged.
    """
    merged = dict(strandedness=None,
                  overlap=True,
                  how=None,
                  invert=None,
                  new_pos=None,
                  suffixes=["_a", "_b"],
                  suffix="_b",
                  sparse={"self": False, "other": False})
    merged.update(kwargs)
    return merged
class PyRanges():
"""Two-dimensional representation of genomic intervals and their annotations.
A PyRanges object must have the columns Chromosome, Start and End. These
describe the genomic position and function as implicit row labels. A Strand
column is optional and adds strand information to the intervals. Any other
columns are allowed and are considered metadata.
Operations between PyRanges align intervals based on their position.
If a PyRanges is built using the arguments chromosomes, starts, ends and
optionally strands, all non-scalars must be of the same length.
Parameters
----------
df : pandas.DataFrame or dict of pandas.DataFrame, default None
The data to be stored in the PyRanges.
chromosomes : array-like or scalar value, default None
The chromosome(s) in the PyRanges.
starts : array-like, default None
The start postions in the PyRanges.
ends : array-like, default None
The end postions in the PyRanges.
strands : array-like or scalar value, default None
The strands in the PyRanges.
int64 : bool, default False
Use np.int64 to represent starts and ends
copy_df : bool, default True
Copy input pandas.DataFrame
See Also
--------
pyranges.read_bed: read bed-file into PyRanges
pyranges.read_bam: read bam-file into PyRanges
pyranges.read_gff: read gff-file into PyRanges
pyranges.read_gtf: read gtf-file into PyRanges
pyranges.from_dict: create PyRanges from dict of columns
pyranges.from_string: create PyRanges from multiline string
Notes
-----
A PyRanges object is represented internally as a dictionary efficiency. The keys are
chromosomes or chromosome/strand tuples and the values are pandas DataFrames.
Examples
--------
>>> pr.PyRanges()
Empty PyRanges
>>> pr.PyRanges(chromosomes="chr1", starts=(1, 5), ends=[3, 149],
... strands=("+", "-"), int64=True)
+--------------+-----------+-----------+--------------+
| Chromosome | Start | End | Strand |
| (category) | (int64) | (int64) | (category) |
|--------------+-----------+-----------+--------------|
| chr1 | 1 | 3 | + |
| chr1 | 5 | 149 | - |
+--------------+-----------+-----------+--------------+
Stranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
For printing, the PyRanges was sorted on Chromosome and Strand.
>>> df = pd.DataFrame({"Chromosome": ["chr1", "chr2"], "Start": [100, 200],
... "End": [150, 201]})
>>> df
Chromosome Start End
0 chr1 100 150
1 chr2 200 201
>>> pr.PyRanges(df)
+--------------+-----------+-----------+
| Chromosome | Start | End |
| (category) | (int32) | (int32) |
|--------------+-----------+-----------|
| chr1 | 100 | 150 |
| chr2 | 200 | 201 |
+--------------+-----------+-----------+
Unstranded PyRanges object has 2 rows and 3 columns from 2 chromosomes.
For printing, the PyRanges was sorted on Chromosome.
>>> gr = pr.from_dict({"Chromosome": [1, 1], "Strand": ["+", "-"], "Start": [1, 4], "End": [2, 27],
... "TP": [0, 1], "FP": [12, 11], "TN": [10, 9], "FN": [2, 3]})
>>> gr
+--------------+--------------+-----------+-----------+-----------+-----------+-----------+-----------+
| Chromosome | Strand | Start | End | TP | FP | TN | FN |
| (category) | (category) | (int32) | (int32) | (int64) | (int64) | (int64) | (int64) |
|--------------+--------------+-----------+-----------+-----------+-----------+-----------+-----------|
| 1 | + | 1 | 2 | 0 | 12 | 10 | 2 |
| 1 | - | 4 | 27 | 1 | 11 | 9 | 3 |
+--------------+--------------+-----------+-----------+-----------+-----------+-----------+-----------+
Stranded PyRanges object has 2 rows and 8 columns from 1 chromosomes.
For printing, the PyRanges was sorted on Chromosome and Strand.
"""
dfs = None
"""Dict mapping chromosomes or chromosome/strand pairs to pandas DataFrames."""
features = None
"""Namespace for genomic-features methods.
See Also
--------
pyranges.genomicfeatures : namespace for feature-functionality
pyranges.genomicfeatures.GenomicFeaturesMethods : namespace for feature-functionality
"""
stats = None
"""Namespace for statistcal methods.
See Also
--------
pyranges.statistics : namespace for statistics
pyranges.stats.StatisticsMethods : namespace for statistics
"""
    def __init__(self,
                 df=None,
                 chromosomes=None,
                 starts=None,
                 ends=None,
                 strands=None,
                 int64=False,
                 copy_df=True):
        """Construct a PyRanges.

        Validation and partitioning of the data into the per-chromosome
        (or per chromosome/strand) dict of DataFrames is delegated to
        ``pyranges.methods.init._init``; see the class docstring for the
        parameter descriptions.
        """
        # imported here rather than at module level
        # (NOTE(review): presumably to avoid a circular import -- confirm)
        from pyranges.methods.init import _init
        if df is None and chromosomes is None:
            # no data given: build an empty PyRanges with the required columns
            df = pd.DataFrame(columns="Chromosome Start End".split())
        _init(self, df, chromosomes, starts, ends, strands, int64, copy_df)
def __array_ufunc__(self, *args, **kwargs):
    """Apply unary numpy-function.

    Apply function to all columns which are not index, i.e. Chromosome,
    Start, End nor Strand.

    Notes
    -----
    Function must produce a vector of equal length.

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 2, 3], "Start": [1, 2, 3],
    ... "End": [2, 3, 4], "Score": [9, 16, 25], "Score2": [121, 144, 169],
    ... "Name": ["n1", "n2", "n3"]})
    >>> gr
    +--------------+-----------+-----------+-----------+-----------+------------+
    | Chromosome   |     Start |       End |     Score |    Score2 | Name       |
    | (category)   |   (int32) |   (int32) |   (int64) |   (int64) | (object)   |
    |--------------+-----------+-----------+-----------+-----------+------------|
    | 1            |         1 |         2 |         9 |       121 | n1         |
    | 2            |         2 |         3 |        16 |       144 | n2         |
    | 3            |         3 |         4 |        25 |       169 | n3         |
    +--------------+-----------+-----------+-----------+-----------+------------+
    Unstranded PyRanges object has 3 rows and 6 columns from 3 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    >>> np.sqrt(gr)
    +--------------+-----------+-----------+-------------+-------------+------------+
    | Chromosome   |     Start |       End |       Score |      Score2 | Name       |
    | (category)   |   (int32) |   (int32) |   (float64) |   (float64) | (object)   |
    |--------------+-----------+-----------+-------------+-------------+------------|
    | 1            |         1 |         2 |           3 |          11 | n1         |
    | 2            |         2 |         3 |           4 |          12 | n2         |
    | 3            |         3 |         4 |           5 |          13 | n3         |
    +--------------+-----------+-----------+-------------+-------------+------------+
    Unstranded PyRanges object has 3 rows and 6 columns from 3 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    # NumPy dispatch protocol (NEP 13): called as
    # type(gr).__array_ufunc__(gr, ufunc, method, *inputs, **kwargs),
    # so for the unary case handled here args is (ufunc, method, gr).
    func, call, gr = args
    columns = list(gr.columns)
    # Never transform the genomic-location columns.
    non_index = [c for c in columns if c not in ["Chromosome", "Start", "End", "Strand"]]
    for chromosome, df in gr:
        # Probe one row to find which non-index columns are numeric;
        # the ufunc is only applied to those.
        subset = df.head(1)[non_index].select_dtypes(include=np.number).columns
        # e.g. getattr(np.sqrt, "__call__")(df[subset])
        _v = getattr(func, call)(df[subset], **kwargs)
        # NOTE(review): assigns back into the stored DataFrame in place,
        # then returns the same PyRanges — this mutates rather than
        # copies; confirm that is intended by callers.
        df[subset] = _v
    return gr
def __getattr__(self, name):
    """Return column.

    Only invoked by Python when normal attribute lookup fails, so real
    attributes and methods are unaffected; unknown names are resolved
    as column lookups by ``pyranges.methods.attr._getattr``.

    Parameters
    ----------
    name : str
        Column to return

    Returns
    -------
    pandas.Series

    Example
    -------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 1], "Start": [0, 100, 250], "End": [10, 125, 251]})
    >>> gr.Start
    0      0
    1    100
    2    250
    Name: Start, dtype: int32
    """
    from pyranges.methods.attr import _getattr
    return _getattr(self, name)
def __setattr__(self, column_name, column):
    """Insert or update column.

    Intercepts every attribute assignment on the PyRanges: ``columns``
    renames all columns, anything else is treated as column data and
    delegated to ``pyranges.methods.attr._setattr``.

    Parameters
    ----------
    column_name : str
        Name of column to update or insert.
    column : list, np.array or pd.Series
        Data to insert.

    Example
    -------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 1], "Start": [0, 100, 250], "End": [10, 125, 251]})
    >>> gr.Start = np.array([1, 1, 2])
    >>> gr
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int64) |   (int32) |
    |--------------+-----------+-----------|
    | 1            |         1 |        10 |
    | 1            |         1 |       125 |
    | 1            |         2 |       251 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 3 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.attr import _setattr
    if column_name == "columns":
        # Rename columns of every underlying DataFrame in place.
        dfs = {}
        for k, df in self:
            df.columns = column
            dfs[k] = df
        # Write through __dict__ directly to avoid re-entering this
        # __setattr__ with the "dfs" assignment.
        self.__dict__["dfs"] = dfs
    else:
        _setattr(self, column_name, column)
        # Start/End with mismatched dtypes is a common source of subtle
        # bugs downstream; warn loudly but do not raise.
        if column_name in ["Start", "End"]:
            if self.dtypes["Start"] != self.dtypes["End"]:
                print("Warning! Start and End columns now have different dtypes: {} and {}".format(
                    self.dtypes["Start"], self.dtypes["End"]))
def __getitem__(self, val):
    """Fetch columns or subset on position.

    If a list is provided, the column(s) in the list is returned. This subsets on columns.
    If a numpy array is provided, it must be of type bool and the same length as the PyRanges.
    Otherwise, a subset of the rows is returned with the location info provided.

    All dispatch on the shape/type of `val` happens in
    ``pyranges.methods.getitem._getitem``.

    Parameters
    ----------
    val : bool array/Series, tuple, list, str or slice
        Data to fetch.

    Examples
    --------
    >>> gr = pr.data.ensembl_gtf()
    >>> gr.columns
    Index(['Chromosome', 'Source', 'Feature', 'Start', 'End', 'Score', 'Strand',
    'Frame', 'gene_biotype', 'gene_id', 'gene_name', 'gene_source',
    'gene_version', 'tag', 'transcript_biotype', 'transcript_id',
    'transcript_name', 'transcript_source', 'transcript_support_level',
    'transcript_version', 'exon_id', 'exon_number', 'exon_version',
    '(assigned', 'previous', 'protein_id', 'protein_version', 'ccds_id'],
    dtype='object')
    >>> gr = gr[["Source", "Feature", "gene_id"]]
    >>> gr
    +--------------+------------+--------------+-----------+-----------+--------------+-----------------+
    | Chromosome   | Source     | Feature      |     Start |       End | Strand       | gene_id         |
    | (category)   | (object)   | (category)   |   (int32) |   (int32) | (category)   | (object)        |
    |--------------+------------+--------------+-----------+-----------+--------------+-----------------|
    | 1            | havana     | gene         |     11868 |     14409 | +            | ENSG00000223972 |
    | 1            | havana     | transcript   |     11868 |     14409 | +            | ENSG00000223972 |
    | 1            | havana     | exon         |     11868 |     12227 | +            | ENSG00000223972 |
    | 1            | havana     | exon         |     12612 |     12721 | +            | ENSG00000223972 |
    | ...          | ...        | ...          |       ... |       ... | ...          | ...             |
    | 1            | havana     | gene         |   1173055 |   1179555 | -            | ENSG00000205231 |
    | 1            | havana     | transcript   |   1173055 |   1179555 | -            | ENSG00000205231 |
    | 1            | havana     | exon         |   1179364 |   1179555 | -            | ENSG00000205231 |
    | 1            | havana     | exon         |   1173055 |   1176396 | -            | ENSG00000205231 |
    +--------------+------------+--------------+-----------+-----------+--------------+-----------------+
    Stranded PyRanges object has 2,446 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    Create boolean Series and use it to subset:

    >>> s = (gr.Feature == "gene") | (gr.gene_id == "ENSG00000223972")
    >>> gr[s]
    +--------------+----------------+--------------+-----------+-----------+--------------+-----------------+
    | Chromosome   | Source         | Feature      |     Start |       End | Strand       | gene_id         |
    | (category)   | (object)       | (category)   |   (int32) |   (int32) | (category)   | (object)        |
    |--------------+----------------+--------------+-----------+-----------+--------------+-----------------|
    | 1            | havana         | gene         |     11868 |     14409 | +            | ENSG00000223972 |
    | 1            | havana         | transcript   |     11868 |     14409 | +            | ENSG00000223972 |
    | 1            | havana         | exon         |     11868 |     12227 | +            | ENSG00000223972 |
    | 1            | havana         | exon         |     12612 |     12721 | +            | ENSG00000223972 |
    | ...          | ...            | ...          |       ... |       ... | ...          | ...             |
    | 1            | havana         | gene         |   1062207 |   1063288 | -            | ENSG00000273443 |
    | 1            | ensembl_havana | gene         |   1070966 |   1074306 | -            | ENSG00000237330 |
    | 1            | ensembl_havana | gene         |   1081817 |   1116361 | -            | ENSG00000131591 |
    | 1            | havana         | gene         |   1173055 |   1179555 | -            | ENSG00000205231 |
    +--------------+----------------+--------------+-----------+-----------+--------------+-----------------+
    Stranded PyRanges object has 95 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> cs = pr.data.chipseq()
    >>> cs[10000:100000]
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr2         |     33241 |     33266 | U0         |         0 | +            |
    | chr2         |     13611 |     13636 | U0         |         0 | -            |
    | chr2         |     32620 |     32645 | U0         |         0 | -            |
    | chr3         |     87179 |     87204 | U0         |         0 | +            |
    | chr4         |     45413 |     45438 | U0         |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 5 rows and 6 columns from 3 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> cs["chr1", "-"]
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         | 100079649 | 100079674 | U0         |         0 | -            |
    | chr1         | 223587418 | 223587443 | U0         |         0 | -            |
    | chr1         | 202450161 | 202450186 | U0         |         0 | -            |
    | chr1         | 156338310 | 156338335 | U0         |         0 | -            |
    | ...          |       ... |       ... | ...        |       ... | ...          |
    | chr1         | 203557775 | 203557800 | U0         |         0 | -            |
    | chr1         |  28114107 |  28114132 | U0         |         0 | -            |
    | chr1         |  21622765 |  21622790 | U0         |         0 | -            |
    | chr1         |  80668132 |  80668157 | U0         |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 437 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> cs["chr5", "-", 90000:]
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr5         |    399682 |    399707 | U0         |         0 | -            |
    | chr5         |   1847502 |   1847527 | U0         |         0 | -            |
    | chr5         |   5247533 |   5247558 | U0         |         0 | -            |
    | chr5         |   5300394 |   5300419 | U0         |         0 | -            |
    | ...          |       ... |       ... | ...        |       ... | ...          |
    | chr5         | 178786234 | 178786259 | U0         |         0 | -            |
    | chr5         | 179268931 | 179268956 | U0         |         0 | -            |
    | chr5         | 179289594 | 179289619 | U0         |         0 | -            |
    | chr5         | 180513795 | 180513820 | U0         |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 285 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> cs["chrM"]
    Empty PyRanges
    """
    from pyranges.methods.getitem import _getitem
    return _getitem(self, val)
def __iter__(self):
    """Iterate over the keys and values.

    Yields (chromosome, DataFrame) pairs for unstranded data, or
    ((chromosome, strand), DataFrame) pairs for stranded data, in the
    order produced by ``self.items()``.

    See Also
    --------
    pyranges.iter : iterate over multiple PyRanges

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 1], "Start": [0, 100, 250],
    ... "End": [10, 125, 251], "Strand": ["+", "+", "-"]})
    >>> for k, v in gr:
    ...     print(k)
    ...     print(v)
    ('1', '+')
    Chromosome Start End Strand
    0 1 0 10 +
    1 1 100 125 +
    ('1', '-')
    Chromosome Start End Strand
    2 1 250 251 -
    """
    return iter(self.items())
def __len__(self):
    """Return the total number of intervals across all stored DataFrames."""
    total = 0
    for frame in self.values():
        total += len(frame)
    return total
def __str__(self):
    """Return a tabular, human-readable representation of the PyRanges."""
    rendered = tostring(self)
    return rendered
def __repr__(self):
    """Return the REPL representation (identical to ``str(self)``)."""
    return self.__str__()
def _repr_html_(self):
    """Return an HTML representation for Jupyter notebooks.

    Delegates to the underlying concatenated DataFrame's HTML renderer.
    """
    return self.df._repr_html_()
def apply(self, f, strand=None, as_pyranges=True, nb_cpu=1, **kwargs):
    """Apply a function to the PyRanges.

    Parameters
    ----------
    f : function
        Function to apply on each DataFrame in a PyRanges

    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or chromosomes. If None, will use
        chromosome/strand pairs if the PyRanges is stranded.

    as_pyranges : bool, default True
        Whether to return as a PyRanges or dict. If `f` does not return a DataFrame valid for
        PyRanges, `as_pyranges` must be False.

    nb_cpu: int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    **kwargs
        Additional keyword arguments to pass as keyword arguments to `f`

    Returns
    -------
    PyRanges or dict
        Result of applying f to each DataFrame in the PyRanges

    See also
    --------
    pyranges.PyRanges.apply_pair: apply a function to a pair of PyRanges
    pyranges.PyRanges.apply_chunks: apply a row-based function to a PyRanges in parallel

    Note
    ----
    This is the function used internally to carry out almost all unary PyRanges methods.

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 2, 2], "Strand": ["+", "+", "-", "+"],
    ... "Start": [1, 4, 2, 9], "End": [2, 27, 13, 10]})
    >>> gr
    +--------------+--------------+-----------+-----------+
    | Chromosome   | Strand       |     Start |       End |
    | (category)   | (category)   |   (int32) |   (int32) |
    |--------------+--------------+-----------+-----------|
    | 1            | +            |         1 |         2 |
    | 1            | +            |         4 |        27 |
    | 2            | +            |         9 |        10 |
    | 2            | -            |         2 |        13 |
    +--------------+--------------+-----------+-----------+
    Stranded PyRanges object has 4 rows and 4 columns from 2 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    >>> gr.apply(lambda df: len(df), as_pyranges=False)
    {('1', '+'): 2, ('2', '+'): 1, ('2', '-'): 1}
    >>> gr.apply(lambda df: len(df), as_pyranges=False, strand=False)
    {'1': 2, '2': 2}
    >>> def add_to_ends(df, **kwargs):
    ...     df.loc[:, "End"] = kwargs["slack"] + df.End
    ...     return df
    >>> gr.apply(add_to_ends, slack=500)
    +--------------+--------------+-----------+-----------+
    | Chromosome   | Strand       |     Start |       End |
    | (category)   | (category)   |   (int32) |   (int32) |
    |--------------+--------------+-----------+-----------|
    | 1            | +            |         1 |       502 |
    | 1            | +            |         4 |       527 |
    | 2            | +            |         9 |       510 |
    | 2            | -            |         2 |       513 |
    +--------------+--------------+-----------+-----------+
    Stranded PyRanges object has 4 rows and 4 columns from 2 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    # Auto-detect: operate per chromosome/strand pair when stranded.
    if strand is None:
        strand = self.stranded

    kwargs.update({"strand": strand})
    # Flatten a nested "kwargs" dict so wrappers can forward options.
    kwargs.update(kwargs.get("kwargs", {}))
    # NOTE(review): nb_cpu is accepted but not inserted into kwargs here,
    # so it does not appear to reach pyrange_apply_single — confirm
    # whether fill_kwargs supplies a default instead.
    kwargs = fill_kwargs(kwargs)

    result = pyrange_apply_single(f, self, **kwargs)

    if not as_pyranges:
        return result
    else:
        return PyRanges(result)
def apply_chunks(self, f, as_pyranges=False, nb_cpu=1, **kwargs):
    """Apply a row-based function to arbitrary partitions of the PyRanges.

    apply_chunks speeds up the application of functions where the result is not affected by
    applying the function to ordered, non-overlapping splits of the data.

    Parameters
    ----------
    f : function
        Row-based or associative function to apply on the partitions.

    as_pyranges : bool, default False
        Whether to return as a PyRanges or dict.

    nb_cpu: int, default 1
        How many cpus to use. The data is split into nb_cpu partitions.

    **kwargs
        Additional keyword arguments to pass as keyword arguments to `f`

    Returns
    -------
    dict of lists
        Result of applying f to each partition of the DataFrames in the PyRanges.

    See also
    --------
    pyranges.PyRanges.apply_pair: apply a function to a pair of PyRanges

    Note
    ----
    apply_chunks will only lead to speedups on large datasets or slow-running functions. Using
    it with nb_cpu=1 is pointless; use apply instead.

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 1], "Start": [2, 3, 5], "End": [9, 4, 6]})
    >>> gr
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | 1            |         2 |         9 |
    | 1            |         3 |         4 |
    | 1            |         5 |         6 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 3 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    >>> gr.apply_chunks(
    ...     lambda df, **kwargs: list(df.End + kwargs["add"]), nb_cpu=1, add=1000)
    {'1': [[1009, 1004, 1006]]}
    """
    # Flatten a nested "kwargs" dict so wrappers can forward options.
    kwargs.update(kwargs.get("kwargs", {}))
    # NOTE(review): nb_cpu is not placed in kwargs here — confirm that
    # pyrange_apply_chunks/fill_kwargs pick up the partition count.
    kwargs = fill_kwargs(kwargs)

    result = pyrange_apply_chunks(f, self, as_pyranges, **kwargs)

    return result
def apply_pair(self,
               other,
               f,
               strandedness=None,
               as_pyranges=True,
               **kwargs):
    """Apply a function to a pair of PyRanges.

    The function is applied to each chromosome or chromosome/strand pair found in at least one
    of the PyRanges.

    Parameters
    ----------
    other : PyRanges
        Second PyRanges to apply the function to.

    f : function
        Row-based or associative function to apply on the DataFrames.

    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.

    as_pyranges : bool, default True
        Whether to return as a PyRanges or dict. If `f` does not return a DataFrame valid for
        PyRanges, `as_pyranges` must be False.

    **kwargs
        Additional keyword arguments to pass as keyword arguments to `f`

    Returns
    -------
    PyRanges or dict
        Result of applying f to each pair of DataFrames in the two PyRanges.

    See also
    --------
    pyranges.PyRanges.apply_chunks: apply a row-based function to a PyRanges in parallel
    pyranges.iter: iterate over two or more PyRanges

    Note
    ----
    This is the function used internally to carry out almost all comparison functions in
    PyRanges.

    Examples
    --------
    >>> gr = pr.data.chipseq()
    >>> gr2 = pr.data.chipseq_background()
    >>> gr.apply_pair(gr2, pr.methods.intersection._intersection) # same as gr.intersect(gr2)
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         | 226987603 | 226987617 | U0         |         0 | +            |
    | chr8         |  38747236 |  38747251 | U0         |         0 | -            |
    | chr15        |  26105515 |  26105518 | U0         |         0 | +            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 6 columns from 3 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1 = pr.data.f1()
    >>> f1
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |
    | chr1         |         8 |         9 | interval3  |         0 | +            |
    | chr1         |         5 |         7 | interval2  |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f2 = pr.data.f2()
    >>> f2
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         1 |         2 | a          |         0 | +            |
    | chr1         |         6 |         7 | b          |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.apply_pair(f2, lambda df, df2: (len(df), len(df2)), as_pyranges=False)
    {('chr1', '+'): (2, 2), ('chr1', '-'): (1, 2)}
    """
    kwargs.update({"strandedness": strandedness})
    # Flatten a nested "kwargs" dict so wrappers can forward options.
    kwargs.update(kwargs.get("kwargs", {}))

    kwargs = fill_kwargs(kwargs)

    result = pyrange_apply(f, self, other, **kwargs)

    if not as_pyranges:
        return result
    else:
        return PyRanges(result)
def as_df(self):
    """Return the PyRanges as a single DataFrame.

    The underlying per-chromosome (or per chromosome/strand) DataFrames
    are concatenated in their stored, natsorted order; the ordering of
    rows within each of them is preserved.

    Returns
    -------
    DataFrame
        A DataFrame natural sorted on Chromosome and Strand. Empty
        PyRanges yield an empty DataFrame; a PyRanges holding a single
        interval is returned as its sole underlying frame.

    See also
    --------
    PyRanges.df : property alias for this method.
    """
    n_intervals = len(self)
    if n_intervals == 0:
        return pd.DataFrame()
    if n_intervals == 1:
        return self.values()[0]
    return pd.concat(self.values()).reset_index(drop=True)
def assign(self, col, f, strand=None, nb_cpu=1, **kwargs):
    """Add or replace a column.

    Does not change the original PyRanges.

    Parameters
    ----------
    col : str
        Name of column.

    f : function
        Function to create new column. Must return a pandas.Series per
        DataFrame.

    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or chromosomes. If None, will use
        chromosome/strand pairs if the PyRanges is stranded.

    nb_cpu: int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    **kwargs
        Additional keyword arguments to pass as keyword arguments to `f`

    Returns
    -------
    PyRanges
        A copy of the PyRanges with the column inserted.

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 1], "Start": [1, 2], "End": [3, 5],
    ... "Name": ["a", "b"]})
    >>> gr
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | Name       |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | 1            |         1 |         3 | a          |
    | 1            |         2 |         5 | b          |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    >>> gr.assign("Blabla", lambda df: df.Chromosome.astype(str) + "_yadayada")
    +--------------+-----------+-----------+------------+------------+
    | Chromosome   |     Start |       End | Name       | Blabla     |
    | (category)   |   (int32) |   (int32) | (object)   | (object)   |
    |--------------+-----------+-----------+------------+------------|
    | 1            |         1 |         3 | a          | 1_yadayada |
    | 1            |         2 |         5 | b          | 1_yadayada |
    +--------------+-----------+-----------+------------+------------+
    Unstranded PyRanges object has 2 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    Note that assigning to an existing name replaces the column:

    >>> gr.assign("Name",
    ... lambda df, **kwargs: df.Start.astype(str) + kwargs["sep"] +
    ... df.Name.str.capitalize(), sep="_")
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | Name       |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | 1            |         1 |         3 | 1_A        |
    | 1            |         2 |         5 | 2_B        |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    # Deep-copy up front so neither `f` nor the column insertion below
    # can mutate the caller's PyRanges.
    self = self.copy()

    if strand is None:
        strand = self.stranded

    kwargs["strand"] = strand
    kwargs = fill_kwargs(kwargs)

    result = pyrange_apply_single(f, self, **kwargs)

    # Empty PyRanges: nothing to validate or insert; previously this
    # fell through to next() on an empty iterator and raised a bare
    # StopIteration.
    if not result:
        return self

    first_result = next(iter(result.values()))

    assert isinstance(
        first_result, pd.Series
    ), "result of assign function must be Series, but is {}".format(
        type(first_result))

    # The copy made above already isolates us from the caller, so insert
    # the new column directly instead of deep-copying a second time.
    self.__setattr__(col, result)

    return self
@property
def chromosomes(self):
    """Return the distinct chromosome identifiers in natsorted order."""
    keys = self.keys()
    if self.stranded:
        # Stranded keys are (chromosome, strand) tuples.
        keys = (k[0] for k in keys)
    return natsorted(set(keys))
def cluster(self, strand=None, by=None, slack=0, count=False, nb_cpu=1):
    """Give overlapping intervals a common id.

    Parameters
    ----------
    strand : bool, default None, i.e. auto
        Whether to ignore strand information if PyRanges is stranded.

    by : str or list, default None
        Only intervals with an equal value in column(s) `by` are clustered.

    slack : int, default 0
        Consider intervals separated by less than `slack` to be in the same cluster. If `slack`
        is negative, intervals overlapping less than `slack` are not considered to be in the
        same cluster.

    count : bool, default False
        Add a "Count" column with the size of each cluster.

    nb_cpu: int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        PyRanges with an ID-column "Cluster" added.

    See also
    --------
    PyRanges.merge: combine overlapping intervals into one

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": [1, 1, 1, 1], "Start": [1, 2, 3, 9],
    ... "End": [3, 3, 10, 12], "Gene": [1, 2, 3, 3]})
    >>> gr
    +--------------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |      Gene |
    | (category)   |   (int32) |   (int32) |   (int64) |
    |--------------+-----------+-----------+-----------|
    | 1            |         1 |         3 |         1 |
    | 1            |         2 |         3 |         2 |
    | 1            |         3 |        10 |         3 |
    | 1            |         9 |        12 |         3 |
    +--------------+-----------+-----------+-----------+
    Unstranded PyRanges object has 4 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    >>> gr.cluster()
    +--------------+-----------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |      Gene |   Cluster |
    | (category)   |   (int32) |   (int32) |   (int64) |   (int32) |
    |--------------+-----------+-----------+-----------+-----------|
    | 1            |         1 |         3 |         1 |         1 |
    | 1            |         2 |         3 |         2 |         1 |
    | 1            |         3 |        10 |         3 |         1 |
    | 1            |         9 |        12 |         3 |         1 |
    +--------------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 4 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    >>> gr.cluster(by="Gene", count=True)
    +--------------+-----------+-----------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |      Gene |   Cluster |     Count |
    | (category)   |   (int32) |   (int32) |   (int64) |   (int32) |   (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------|
    | 1            |         1 |         3 |         1 |         1 |         1 |
    | 1            |         2 |         3 |         2 |         2 |         1 |
    | 1            |         3 |        10 |         3 |         3 |         2 |
    | 1            |         9 |        12 |         3 |         3 |         2 |
    +--------------+-----------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 4 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    Avoid clustering bookended intervals with slack=-1:

    >>> gr.cluster(slack=-1)
    +--------------+-----------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |      Gene |   Cluster |
    | (category)   |   (int32) |   (int32) |   (int64) |   (int32) |
    |--------------+-----------+-----------+-----------+-----------|
    | 1            |         1 |         3 |         1 |         1 |
    | 1            |         2 |         3 |         2 |         1 |
    | 1            |         3 |        10 |         3 |         2 |
    | 1            |         9 |        12 |         3 |         2 |
    +--------------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 4 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr2 = pr.data.ensembl_gtf()[["Feature", "Source"]]
    >>> gr2.cluster(by=["Feature", "Source"])
    +--------------+--------------+---------------+-----------+-----------+--------------+-----------+
    | Chromosome   | Feature      | Source        |     Start |       End | Strand       |   Cluster |
    | (category)   | (category)   | (object)      |   (int32) |   (int32) | (category)   |   (int32) |
    |--------------+--------------+---------------+-----------+-----------+--------------+-----------|
    | 1            | CDS          | ensembl       |     69090 |     70005 | +            |         1 |
    | 1            | CDS          | ensembl       |    925941 |    926013 | +            |         2 |
    | 1            | CDS          | ensembl       |    925941 |    926013 | +            |         2 |
    | 1            | CDS          | ensembl       |    925941 |    926013 | +            |         2 |
    | ...          | ...          | ...           |       ... |       ... | ...          |       ... |
    | 1            | transcript   | havana_tagene |    167128 |    169240 | -            |      1142 |
    | 1            | transcript   | mirbase       |     17368 |     17436 | -            |      1143 |
    | 1            | transcript   | mirbase       |    187890 |    187958 | -            |      1144 |
    | 1            | transcript   | mirbase       |    632324 |    632413 | -            |      1145 |
    +--------------+--------------+---------------+-----------+-----------+--------------+-----------+
    Stranded PyRanges object has 2,446 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    if strand is None:
        strand = self.stranded

    kwargs = {"strand": strand, "slack": slack, "count": count, "by": by}
    kwargs = fill_kwargs(kwargs)

    _stranded = self.stranded
    if not strand and _stranded:
        # Stash the strand in a spare column so it survives unstrand(),
        # and restore it under its real name at the end.
        # NOTE(review): this assignment appears to add a Strand2 column
        # to the caller's object before the rebinding below — confirm
        # whether that side effect is intended.
        self.Strand2 = self.Strand
        self = self.unstrand()

    if not by:
        from pyranges.methods.cluster import _cluster
        df = pyrange_apply_single(_cluster, self, **kwargs)
    else:
        from pyranges.methods.cluster import _cluster_by
        kwargs["by"] = by
        df = pyrange_apply_single(_cluster_by, self, **kwargs)

    gr = PyRanges(df)

    # Each chromosome got overlapping ids (0 to len); offset every
    # subsequent chromosome's ids by the running maximum to make the
    # Cluster ids unique across the whole PyRanges.
    new_dfs = {}
    first = True
    max_id = 0
    for k, v in gr.items():
        if first:
            max_id = v.Cluster.max()
            new_dfs[k] = v
            first = False
            continue
        v.loc[:, "Cluster"] += max_id
        max_id = v.Cluster.max()
        new_dfs[k] = v

    if not strand and _stranded:
        # Restore the original strand column stashed above.
        new_dfs = {
            k: d.rename(columns={"Strand2": "Strand"})
            for k, d in new_dfs.items()
        }

    self = PyRanges(new_dfs)

    return self
def copy(self):
    """Make a deep copy of the PyRanges.

    Notes
    -----
    See the pandas docs for deep-copying caveats.
    """
    def _deep_copy(df):
        # Deep-copy each underlying DataFrame.
        return df.copy(deep=True)

    return self.apply(_deep_copy)
@property
def columns(self):
    """Return the column labels of the PyRanges.

    Taken from the first underlying DataFrame; an empty PyRanges
    yields an empty list.

    Returns
    -------
    pandas.Index

    See also
    --------
    PyRanges.chromosomes : return the chromosomes in the PyRanges
    """
    frames = self.values()
    if not len(frames):
        return []
    return next(iter(frames)).columns
def count_overlaps(self, other, strandedness=None, keep_nonoverlapping=True, overlap_col="NumberOverlaps"):
    """Count number of overlaps per interval.

    Count how many intervals in self overlap with those in other.

    Parameters
    ----------
    other : PyRanges
        PyRanges to count overlaps against.

    strandedness : {"same", "opposite", None, False}, default None, i.e. auto
        Whether to perform the operation on the same, opposite or no strand. Use False to
        ignore the strand. None means use "same" if both PyRanges are stranded, otherwise
        ignore.

    keep_nonoverlapping : bool, default True
        Keep intervals without overlaps.

    overlap_col : str, default "NumberOverlaps"
        Name of column with overlap counts.

    Returns
    -------
    PyRanges
        PyRanges with a column of overlaps added.

    See also
    --------
    PyRanges.coverage: find coverage of PyRanges
    pyranges.count_overlaps: count overlaps from multiple PyRanges

    Examples
    --------
    >>> f1 = pr.data.f1().drop()
    >>> f1
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         3 |         6 | +            |
    | chr1         |         8 |         9 | +            |
    | chr1         |         5 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    >>> f2 = pr.data.f2().drop()
    >>> f2
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         1 |         2 | +            |
    | chr1         |         6 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    >>> f1.count_overlaps(f2, overlap_col="Count")
    +--------------+-----------+-----------+--------------+-----------+
    | Chromosome   |     Start |       End | Strand       |     Count |
    | (category)   |   (int32) |   (int32) | (category)   |   (int64) |
    |--------------+-----------+-----------+--------------+-----------|
    | chr1         |         3 |         6 | +            |         0 |
    | chr1         |         8 |         9 | +            |         0 |
    | chr1         |         5 |         7 | -            |         1 |
    +--------------+-----------+-----------+--------------+-----------+
    Stranded PyRanges object has 3 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    kwargs = {"strandedness": strandedness, "keep_nonoverlapping": keep_nonoverlapping,
              "overlap_col": overlap_col}
    kwargs = fill_kwargs(kwargs)

    from pyranges.methods.coverage import _number_overlapping
    counts = pyrange_apply(_number_overlapping, self, other, **kwargs)

    return pr.PyRanges(counts)
def coverage(self, other, strandedness=None, keep_nonoverlapping=True, overlap_col="NumberOverlaps", fraction_col="FractionOverlaps", nb_cpu=1):
    """Count number of overlaps and their fraction per interval.

    For every interval in self, count how many intervals in other overlap it,
    and compute which fraction of its length is covered by those overlaps.

    Parameters
    ----------
    other : PyRanges
        PyRanges to compute coverage against.

    strandedness : {"same", "opposite", None, False}, default None, i.e. auto
        Whether to perform the operation on the same, opposite or no strand. Use False to
        ignore the strand. None means use "same" if both PyRanges are stranded, otherwise
        ignore.

    keep_nonoverlapping : bool, default True
        Keep intervals without overlaps.

    overlap_col : str, default "NumberOverlaps"
        Name of column with overlap counts.

    fraction_col : str, default "FractionOverlaps"
        Name of column with fraction of counts.

    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        PyRanges with a count column and a fraction column added.

    See also
    --------
    pyranges.count_overlaps : count overlaps from multiple PyRanges

    Examples
    --------
    >>> f1 = pr.from_dict({"Chromosome": [1, 1, 1], "Start": [3, 8, 5],
    ...                    "End": [6, 9, 7]})
    >>> f2 = pr.from_dict({"Chromosome": [1, 1], "Start": [1, 6],
    ...                    "End": [2, 7]})
    >>> f1.coverage(f2, overlap_col="C", fraction_col="F")
    +--------------+-----------+-----------+-----------+-------------+
    | Chromosome   |     Start |       End |         C |           F |
    | (category)   |   (int32) |   (int32) |   (int64) |   (float64) |
    |--------------+-----------+-----------+-----------+-------------|
    |            1 |         3 |         6 |         0 |           0 |
    |            1 |         8 |         9 |         0 |           0 |
    |            1 |         5 |         7 |         1 |         0.5 |
    +--------------+-----------+-----------+-----------+-------------+
    Unstranded PyRanges object has 3 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.coverage import _coverage

    kwargs = fill_kwargs({
        "strandedness": strandedness,
        "keep_nonoverlapping": keep_nonoverlapping,
        "overlap_col": overlap_col,
        "fraction_col": fraction_col,
        "nb_cpu": nb_cpu,
    })

    # First annotate every interval with its overlap count.
    annotated = self.count_overlaps(
        other, keep_nonoverlapping=True, overlap_col=overlap_col,
        strandedness=strandedness)

    # Merge other (per strand if requested) so fractions are computed
    # against non-redundant intervals.
    use_strand = bool(kwargs["strandedness"])
    merged_other = other.merge(count=True, strand=use_strand)

    return pr.PyRanges(
        pyrange_apply(_coverage, annotated, merged_other, **kwargs))
@property
def df(self):
    """Return the PyRanges as a DataFrame.

    Convenience shorthand for calling :meth:`PyRanges.as_df`.

    See also
    --------
    PyRanges.as_df : return PyRanges as DataFrame.
    """
    frame = self.as_df()
    return frame
def drop(self, drop=None, like=None):
    """Drop column(s).

    If no arguments are given, all the columns except Chromosome, Start, End and Strand
    are dropped.

    Parameters
    ----------
    drop : str or list, default None
        Columns to drop.

    like : str, default None
        Regex-string matching columns to drop. Matches with Chromosome, Start, End or
        Strand are ignored, i.e. the position columns are never dropped.

    Returns
    -------
    PyRanges
        PyRanges without the dropped columns.

    See also
    --------
    PyRanges.unstrand : drop strand information
    """
    from pyranges.methods.drop import _drop

    # All filtering logic (including the protection of the position
    # columns) lives in the helper.
    return _drop(self, drop, like)
def drop_duplicate_positions(self, strand=None, keep="first"):
    """Return PyRanges with rows at duplicate positions removed.

    Parameters
    ----------
    strand : bool, default None, i.e. auto
        Whether to take strand-information into account when considering duplicates.
        The default uses the strand if the PyRanges is stranded.

    keep : {"first", "last", False}
        Whether to keep first, last or drop all duplicates.

    Returns
    -------
    PyRanges
        PyRanges with duplicated position rows removed.
    """
    from pyranges.methods.drop_duplicates import _drop_duplicate_positions

    if strand is None:
        strand = self.stranded

    kwargs = fill_kwargs({"sparse": {"self": False}, "keep": keep})
    # Strand can only be honoured when the PyRanges actually carries it.
    kwargs["strand"] = strand and self.stranded

    return PyRanges(
        pyrange_apply_single(_drop_duplicate_positions, self, **kwargs))
@property
def dtypes(self):
    """Return the dtypes of the PyRanges.

    Returns
    -------
    pandas.Series
        Dtype of each column, read from one of the underlying per-key
        DataFrames (assumed identical across keys — TODO confirm invariant).
    """
    first_df = next(iter(self.dfs.values()))
    return first_df.dtypes
@property
def empty(self):
    """Indicate whether the PyRanges contains no intervals."""
    return not len(self)
def extend(self, ext):
    """Extend the intervals from the ends.

    Parameters
    ----------
    ext : int or dict of ints with "3" and/or "5" as keys.
        The number of nucleotides to extend the ends with.
        If an int is provided, the same extension is applied to both
        the start and end of intervals, while a dict input allows to control
        differently the two ends. Note also that 5' and 3' extensions take
        the strand into account, if the intervals are stranded.

    Returns
    -------
    PyRanges
        PyRanges with the extended intervals.

    Raises
    ------
    AssertionError
        If a dict is given while the PyRanges is unstranded, or if any
        interval ends up with zero or negative length after extending.

    See Also
    --------
    PyRanges.subsequence : obtain subsequences of intervals
    PyRanges.spliced_subsequence : obtain subsequences of intervals, providing transcript-level coordinates
    """
    # End-specific (5'/3') extension is only meaningful with strand info.
    if isinstance(ext, dict):
        assert self.stranded, "PyRanges must be stranded to add 5/3-end specific extend."

    kwargs = fill_kwargs({"ext": ext, "strand": self.stranded})
    return PyRanges(pyrange_apply_single(_extend, self, **kwargs))
# # TODO: use subtract code here instead, easier
# def no_overlap(self, other, **kwargs):
# kwargs = fill_kwargs(kwargs)
# kwargs["invert"] = True
# kwargs["sparse"] = {"self": False, "other": True}
# # if kwargs["strandedness"] in ["same", "opposite"]:
# # kwargs["strandedness"] = {
# # "same": "opposite",
# # "opposite": "same"
# # }[kwargs["strandedness"]]
# dfs = pyrange_apply(_overlap, self, other, **kwargs)
# return PyRanges(dfs)
# @profile
def five_end(self):
    """Return the five prime end of intervals.

    The five prime end is the start of a forward strand or the end of a reverse strand.

    Returns
    -------
    PyRanges
        PyRanges with the five prime ends

    Notes
    -----
    Requires the PyRanges to be stranded.

    See Also
    --------
    PyRanges.three_end : return the 3' end
    """
    # The strand decides which coordinate is the 5' end.
    assert self.stranded, "Need stranded pyrange to find 5'."

    kwargs = fill_kwargs({"strand": self.stranded})
    result = pyrange_apply_single(_tss, self, **kwargs)
    return PyRanges(result)
def head(self, n=8):
    """Return the n first rows.

    Parameters
    ----------
    n : int, default 8
        Return n rows.

    Returns
    -------
    PyRanges
        PyRanges with the n first rows.

    See Also
    --------
    PyRanges.tail : return the last rows
    PyRanges.sample : return random rows
    """
    # Boolean mask selecting the first n rows across the whole PyRanges.
    # NOTE: `np.bool` (a deprecated alias for the builtin `bool`) was removed
    # in NumPy 1.24 and raised AttributeError; use `bool` directly.
    subsetter = np.zeros(len(self), dtype=bool)
    subsetter[:n] = True
    return self[subsetter]
def insert(self, other, loc=None):
    """Add one or more columns to the PyRanges.

    Parameters
    ----------
    other : Series, DataFrame or dict
        Data to insert into the PyRanges. `other` must have the same number of rows as the PyRanges.

    loc : int, default None, i.e. after last column of PyRanges.
        Insertion index.

    Returns
    -------
    PyRanges
        A copy of the PyRanges with the column(s) inserted starting at `loc`.

    Note
    ----
    If a Series, or a dict of Series is used, the Series must have a name.
    A dict input maps PyRanges keys to per-key Series or DataFrames.
    """
    from pyranges.methods.attr import _setattr

    if loc is None:
        loc = len(self.columns)

    # Work on a copy so the original PyRanges is left untouched.
    self = self.copy()

    if isinstance(other, (pd.Series, pd.DataFrame)):
        assert len(other) == len(self), "Pandas Series or DataFrame must be same length as PyRanges!"
        if isinstance(other, pd.Series):
            if not other.name:
                raise Exception("Series must have a name!")
            _setattr(self, other.name, other, loc)
        if isinstance(other, pd.DataFrame):
            for column in other:
                _setattr(self, column, other[column], loc)
                loc += 1
    elif isinstance(other, dict) and other:
        first = next(iter(other.values()))
        if isinstance(first, pd.DataFrame):
            # One dict-of-Series per column, keyed like the PyRanges.
            columns = first.columns
            per_column = [{k: v[c] for k, v in other.items()} for c in columns]
            for column, d in zip(columns, per_column):
                _setattr(self, str(column), d, loc)
                loc += 1
        else:
            if not first.name:
                raise Exception("Series must have a name!")
            _setattr(self, first.name, dict(other), loc)

    return self
def intersect(self, other, strandedness=None, how=None, invert=False, nb_cpu=1):
    """Return overlapping subintervals.

    Returns the segments of the intervals in self which overlap with those in other.

    Parameters
    ----------
    other : PyRanges
        PyRanges to intersect.

    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.

    how : {None, "first", "last", "containment"}, default None, i.e. all
        What intervals to report. By default reports all overlapping intervals. "containment"
        reports intervals where the overlapping is contained within it.

    invert : bool, default False
        Whether to return the intervals without overlaps.

    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with overlapping subintervals.

    See also
    --------
    PyRanges.set_intersect : set-intersect PyRanges
    PyRanges.overlap : report overlapping intervals

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": ["chr1"] * 3, "Start": [1, 4, 10],
    ...                    "End": [3, 9, 11], "ID": ["a", "b", "c"]})
    >>> gr2 = pr.from_dict({"Chromosome": ["chr1"] * 3, "Start": [2, 2, 9], "End": [3, 9, 10]})
    >>> gr.intersect(gr2)
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | ID         |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | chr1         |         2 |         3 | a          |
    | chr1         |         2 |         3 | a          |
    | chr1         |         4 |         9 | b          |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 3 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.intersect(gr2, how="first")
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | ID         |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | chr1         |         2 |         3 | a          |
    | chr1         |         4 |         9 | b          |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.intersect(gr2, how="containment")
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | ID         |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | chr1         |         4 |         9 | b          |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 1 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    kwargs = {"how": how, "strandedness": strandedness, "nb_cpu": nb_cpu}
    kwargs = fill_kwargs(kwargs)
    # self must keep all its columns in the result; other is only needed
    # for coordinates.
    kwargs["sparse"] = {"self": False, "other": True}

    if len(self) == 0:
        return self

    if invert:
        # Tag each row with its original index so the rows that did NOT
        # overlap can be recovered after the intersection below.
        self.__ix__ = np.arange(len(self))

    dfs = pyrange_apply(_intersection, self, other, **kwargs)
    result = pr.PyRanges(dfs)

    if invert:
        # Keep only rows whose tag never appeared in the overlap result,
        # then drop the helper column again.
        found_idxs = getattr(result, "__ix__", [])
        result = self[~self.__ix__.isin(found_idxs)]
        result = result.drop("__ix__")

    return result
def items(self):
    """Return the pairs of keys and DataFrames.

    Returns
    -------
    list
        Naturally sorted list of (key, DataFrame) tuples, where each key is a
        chromosome or a (chromosome, strand) tuple.

    See Also
    --------
    PyRanges.chromosomes : return the chromosomes
    PyRanges.keys : return the keys
    PyRanges.values : return the DataFrames in the PyRanges
    """
    return natsorted(list(self.dfs.items()))
def join(self, other, strandedness=None, how=None, report_overlap=False, slack=0, suffix="_b", nb_cpu=1, apply_strand_suffix=None):
    """Join PyRanges on genomic location.

    Parameters
    ----------
    other : PyRanges
        PyRanges to join.

    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.

    how : {None, "left", "right"}, default None, i.e. "inner"
        How to handle intervals without overlap. None means only keep overlapping intervals.
        "left" keeps all intervals in self, "right" keeps all intervals in other.

    report_overlap : bool, default False
        Report amount of overlap in base pairs.

    slack : int, default 0
        Lengthen intervals in self before joining.

    suffix : str or tuple, default "_b"
        Suffix to give overlapping columns in other.

    apply_strand_suffix : bool, default None
        If first pyranges is unstranded, but the second is not, the first will be given a strand column.
        apply_strand_suffix makes the added strand column a regular data column instead by adding a suffix.

    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges appended with columns of another.

    Notes
    -----
    The chromosome from other will never be reported as it is always the same as in self.

    As pandas did not have NaN for non-float datatypes until recently, "left" and "right" join
    give non-overlapping rows the value -1 to avoid promoting columns to object. This will
    change to NaN in a future version as general NaN becomes stable in pandas.

    See also
    --------
    PyRanges.new_position : give joined PyRanges new coordinates

    Examples
    --------
    >>> f1 = pr.from_dict({'Chromosome': ['chr1', 'chr1', 'chr1'], 'Start': [3, 8, 5],
    ...                    'End': [6, 9, 7], 'Name': ['interval1', 'interval3', 'interval2']})
    >>> f2 = pr.from_dict({'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...                    'End': [2, 7], 'Name': ['a', 'b']})
    >>> f1.join(f2)
    +--------------+-----------+-----------+------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | Name       |   Start_b |     End_b | Name_b     |
    | (category)   |   (int32) |   (int32) | (object)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------+-----------+-----------+------------|
    | chr1         |         5 |         7 | interval2  |         6 |         7 | b          |
    +--------------+-----------+-----------+------------+-----------+-----------+------------+
    Unstranded PyRanges object has 1 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    With slack 1, bookended features are joined:

    >>> f1.join(f2, slack=1)
    +--------------+-----------+-----------+------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | Name       |   Start_b |     End_b | Name_b     |
    | (category)   |   (int32) |   (int32) | (object)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------+-----------+-----------+------------|
    | chr1         |         3 |         6 | interval1  |         6 |         7 | b          |
    | chr1         |         5 |         7 | interval2  |         6 |         7 | b          |
    +--------------+-----------+-----------+------------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.join import _write_both

    kwargs = {"strandedness": strandedness, "how": how, "report_overlap": report_overlap, "suffix": suffix, "nb_cpu": nb_cpu, "apply_strand_suffix": apply_strand_suffix}

    if slack:
        # Remember the original coordinates in helper columns, then widen
        # the intervals so near-adjacent features can match; the originals
        # are restored after the join.
        self = self.copy()
        self.Start__slack = self.Start
        self.End__slack = self.End
        self = self.extend(slack)

    if "suffix" in kwargs and isinstance(kwargs["suffix"], str):
        # A plain string suffix only applies to columns from other.
        suffixes = "", kwargs["suffix"]
        kwargs["suffixes"] = suffixes

    kwargs = fill_kwargs(kwargs)

    how = kwargs.get("how")

    # Outer-style joins need the other side's header to fabricate the
    # placeholder rows for non-overlapping intervals.
    if how in ["left", "outer"]:
        kwargs["example_header_other"] = other.head(1).df
    if how in ["right", "outer"]:
        kwargs["example_header_self"] = self.head(1).df

    dfs = pyrange_apply(_write_both, self, other, **kwargs)
    gr = PyRanges(dfs)

    if slack and len(gr) > 0:
        # Restore the original, unextended coordinates and drop the helpers.
        gr.Start = gr.Start__slack
        gr.End = gr.End__slack
        gr = gr.drop(like="(Start|End).*__slack")

    if not self.stranded and other.stranded:
        if apply_strand_suffix is None:
            import sys
            print("join: Strand data from other will be added as strand data to self.\nIf this is undesired use the flag apply_strand_suffix=False.\nTo turn off the warning set apply_strand_suffix to True or False.", file=sys.stderr)
        elif apply_strand_suffix:
            # Keep the joined Strand as a plain data column instead.
            gr.columns = gr.columns.str.replace("Strand", "Strand" + kwargs["suffix"])

    return gr
def keys(self):
    """Return the keys.

    Returns
    -------
    Returns the keys (chromosomes or chromosome/strand pairs) as strings or tuples of strings
    in natsorted order.

    See Also
    --------
    PyRanges.chromosomes : return the chromosomes

    Examples
    --------
    >>> gr = pr.data.chipseq()
    >>> gr.keys()
    [('chr1', '+'), ('chr1', '-'), ('chr2', '+'), ('chr2', '-'), ('chr3', '+'), ('chr3', '-'), ('chr4', '+'), ('chr4', '-'), ('chr5', '+'), ('chr5', '-'), ('chr6', '+'), ('chr6', '-'), ('chr7', '+'), ('chr7', '-'), ('chr8', '+'), ('chr8', '-'), ('chr9', '+'), ('chr9', '-'), ('chr10', '+'), ('chr10', '-'), ('chr11', '+'), ('chr11', '-'), ('chr12', '+'), ('chr12', '-'), ('chr13', '+'), ('chr13', '-'), ('chr14', '+'), ('chr14', '-'), ('chr15', '+'), ('chr15', '-'), ('chr16', '+'), ('chr16', '-'), ('chr17', '+'), ('chr17', '-'), ('chr18', '+'), ('chr18', '-'), ('chr19', '+'), ('chr19', '-'), ('chr20', '+'), ('chr20', '-'), ('chr21', '+'), ('chr21', '-'), ('chr22', '+'), ('chr22', '-'), ('chrX', '+'), ('chrX', '-'), ('chrY', '+'), ('chrY', '-')]

    >>> gr.unstrand().keys()
    ['chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', 'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', 'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', 'chrX', 'chrY']
    """
    # Iterating the internal dfs dict yields its keys; natsorted returns
    # them as a naturally-ordered list (chr2 before chr10, etc.).
    return natsorted(self.dfs)
def k_nearest(self, other, k=1, ties=None, strandedness=None, overlap=True, how=None, suffix="_b", nb_cpu=1, apply_strand_suffix=None):
    """Find k nearest intervals.

    Parameters
    ----------
    other : PyRanges
        PyRanges to find nearest interval in.

    k : int or list/array/Series of int
        Number of closest to return. If iterable, must be same length as PyRanges.

    ties : {None, "first", "last", "different"}, default None
        How to resolve ties, i.e. closest intervals with equal distance. None means that the k nearest intervals are kept.
        "first" means that the first tie is kept, "last" meanst that the last is kept.
        "different" means that all nearest intervals with the k unique nearest distances are kept.

    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.

    overlap : bool, default True
        Whether to include overlaps.

    how : {None, "upstream", "downstream"}, default None, i.e. both directions
        Whether to only look for nearest in one direction. Always with respect to the PyRanges
        it is called on.

    suffix : str, default "_b"
        Suffix to give columns with shared name in other.

    apply_strand_suffix : bool, default None
        If first pyranges is unstranded, but the second is not, the first will be given a strand column.
        apply_strand_suffix makes the added strand column a regular data column instead by adding a suffix.

    nb_cpu: int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with columns of nearest interval horizontally appended.

    Notes
    -----
    nearest also exists, and is more performant.

    See also
    --------
    PyRanges.new_position : give joined PyRanges new coordinates
    PyRanges.nearest : find nearest intervals

    Examples
    --------
    >>> f1 = pr.from_dict({'Chromosome': ['chr1', 'chr1', 'chr1'], 'Start': [3, 8, 5],
    ...                    'End': [6, 9, 7], 'Strand': ['+', '+', '-']})
    >>> f1
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         3 |         6 | +            |
    | chr1         |         8 |         9 | +            |
    | chr1         |         5 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f2 = pr.from_dict({'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...                    'End': [2, 7], 'Strand': ['+', '-']})
    >>> f2
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         1 |         2 | +            |
    | chr1         |         6 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.k_nearest(f2, k=2)
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    | Chromosome   |     Start |       End | Strand       |   Start_b |     End_b | Strand_b     |   Distance |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |   (int32) | (category)   |    (int32) |
    |--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------|
    | chr1         |         3 |         6 | +            |         6 |         7 | -            |          1 |
    | chr1         |         3 |         6 | +            |         1 |         2 | +            |         -2 |
    | chr1         |         8 |         9 | +            |         6 |         7 | -            |         -2 |
    | chr1         |         8 |         9 | +            |         1 |         2 | +            |         -7 |
    | chr1         |         5 |         7 | -            |         6 |         7 | -            |          0 |
    | chr1         |         5 |         7 | -            |         1 |         2 | +            |          4 |
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    Stranded PyRanges object has 6 rows and 8 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.k_nearest(f2, how="upstream", k=2)
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    | Chromosome   |     Start |       End | Strand       |   Start_b |     End_b | Strand_b     |   Distance |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |   (int32) | (category)   |    (int32) |
    |--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------|
    | chr1         |         3 |         6 | +            |         1 |         2 | +            |         -2 |
    | chr1         |         8 |         9 | +            |         6 |         7 | -            |         -2 |
    | chr1         |         8 |         9 | +            |         1 |         2 | +            |         -7 |
    | chr1         |         5 |         7 | -            |         6 |         7 | -            |          0 |
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    Stranded PyRanges object has 4 rows and 8 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.k_nearest(f2, k=[1, 2, 1])
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    | Chromosome   |     Start |       End | Strand       |   Start_b |     End_b | Strand_b     |   Distance |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |   (int32) | (category)   |    (int32) |
    |--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------|
    | chr1         |         3 |         6 | +            |         6 |         7 | -            |          1 |
    | chr1         |         8 |         9 | +            |         6 |         7 | -            |         -2 |
    | chr1         |         8 |         9 | +            |         1 |         2 | +            |         -7 |
    | chr1         |         5 |         7 | -            |         6 |         7 | -            |          0 |
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    Stranded PyRanges object has 4 rows and 8 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> d1 = {"Chromosome": [1], "Start": [5], "End": [6]}
    >>> d2 = {"Chromosome": 1, "Start": [1] * 2 + [5] * 2 + [9] * 2,
    ...       "End": [3] * 2 + [7] * 2 + [11] * 2, "ID": range(6)}
    >>> gr, gr2 = pr.from_dict(d1), pr.from_dict(d2)
    >>> gr
    +--------------+-----------+-----------+
    |   Chromosome |     Start |       End |
    |   (category) |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    |            1 |         5 |         6 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 1 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr2
    +--------------+-----------+-----------+-----------+
    |   Chromosome |     Start |       End |        ID |
    |   (category) |   (int32) |   (int32) |   (int64) |
    |--------------+-----------+-----------+-----------|
    |            1 |         1 |         3 |         0 |
    |            1 |         1 |         3 |         1 |
    |            1 |         5 |         7 |         2 |
    |            1 |         5 |         7 |         3 |
    |            1 |         9 |        11 |         4 |
    |            1 |         9 |        11 |         5 |
    +--------------+-----------+-----------+-----------+
    Unstranded PyRanges object has 6 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.k_nearest(gr2, k=2)
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    |   Chromosome |     Start |       End |   Start_b |     End_b |        ID |   Distance |
    |   (category) |   (int32) |   (int32) |   (int32) |   (int32) |   (int64) |    (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------+------------|
    |            1 |         5 |         6 |         5 |         7 |         2 |          0 |
    |            1 |         5 |         6 |         5 |         7 |         3 |          0 |
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.k_nearest(gr2, k=2, ties="different")
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    |   Chromosome |     Start |       End |   Start_b |     End_b |        ID |   Distance |
    |   (category) |   (int32) |   (int32) |   (int32) |   (int32) |   (int64) |    (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------+------------|
    |            1 |         5 |         6 |         5 |         7 |         2 |          0 |
    |            1 |         5 |         6 |         5 |         7 |         3 |          0 |
    |            1 |         5 |         6 |         1 |         3 |         1 |         -3 |
    |            1 |         5 |         6 |         1 |         3 |         0 |         -3 |
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    Unstranded PyRanges object has 4 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.k_nearest(gr2, k=3, ties="first")
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    |   Chromosome |     Start |       End |   Start_b |     End_b |        ID |   Distance |
    |   (category) |   (int32) |   (int32) |   (int32) |   (int32) |   (int64) |    (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------+------------|
    |            1 |         5 |         6 |         5 |         7 |         2 |          0 |
    |            1 |         5 |         6 |         1 |         3 |         1 |         -3 |
    |            1 |         5 |         6 |         9 |        11 |         4 |          4 |
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    Unstranded PyRanges object has 3 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.k_nearest(gr2, k=1, overlap=False)
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    |   Chromosome |     Start |       End |   Start_b |     End_b |        ID |   Distance |
    |   (category) |   (int32) |   (int32) |   (int32) |   (int32) |   (int64) |    (int32) |
    |--------------+-----------+-----------+-----------+-----------+-----------+------------|
    |            1 |         5 |         6 |         1 |         3 |         1 |         -3 |
    |            1 |         5 |         6 |         1 |         3 |         0 |         -3 |
    +--------------+-----------+-----------+-----------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.k_nearest import _nearest
    from sorted_nearest import get_all_ties, get_different_ties

    # FIX: the user-supplied suffix was previously never put into kwargs, so
    # fill_kwargs' default ("_b") silently overrode it everywhere below.
    kwargs = {"strandedness": strandedness, "how": how, "overlap": overlap, "nb_cpu": nb_cpu,
              "k": k, "ties": ties, "suffix": suffix}
    kwargs = fill_kwargs(kwargs)
    kwargs["stranded"] = self.stranded and other.stranded

    overlap = kwargs.get("overlap", True)
    ties = kwargs.get("ties", False)

    self = self.copy()

    try:  # if k is a Series, use its underlying array
        k = k.values
    except AttributeError:  # FIX: was a bare except; plain int/list/array has no .values
        pass

    # how many nearest to find; might be different for each interval
    self.__k__ = k
    # give each interval its own unique ID so rows can be regrouped later
    self.__IX__ = np.arange(len(self))

    dfs = pyrange_apply(_nearest, self, other, **kwargs)
    nearest = PyRanges(dfs)

    if not overlap:
        result = nearest
    else:
        from collections import defaultdict
        # only "first"/"last" ties restrict which overlap rows the join keeps
        overlap_how = defaultdict(lambda: None, {"first": "first", "last": "last"})[kwargs.get("ties")]
        # FIX: forward the caller's suffix so overlap columns match the nearest columns
        overlaps = self.join(other, strandedness=strandedness, how=overlap_how, suffix=kwargs["suffix"], nb_cpu=nb_cpu, apply_strand_suffix=apply_strand_suffix)
        overlaps.Distance = 0  # overlapping intervals are at distance zero by definition
        result = pr.concat([overlaps, nearest])

    if not len(result):
        return pr.PyRanges()

    new_result = {}
    if ties in ["first", "last"]:
        for c, df in result:
            # order rows per query interval by distance, then keep k per interval
            df = df.sort_values(["__IX__", "Distance"])
            grpby = df.groupby("__k__", sort=False)
            dfs = []
            for k, kdf in grpby:
                grpby2 = kdf.groupby("__IX__", sort=False)
                _df = grpby2.head(k)
                dfs.append(_df)
            if dfs:
                new_result[c] = pd.concat(dfs)
    elif ties == "different" or not ties:
        for c, df in result:
            if df.empty:
                continue
            dfs = []
            df = df.sort_values(["__IX__", "Distance"])
            grpby = df.groupby("__k__", sort=False)
            for k, kdf in grpby:
                if ties:
                    # keep all rows sharing the k smallest distinct distances
                    lx = get_different_ties(kdf.index.values, kdf.__IX__.values, kdf.Distance.astype(np.int64).values, k)
                    _df = kdf.reindex(lx)
                else:
                    # keep all ties, then cap at k rows per query interval
                    lx = get_all_ties(kdf.index.values, kdf.__IX__.values, kdf.Distance.astype(np.int64).values, k)
                    _df = kdf.reindex(lx)
                    _df = _df.groupby("__IX__").head(k)
                dfs.append(_df)
            if dfs:
                new_result[c] = pd.concat(dfs)

    result = pr.PyRanges(new_result)

    # restore the original row order of self
    if not result.__IX__.is_monotonic:
        result = result.sort("__IX__")

    result = result.drop(like="__IX__|__k__")
    self = self.drop(like="__k__|__IX__")

    def prev_to_neg(df, **kwargs):
        # sign convention: intervals behind the query (w.r.t. strand) get a
        # negative distance
        strand = df.Strand.iloc[0] if "Strand" in df else "+"
        suffix = kwargs["suffix"]
        bools = df["End" + suffix] < df.Start
        if not strand == "+":
            bools = ~bools
        df.loc[bools, "Distance"] = -df.loc[bools, "Distance"]
        return df

    result = result.apply(prev_to_neg, suffix=kwargs["suffix"])

    if not self.stranded and other.stranded:
        if apply_strand_suffix is None:
            import sys
            print("join: Strand data from other will be added as strand data to self.\nIf this is undesired use the flag apply_strand_suffix=False.\nTo turn off the warning set apply_strand_suffix to True or False.", file=sys.stderr)
        elif apply_strand_suffix:
            result.columns = result.columns.str.replace("Strand", "Strand" + kwargs["suffix"])

    return result
@property
def length(self):
    """Return the total length of the intervals.

    See Also
    --------
    PyRanges.lengths : return the intervals lengths

    Examples
    --------
    >>> gr = pr.data.f1()
    >>> gr
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |
    | chr1         |         8 |         9 | interval3  |         0 | +            |
    | chr1         |         5 |         7 | interval2  |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.length
    6

    To find the length of the genome covered by the intervals, use merge first:

    >>> gr.merge(strand=False).length
    5
    """
    # Sum the per-interval lengths; cast from a numpy scalar to a plain int.
    total = self.lengths(as_dict=False).sum()
    return int(total)
def lengths(self, as_dict=False):
    """Return the length of each interval.

    Parameters
    ----------
    as_dict : bool, default False
        Whether to return lengths as Series or dict of Series per key.

    Returns
    -------
    Series or dict of Series with the lengths of each interval.

    See Also
    --------
    PyRanges.lengths : return the intervals lengths

    Examples
    --------
    >>> gr = pr.data.f1()
    >>> gr
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |
    | chr1         |         8 |         9 | interval3  |         0 | +            |
    | chr1         |         5 |         7 | interval2  |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.lengths()
    0    3
    1    1
    2    2
    dtype: int32

    To find the length of the genome covered by the intervals, use merge first:

    >>> gr.Length = gr.lengths()
    >>> gr
    +--------------+-----------+-----------+------------+-----------+--------------+-----------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |    Length |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |   (int32) |
    |--------------+-----------+-----------+------------+-----------+--------------+-----------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |         3 |
    | chr1         |         8 |         9 | interval3  |         0 | +            |         1 |
    | chr1         |         5 |         7 | interval2  |         0 | -            |         2 |
    +--------------+-----------+-----------+------------+-----------+--------------+-----------+
    Stranded PyRanges object has 3 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    if as_dict:
        # one Series of lengths per chromosome(/strand) key
        if not len(self):
            return {}
        return {key: df.End - df.Start for key, df in self.items()}

    # single flat Series; empty PyRanges yields an empty int array
    if not len(self):
        return np.array([], dtype=int)
    per_key = [df.End - df.Start for _, df in self]
    return pd.concat(per_key).reset_index(drop=True)
def max_disjoint(self, strand=None, slack=0, **kwargs):
    """Find the maximal disjoint set of intervals.

    Parameters
    ----------
    strand : bool, default None, i.e. auto
        Find the max disjoint set separately for each strand.

    slack : int, default 0
        Consider intervals within a distance of slack to be overlapping.

    Returns
    -------
    PyRanges
        PyRanges with maximal disjoint set of intervals.

    Examples
    --------
    >>> gr = pr.data.f1()
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |
    | chr1         |         8 |         9 | interval3  |         0 | +            |
    | chr1         |         5 |         7 | interval2  |         0 | -            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.max_disjoint(strand=False)
    +--------------+-----------+-----------+------------+-----------+--------------+
    | Chromosome   |     Start |       End | Name       |     Score | Strand       |
    | (category)   |   (int32) |   (int32) | (object)   |   (int64) | (category)   |
    |--------------+-----------+-----------+------------+-----------+--------------|
    | chr1         |         3 |         6 | interval1  |         0 | +            |
    | chr1         |         8 |         9 | interval3  |         0 | +            |
    +--------------+-----------+-----------+------------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    from pyranges.methods.max_disjoint import _max_disjoint

    # default to per-strand processing when the PyRanges is stranded
    use_strand = self.stranded if strand is None else strand
    opts = fill_kwargs({"strand": use_strand, "slack": slack})

    dfs = pyrange_apply_single(_max_disjoint, self, **opts)
    return pr.PyRanges(dfs)
def merge(self, strand=None, count=False, count_col="Count", by=None, slack=0):
    """Merge overlapping intervals into one.

    Parameters
    ----------
    strand : bool, default None, i.e. auto
        Only merge intervals on same strand.

    count : bool, default False
        Count intervals in each superinterval.

    count_col : str, default "Count"
        Name of column with counts.

    by : str or list of str, default None
        Only merge intervals with equal values in these columns.

    slack : int, default 0
        Allow this many nucleotides between each interval to merge.

    Returns
    -------
    PyRanges
        PyRanges with superintervals.

    Notes
    -----
    To avoid losing metadata, use cluster instead. If you want to perform a reduction function
    on the metadata, use pandas groupby.

    See Also
    --------
    PyRanges.cluster : annotate overlapping intervals with common ID

    Examples
    --------
    >>> gr = pr.data.ensembl_gtf()[["Feature", "gene_name"]]
    >>> gr
    +--------------+--------------+-----------+-----------+--------------+-------------+
    | Chromosome   | Feature      | Start     | End       | Strand       | gene_name   |
    | (category)   | (category)   | (int32)   | (int32)   | (category)   | (object)    |
    |--------------+--------------+-----------+-----------+--------------+-------------|
    | 1            | gene         | 11868     | 14409     | +            | DDX11L1     |
    | 1            | transcript   | 11868     | 14409     | +            | DDX11L1     |
    | 1            | exon         | 11868     | 12227     | +            | DDX11L1     |
    | 1            | exon         | 12612     | 12721     | +            | DDX11L1     |
    | ...          | ...          | ...       | ...       | ...          | ...         |
    | 1            | gene         | 1173055   | 1179555   | -            | TTLL10-AS1  |
    | 1            | transcript   | 1173055   | 1179555   | -            | TTLL10-AS1  |
    | 1            | exon         | 1179364   | 1179555   | -            | TTLL10-AS1  |
    | 1            | exon         | 1173055   | 1176396   | -            | TTLL10-AS1  |
    +--------------+--------------+-----------+-----------+--------------+-------------+
    Stranded PyRanges object has 2,446 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.merge(count=True, count_col="Count")
    +--------------+-----------+-----------+--------------+-----------+
    | Chromosome   |     Start |       End | Strand       |     Count |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |
    |--------------+-----------+-----------+--------------+-----------|
    | 1            |     11868 |     14409 | +            |        12 |
    | 1            |     29553 |     31109 | +            |        11 |
    | 1            |     52472 |     53312 | +            |         3 |
    | 1            |     57597 |     64116 | +            |         7 |
    | ...          |       ... |       ... | ...          |       ... |
    | 1            |   1062207 |   1063288 | -            |         4 |
    | 1            |   1070966 |   1074306 | -            |        10 |
    | 1            |   1081817 |   1116361 | -            |       319 |
    | 1            |   1173055 |   1179555 | -            |         4 |
    +--------------+-----------+-----------+--------------+-----------+
    Stranded PyRanges object has 62 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.merge(by="Feature", count=True)
    +--------------+-----------+-----------+--------------+--------------+-----------+
    | Chromosome   |     Start |       End | Strand       | Feature      |     Count |
    | (category)   |   (int32) |   (int32) | (category)   | (category)   |   (int32) |
    |--------------+-----------+-----------+--------------+--------------+-----------|
    | 1            |     65564 |     65573 | +            | CDS          |         1 |
    | 1            |     69036 |     70005 | +            | CDS          |         2 |
    | 1            |    924431 |    924948 | +            | CDS          |         1 |
    | 1            |    925921 |    926013 | +            | CDS          |        11 |
    | ...          |       ... |       ... | ...          | ...          |       ... |
    | 1            |   1062207 |   1063288 | -            | transcript   |         1 |
    | 1            |   1070966 |   1074306 | -            | transcript   |         1 |
    | 1            |   1081817 |   1116361 | -            | transcript   |        19 |
    | 1            |   1173055 |   1179555 | -            | transcript   |         1 |
    +--------------+-----------+-----------+--------------+--------------+-----------+
    Stranded PyRanges object has 748 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.merge(by=["Feature", "gene_name"], count=True)
    +--------------+-----------+-----------+--------------+--------------+-------------+-----------+
    | Chromosome   |     Start |       End | Strand       | Feature      | gene_name   |     Count |
    | (category)   |   (int32) |   (int32) | (category)   | (category)   | (object)    |   (int32) |
    |--------------+-----------+-----------+--------------+--------------+-------------+-----------|
    | 1            |   1020172 |   1020373 | +            | CDS          | AGRN        |         1 |
    | 1            |   1022200 |   1022462 | +            | CDS          | AGRN        |         2 |
    | 1            |   1034555 |   1034703 | +            | CDS          | AGRN        |         2 |
    | 1            |   1035276 |   1035324 | +            | CDS          | AGRN        |         4 |
    | ...          |       ... |       ... | ...          | ...          | ...         |       ... |
    | 1            |    347981 |    348366 | -            | transcript   | RPL23AP24   |         1 |
    | 1            |   1173055 |   1179555 | -            | transcript   | TTLL10-AS1  |         1 |
    | 1            |     14403 |     29570 | -            | transcript   | WASH7P      |         1 |
    | 1            |    185216 |    195411 | -            | transcript   | WASH9P      |         1 |
    +--------------+-----------+-----------+--------------+--------------+-------------+-----------+
    Stranded PyRanges object has 807 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    # default to per-strand merging when the PyRanges is stranded
    if strand is None:
        strand = self.stranded

    args = {"strand": strand, "count": count, "by": by, "count_col": count_col, "slack": slack}

    if by:
        # grouped merge needs the metadata columns, so no sparse view
        from pyranges.methods.merge import _merge_by
        args["sparse"] = {"self": False}
        dfs = pyrange_apply_single(_merge_by, self, **args)
    else:
        # plain merge only needs the coordinate columns
        from pyranges.methods.merge import _merge
        args["sparse"] = {"self": True}
        dfs = pyrange_apply_single(_merge, self, **args)

    return PyRanges(dfs)
def mp(self, n=8, formatting=None):
    """Merge location and print.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, merge_position=True, formatting=formatting)
    print(rendered)
def mpc(self, n=8, formatting=None):
    """Merge location, print and return self.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, merge_position=True, formatting=formatting)
    print(rendered)
    # return self so the call can be chained
    return self
def msp(self, n=30, formatting=None):
    """Sort on location, merge location info and print.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, merge_position=True, sort=True, formatting=formatting)
    print(rendered)
def mspc(self, n=30, formatting=None):
    """Sort on location, merge location, print and return self.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, merge_position=True, sort=True, formatting=formatting)
    print(rendered)
    # return self so the call can be chained
    return self
def nearest(self, other, strandedness=None, overlap=True, how=None, suffix="_b", nb_cpu=1, apply_strand_suffix=None):
    """Find closest interval.

    Parameters
    ----------
    other : PyRanges
        PyRanges to find nearest interval in.

    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are strande,
        otherwise ignore the strand information.

    overlap : bool, default True
        Whether to include overlaps.

    how : {None, "upstream", "downstream"}, default None, i.e. both directions
        Whether to only look for nearest in one direction. Always with respect to the PyRanges
        it is called on.

    suffix : str, default "_b"
        Suffix to give columns with shared name in other.

    apply_strand_suffix : bool, default None
        If first pyranges is unstranded, but the second is not, the first will be given the strand column of the second.
        apply_strand_suffix makes the added strand column a regular data column instead by adding a suffix.

    nb_cpu: int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with columns representing nearest interval horizontally appended.

    Notes
    -----
    A k_nearest also exists, but is less performant.

    See also
    --------
    PyRanges.new_position : give joined PyRanges new coordinates
    PyRanges.k_nearest : find k nearest intervals

    Examples
    --------
    >>> f1 = pr.from_dict({'Chromosome': ['chr1', 'chr1', 'chr1'], 'Start': [3, 8, 5],
    ...                    'End': [6, 9, 7], 'Strand': ['+', '+', '-']})
    >>> f1
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         3 |         6 | +            |
    | chr1         |         8 |         9 | +            |
    | chr1         |         5 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 3 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f2 = pr.from_dict({'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...                    'End': [2, 7], 'Strand': ['+', '-']})
    >>> f2
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         1 |         2 | +            |
    | chr1         |         6 |         7 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.nearest(f2)
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    | Chromosome   |     Start |       End | Strand       |   Start_b |     End_b | Strand_b     |   Distance |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |   (int32) | (category)   |    (int64) |
    |--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------|
    | chr1         |         3 |         6 | +            |         6 |         7 | -            |          1 |
    | chr1         |         8 |         9 | +            |         6 |         7 | -            |          2 |
    | chr1         |         5 |         7 | -            |         6 |         7 | -            |          0 |
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    Stranded PyRanges object has 3 rows and 8 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> f1.nearest(f2, how="upstream")
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    | Chromosome   |     Start |       End | Strand       |   Start_b |     End_b | Strand_b     |   Distance |
    | (category)   |   (int32) |   (int32) | (category)   |   (int32) |   (int32) | (category)   |    (int64) |
    |--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------|
    | chr1         |         3 |         6 | +            |         1 |         2 | +            |          2 |
    | chr1         |         8 |         9 | +            |         6 |         7 | -            |          2 |
    | chr1         |         5 |         7 | -            |         6 |         7 | -            |          0 |
    +--------------+-----------+-----------+--------------+-----------+-----------+--------------+------------+
    Stranded PyRanges object has 3 rows and 8 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """
    from pyranges.methods.nearest import _nearest

    opts = fill_kwargs({
        "strandedness": strandedness,
        "how": how,
        "overlap": overlap,
        "nb_cpu": nb_cpu,
        "suffix": suffix,
        "apply_strand_suffix": apply_strand_suffix,
    })

    # directional search is only meaningful when other carries strand info
    if opts.get("how") in ("upstream", "downstream"):
        assert other.stranded, "If doing upstream or downstream nearest, other pyranges must be stranded"

    gr = PyRanges(pyrange_apply(_nearest, self, other, **opts))

    # self picks up other's strand column; warn or suffix it per the flag
    if not self.stranded and other.stranded:
        if apply_strand_suffix is None:
            import sys
            print("join: Strand data from other will be added as strand data to self.\nIf this is undesired use the flag apply_strand_suffix=False.\nTo turn off the warning set apply_strand_suffix to True or False.", file=sys.stderr)
        elif apply_strand_suffix:
            gr.columns = gr.columns.str.replace("Strand", "Strand" + opts["suffix"])

    return gr
def new_position(self, new_pos, columns=None):
    """Give new position.

    The operation join produces a PyRanges with two pairs of start coordinates and two pairs of
    end coordinates. This operation uses these to give the PyRanges a new position.

    Parameters
    ----------
    new_pos : {"union", "intersection", "swap"}
        Change of coordinates.

    columns : tuple of str, default None, i.e. auto
        The name of the coordinate columns. By default uses the two first columns containing
        "Start" and the two first columns containing "End".

    See Also
    --------
    PyRanges.join : combine two PyRanges horizontally with SQL-style joins.

    Returns
    -------
    PyRanges
        PyRanges with new coordinates.

    Examples
    --------
    >>> gr = pr.from_dict({'Chromosome': ['chr1', 'chr1', 'chr1'],
    ...                    'Start': [3, 8, 5], 'End': [6, 9, 7]})
    >>> gr
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | chr1         |         3 |         6 |
    | chr1         |         8 |         9 |
    | chr1         |         5 |         7 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 3 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr2 = pr.from_dict({'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...                     'End': [4, 7]})
    >>> gr2
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | chr1         |         1 |         4 |
    | chr1         |         6 |         7 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j = gr.join(gr2)
    >>> j
    +--------------+-----------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |   Start_b |     End_b |
    | (category)   |   (int32) |   (int32) |   (int32) |   (int32) |
    |--------------+-----------+-----------+-----------+-----------|
    | chr1         |         3 |         6 |         1 |         4 |
    | chr1         |         5 |         7 |         6 |         7 |
    +--------------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j.new_position("swap")
    +--------------+-----------+-----------+-----------+-----------+
    | Chromosome   |     Start |       End |   Start_b |     End_b |
    | (category)   |   (int32) |   (int32) |   (int32) |   (int32) |
    |--------------+-----------+-----------+-----------+-----------|
    | chr1         |         1 |         4 |         3 |         6 |
    | chr1         |         6 |         7 |         5 |         7 |
    +--------------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j.new_position("union").mp()
    +--------------------+-----------+-----------+
    | - Position -       |   Start_b |     End_b |
    | (Multiple types)   |   (int32) |   (int32) |
    |--------------------+-----------+-----------|
    | chr1 1-6           |         1 |         4 |
    | chr1 5-7           |         6 |         7 |
    +--------------------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j.new_position("intersection").mp()
    +--------------------+-----------+-----------+
    | - Position -       |   Start_b |     End_b |
    | (Multiple types)   |   (int32) |   (int32) |
    |--------------------+-----------+-----------|
    | chr1 1-4           |         1 |         4 |
    | chr1 6-7           |         6 |         7 |
    +--------------------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j2 = pr.from_dict({"Chromosome": [1], "Start": [3],
    ...                    "End": [4], "A": [1], "B": [3], "C": [2], "D": [5]})
    >>> j2
    +--------------+-----------+-----------+-----------+-----------+-----------+-----------+
    |   Chromosome |     Start |       End |         A |         B |         C |         D |
    |   (category) |   (int32) |   (int32) |   (int64) |   (int64) |   (int64) |   (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------+-----------|
    |            1 |         3 |         4 |         1 |         3 |         2 |         5 |
    +--------------+-----------+-----------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 1 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> j2.new_position("intersection", ("A", "B", "C", "D"))
    +--------------+-----------+-----------+-----------+-----------+-----------+-----------+
    |   Chromosome |     Start |       End |         A |         B |         C |         D |
    |   (category) |   (int32) |   (int32) |   (int64) |   (int64) |   (int64) |   (int64) |
    |--------------+-----------+-----------+-----------+-----------+-----------+-----------|
    |            1 |         2 |         3 |         1 |         3 |         2 |         5 |
    +--------------+-----------+-----------+-----------+-----------+-----------+-----------+
    Unstranded PyRanges object has 1 rows and 7 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.new_position import _new_position

    # nothing to reposition in an empty PyRanges
    if self.empty:
        return self

    # auto-detect the two coordinate pairs from column names when not given
    if columns is None:
        start1, start2 = self.columns[self.columns.str.contains("Start")][:2]
        end1, end2 = self.columns[self.columns.str.contains("End")][:2]
        columns = (start1, end1, start2, end2)

    opts = fill_kwargs({
        "strand": None,
        "sparse": {"self": False},
        "new_pos": new_pos,
        "columns": columns,
    })

    dfs = pyrange_apply_single(_new_position, self, **opts)
    return pr.PyRanges(dfs)
def overlap(self, other, strandedness=None, how="first", invert=False, nb_cpu=1):
    """Return overlapping intervals.

    Returns the intervals in self which overlap with those in other.

    Parameters
    ----------
    other : PyRanges
        PyRanges to find overlaps with.
    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.
    how : {"first", "containment", False, None}, default "first"
        What intervals to report. By default reports every interval in self with overlap once.
        "containment" reports all intervals where the overlapping is contained within it.
    invert : bool, default False
        Whether to return the intervals without overlaps.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with overlapping intervals.

    See also
    --------
    PyRanges.intersect : report overlapping subintervals
    PyRanges.set_intersect : set-intersect PyRanges
    """
    opts = fill_kwargs({
        "strandedness": strandedness,
        "nb_cpu": nb_cpu,
        "sparse": {"self": False, "other": True},
        "how": how,
        "invert": invert,
    })

    if not len(self):
        return self

    if invert:
        # Tag every row with its positional index so the rows that DID
        # overlap can be subtracted from self afterwards.
        self = self.copy()
        self.__ix__ = np.arange(len(self))

    overlapping = pr.PyRanges(pyrange_apply(_overlap, self, other, **opts))

    if invert:
        matched = getattr(overlapping, "__ix__", [])
        overlapping = self[~self.__ix__.isin(matched)]
        overlapping = overlapping.drop("__ix__")

    return overlapping
def pc(self, n=8, formatting=None):
    """Print self and return it; useful inside method-call chains.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    formatted = tostring(self, n=n, formatting=formatting)
    print(formatted)
    return self
def print(self, n=8, merge_position=False, sort=False, formatting=None, chain=False):
    """Print the PyRanges.

    Parameters
    ----------
    n : int, default 8
        The number of rows to print.
    merge_position : bool, default False
        Print location in one column to save screen space.
    sort : bool or str, default False
        Sort the PyRanges before printing. Will print chromosomes or strands interleaved
        on sort columns.
    formatting : dict, default None
        Per-column format strings, e.g. ``{"Start": "{:,}"}``.
    chain : bool, default False
        Return the PyRanges. Useful to print intermediate results in call chains.

    See Also
    --------
    PyRanges.pc : print chain
    PyRanges.sp : sort print
    PyRanges.mp : merge print
    PyRanges.spc : sort print chain
    PyRanges.mpc : merge print chain
    PyRanges.msp : merge sort print
    PyRanges.mspc : merge sort print chain
    PyRanges.rp : raw print dictionary of DataFrames
    """
    rendered = tostring(
        self,
        n=n,
        merge_position=merge_position,
        sort=sort,
        formatting=formatting,
    )
    print(rendered)
    if chain:
        return self
def rp(self):
    """Print the raw dict of DataFrames backing this PyRanges.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    print(self.dfs)
def rpc(self):
    """Print the raw dict of DataFrames and return self for chaining.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    print(self.dfs)
    return self
def sample(self, n=8, replace=False):
    """Subsample arbitrary rows of PyRanges.

    If n is larger than the length of the PyRanges, replace must be True.

    Parameters
    ----------
    n : int, default 8
        Number of rows to return.

    replace : bool, default False
        Whether rows may be sampled more than once.

    Returns
    -------
    PyRanges
        PyRanges with n randomly sampled rows.

    Examples
    --------
    >>> gr = pr.data.chipseq()
    >>> np.random.seed(0)
    >>> len(gr.sample(n=3))
    3
    >>> gr.sample(10001)
    Traceback (most recent call last):
    ...
    ValueError: Cannot take a larger sample than population when 'replace=False'
    """
    # BUGFIX: `replace` was accepted and documented but previously ignored
    # (replace=False was hard-coded), so sampling with replacement never worked.
    chosen = np.random.choice(len(self), size=n, replace=replace)
    # `np.bool` was removed in NumPy >= 1.24; the builtin bool dtype is the
    # supported spelling.
    subsetter = np.zeros(len(self), dtype=bool)
    subsetter[chosen] = True
    return self[subsetter]
def set_intersect(self, other, strandedness=None, how=None, new_pos=False, nb_cpu=1):
    """Return set-theoretical intersection.

    Like intersect, but both PyRanges are merged first.

    Parameters
    ----------
    other : PyRanges
        PyRanges to set-intersect.
    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.
    how : {None, "first", "last", "containment"}, default None, i.e. all
        What intervals to report. By default reports all overlapping intervals.
        "containment" reports intervals where the overlapping is contained within it.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with overlapping subintervals.

    See also
    --------
    PyRanges.intersect : find overlapping subintervals
    PyRanges.overlap : report overlapping intervals
    """
    opts = fill_kwargs({
        "strandedness": strandedness,
        "how": how,
        "nb_cpu": nb_cpu,
        "new_pos": new_pos,
    })

    # Merge both sides first so the intersection is set-theoretical.
    use_strand = bool(strandedness)
    merged_self = self.merge(strand=use_strand)
    merged_other = other.merge(strand=use_strand)

    dfs = pyrange_apply(_intersection, merged_self, merged_other, **opts)
    return PyRanges(dfs)
def set_union(self, other, strandedness=None, nb_cpu=1):
    """Return set-theoretical union.

    Parameters
    ----------
    other : PyRanges
        PyRanges to do union with.
    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        A PyRanges with the union of intervals.

    See also
    --------
    PyRanges.set_intersect : set-theoretical intersection
    PyRanges.overlap : report overlapping intervals
    """
    if self.empty and other.empty:
        return pr.PyRanges()

    use_strand = bool(strandedness)
    if not use_strand:
        self = self.unstrand()
        other = other.unstrand()

    if strandedness == "opposite" and len(other):
        # Flip strands on the other PyRanges so "opposite" comparisons
        # become plain same-strand comparisons.
        other = other.copy()
        other.Strand = other.Strand.replace({"+": "-", "-": "+"})

    combined = pr.concat([self, other], use_strand)
    return combined.merge(strand=use_strand)
def sort(self, by=None, nb_cpu=1):
    """Sort by position or columns.

    Parameters
    ----------
    by : str or list of str, default None
        Columns to sort by. Default is Start and End.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Note
    ----
    Since a PyRanges contains multiple DataFrames, the sorting only happens within
    dataframes.

    Returns
    -------
    PyRanges
        Sorted PyRanges

    See Also
    --------
    pyranges.multioverlap : find overlaps with multiple PyRanges
    """
    from pyranges.methods.sort import _sort

    opts = {"strand": self.stranded, "sparse": {"self": False}}
    if by:
        opts["by"] = by
    opts = fill_kwargs(opts)

    sorted_dfs = pyrange_apply_single(_sort, self, **opts)
    return PyRanges(sorted_dfs)
def sp(self, n=30, formatting=None):
    """Sort on location and print.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, sort=True, formatting=formatting)
    print(rendered)
def spc(self, n=30, formatting=None):
    """Sort on location, print and return self for chaining.

    See Also
    --------
    PyRanges.print : print PyRanges."""
    rendered = tostring(self, n=n, sort=True, formatting=formatting)
    print(rendered)
    return self
def slack(self, slack):
    """Deprecated: this function has been moved to Pyranges.extend"""
    # Delegate straight to the replacement API.
    return self.extend(slack)
def spliced_subsequence(self, start=0, end=None, by=None, strand=None, **kwargs):
    """Get subsequences of the intervals, in spliced-transcript coordinates (no introns).

    The returned intervals are subregions of self, cut according to specifications.
    Start and end are relative to the 5' end: 0 means the leftmost nucleotide for
    + strand intervals, while it means the rightmost one for - strand.

    Groups of intervals (e.g. exons belonging to the same transcript) can be
    manipulated through the 'by' argument; start and end then refer to the
    spliced transcript coordinates.

    Parameters
    ----------
    start : int
        Start of subregion, 0-based and included, counting from the 5' end.
        Use a negative int to count from the 3' (e.g. -1 is the last nucleotide)
    end : int, default None
        End of subregion, 0-based and excluded, counting from the 5' end.
        Use a negative int to count from the 3' (e.g. -1 is the last nucleotide)
        If None, the existing 3' end is returned.
    by : list of str, default None
        intervals are grouped by this/these ID column(s) beforehand, e.g. exons
        belonging to same transcripts
    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or chromosomes. If None,
        will use chromosome/strand pairs if the PyRanges is stranded.

    Returns
    -------
    PyRanges
        Subregion of self, subsequenced as specified by arguments

    Note
    ----
    If the request goes out of bounds (e.g. requesting 100 nts for a 90nt region),
    only the existing portion is returned

    See also
    --------
    subsequence : analogous to this method, but input coordinates refer to the
    unspliced transcript
    """
    from pyranges.methods.spliced_subsequence import _spliced_subseq

    if strand is None:
        strand = bool(self.stranded)

    kwargs.update({"strand": strand, "by": by, "start": start, "end": end})
    kwargs = fill_kwargs(kwargs)

    # Sort so coordinates are computed over exons in positional order.
    ordered = self.sort()
    result = pyrange_apply_single(_spliced_subseq, ordered, **kwargs)
    return pr.PyRanges(result)
# NOTE(review): `spliced_subsequence` is defined twice in this class; this
# later definition shadows the earlier, more extensively documented one.
# The bodies are identical, so runtime behavior is unchanged, but the
# duplicate should be removed in a follow-up.
def spliced_subsequence(self, start=0, end=None, by=None, strand=None, **kwargs):
    """ Get subsequences of the intervals, using coordinates mapping to spliced transcripts (without introns)
    The returned intervals are subregions of self, cut according to specifications.
    Start and end are relative to the 5' end: 0 means the leftmost nucleotide for + strand
    intervals, while it means the rightmost one for - strand.
    This method also allows to manipulate groups of intervals (e.g. exons belonging to same transcripts)
    through the 'by' argument. When using it, start and end refer to the spliced transcript coordinates,
    meaning that introns are in the count.
    Parameters
    ----------
    start : int
        Start of subregion, 0-based and included, counting from the 5' end.
        Use a negative int to count from the 3' (e.g. -1 is the last nucleotide)
    end : int, default None
        End of subregion, 0-based and excluded, counting from the 5' end.
        If None, the existing 3' end is returned.
    by : list of str, default None
        intervals are grouped by this/these ID column(s) beforehand, e.g. exons belonging to same transcripts
    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or chromosomes. If None, will use
        chromosome/strand pairs if the PyRanges is stranded.
    Returns
    -------
    PyRanges
        Subregion of self, subsequenced as specified by arguments
    Note
    ----
    If the request goes out of bounds (e.g. requesting 100 nts for a 90nt region), only the existing portion is returned
    See also
    --------
    subsequence : analogous to this method, but input coordinates refer to the unspliced transcript
    """
    from pyranges.methods.spliced_subsequence import _spliced_subseq
    # Default to stranded operation when the PyRanges carries strand info.
    if strand is None:
        strand=True if self.stranded else False
    kwargs.update({"strand": strand, "by": by, "start": start, "end": end})
    kwargs = fill_kwargs(kwargs)
    # Sort first so subsequence coordinates are computed over ordered exons.
    self = self.sort()
    result = pyrange_apply_single(_spliced_subseq, self, **kwargs)
    return pr.PyRanges(result)
def split(self, strand=None, between=False, nb_cpu=1):
    """Split into non-overlapping intervals.

    Parameters
    ----------
    strand : bool, default None, i.e. auto
        Whether to ignore strand information if PyRanges is stranded.
    between : bool, default False
        Include lengths between intervals.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        PyRanges with intervals split at overlap points.

    See Also
    --------
    pyranges.multioverlap : find overlaps with multiple PyRanges
    """
    from pyranges.methods.split import _split

    if strand is None:
        strand = self.stranded

    dfs = pyrange_apply_single(_split, self, **fill_kwargs({"strand": strand}))
    result = pr.PyRanges(dfs)

    if not between:
        # Drop the "between" gaps: keep only pieces overlapping the input.
        result = result.overlap(self, strandedness="same" if strand else False)

    return result
@property
def stranded(self):
    """Whether the PyRanges has (valid) strand info.

    A PyRanges may carry a Strand column and still be considered
    unstranded when that column contains invalid values (e.g. ".");
    such data is stored keyed by chromosome only.

    Returns
    -------
    bool
        True if the underlying data is keyed by (chromosome, strand)
        pairs, or if the PyRanges is empty.

    See Also
    --------
    PyRanges.strands : return the strands
    """
    all_keys = self.keys()
    # Empty PyRanges count as stranded so that strand-aware operations
    # also work with empty dataframes.
    if len(all_keys) == 0:
        return True
    # Stranded data is keyed by (chromosome, strand) tuples; unstranded
    # data is keyed by the chromosome name alone.
    return isinstance(all_keys[0], tuple)
@property
def strands(self):
    """Return the strands, natural-sorted.

    Notes
    -----
    If the PyRanges is not stranded (e.g. the Strand column contains an
    invalid value), an empty list is returned.

    See Also
    --------
    PyRanges.stranded : whether has valid strand info

    Returns
    -------
    list
        The distinct strand values, or [] for unstranded data.
    """
    if not self.stranded:
        return []
    # Keys are (chromosome, strand) tuples when stranded; collect the
    # distinct strand halves.
    unique_strands = {chrom_strand[1] for chrom_strand in self.keys()}
    return natsorted(unique_strands)
def subset(self, f, strand=None, **kwargs):
    """Return a subset of the rows.

    Parameters
    ----------
    f : function
        Function which returns a boolean Series equal in length to each
        underlying dataframe.
    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or
        chromosomes. If None, chromosome/strand pairs are used when the
        PyRanges is stranded.
    **kwargs
        Additional keyword arguments to pass as keyword arguments to `f`.

    Notes
    -----
    A PyRanges can also be subsetted directly with a boolean Series.
    This function is slightly faster, but more cumbersome.

    Returns
    -------
    PyRanges
        PyRanges subset on rows.
    """
    kwargs = fill_kwargs(kwargs)
    strand = self.stranded if strand is None else strand
    # Drop strand info when the caller explicitly requests unstranded
    # processing of stranded data.
    if self.stranded and not strand:
        self = self.unstrand()
    kwargs["strand"] = strand

    result = pyrange_apply_single(f, self, **kwargs)
    if not result:
        return pr.PyRanges()

    # Validate on one returned Series that f produced booleans.
    sample = next(iter(result.values()))
    assert sample.dtype == bool, "result of subset function must be bool, but is {}".format(
        sample.dtype)
    return self[result]
def subsequence(self, start=0, end=None, by=None, strand=None, **kwargs):
    """Get subsequences of the intervals.

    The returned intervals are subregions of self, cut according to
    specifications. Start and end are relative to the 5' end: 0 means the
    leftmost nucleotide for + strand intervals, while it means the
    rightmost one for - strand.

    Groups of intervals (e.g. exons belonging to the same transcript) can
    be manipulated through the `by` argument; start and end then refer to
    the unspliced transcript coordinates, meaning that introns are
    included in the count.

    Parameters
    ----------
    start : int
        Start of subregion, 0-based and included, counting from the 5'
        end. Use a negative int to count from the 3' (e.g. -1 is the
        last nucleotide).
    end : int, default None
        End of subregion, 0-based and excluded, counting from the 5'
        end. Use a negative int to count from the 3'. If None, the
        existing 3' end is returned.
    by : list of str, default None
        Intervals are grouped by this/these ID column(s) beforehand,
        e.g. exons belonging to same transcripts.
    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or
        chromosomes. If None, chromosome/strand pairs are used when the
        PyRanges is stranded.

    Returns
    -------
    PyRanges
        Subregion of self, subsequenced as specified by arguments.

    Note
    ----
    If the request goes out of bounds (e.g. requesting 100 nts for a
    90 nt region), only the existing portion is returned.

    See also
    --------
    spliced_subsequence : analogous to this method, but intronic regions
        are not counted, so input coordinates refer to the spliced
        transcript.
    """
    from pyranges.methods.subsequence import _subseq

    for key, value in (("strand", strand), ("by", by), ("start", start), ("end", end)):
        kwargs[key] = value
    kwargs = fill_kwargs(kwargs)

    # Sort intervals before cutting so groups are traversed in
    # positional order.
    sorted_self = self.sort()
    result = pyrange_apply_single(_subseq, sorted_self, **kwargs)
    return pr.PyRanges(result)
def subtract(self, other, strandedness=None, nb_cpu=1):
    """Subtract intervals.

    Parameters
    ----------
    other : PyRanges
        The intervals to subtract from self.
    strandedness : {None, "same", "opposite", False}, default None, i.e. auto
        Whether to compare PyRanges on the same strand, the opposite or ignore strand
        information. The default, None, means use "same" if both PyRanges are stranded,
        otherwise ignore the strand information.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    PyRanges
        The intervals of self with the regions covered by other removed.

    See Also
    --------
    pyranges.PyRanges.overlap : use with invert=True to return all intervals without overlap

    Examples
    --------
    >>> gr = pr.from_dict({"Chromosome": ["chr1"] * 3, "Start": [1, 4, 10],
    ...                    "End": [3, 9, 11], "ID": ["a", "b", "c"]})
    >>> gr2 = pr.from_dict({"Chromosome": ["chr1"] * 3, "Start": [2, 2, 9], "End": [3, 9, 10]})
    >>> gr.subtract(gr2)
    +--------------+-----------+-----------+------------+
    | Chromosome   |     Start |       End | ID         |
    | (category)   |   (int32) |   (int32) | (object)   |
    |--------------+-----------+-----------+------------|
    | chr1         |         1 |         2 | a          |
    | chr1         |        10 |        11 | c          |
    +--------------+-----------+-----------+------------+
    Unstranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """
    from pyranges.methods.subtraction import _subtraction

    strand = True if strandedness else False
    # Merging other first is sufficient: only the covered regions
    # matter for subtraction, not the individual intervals.
    other_clusters = other.merge(strand=strand)

    # Tag each interval in self with its overlap count; presumably
    # _subtraction uses the "__num__" column to handle intervals with
    # no overlaps cheaply (see pyranges.methods.subtraction).
    self = self.count_overlaps(other_clusters, strandedness=strandedness, overlap_col="__num__")

    kwargs = {"strandedness": strandedness, "sparse": {"self": False, "other": True}}
    kwargs = fill_kwargs(kwargs)
    result = pyrange_apply(_subtraction, self, other_clusters, **kwargs)

    # NOTE: a redundant `self = self.drop("__num__")` was removed here;
    # the local `self` was never used afterwards, so only the result
    # needs the helper column dropped.
    return PyRanges(result).drop("__num__")
def summary(self, to_stdout=True, return_df=False):
    """Return info.

    Count refers to the number of intervals, the rest to the lengths.

    The column "pyrange" describes the data as is. "coverage_forward"
    and "coverage_reverse" describe the data after strand-specific
    merging of overlapping intervals. "coverage_unstranded" describes
    the data after merging, without considering the strands. The row
    "count" is the number of intervals and "sum" is their total length;
    the rest describe the lengths of the intervals.

    Parameters
    ----------
    to_stdout : bool, default True
        Print summary.
    return_df : bool, default False
        Return df with summary.

    Returns
    -------
    None or DataFrame with summary.
    """
    from pyranges.methods.summary import _summary

    # All the work is delegated to the summary helper module.
    stats = _summary(self, to_stdout, return_df)
    return stats
def tail(self, n=8):
    """Return the n last rows.

    Parameters
    ----------
    n : int, default 8
        Return n rows.

    Returns
    -------
    PyRanges
        PyRanges with the n last rows.

    See Also
    --------
    PyRanges.head : return the first rows
    PyRanges.sample : return random rows
    """
    # BUGFIX: `np.bool` was a deprecated alias for the builtin bool and
    # was removed in NumPy 1.24; use the builtin type directly.
    subsetter = np.zeros(len(self), dtype=bool)
    # Mark the last n positions. If n >= len(self), the negative start
    # index wraps to the beginning, so every row is returned.
    subsetter[(len(self) - n):] = True
    return self[subsetter]
def tile(self, tile_size, overlap=False, strand=None, nb_cpu=1):
    """Return overlapping genomic tiles.

    The genome is divided into bookended tiles of length `tile_size`
    and one is returned per overlapping interval.

    Parameters
    ----------
    tile_size : int
        Length of the tiles.
    overlap : bool, default False
        Add column of nucleotide overlap to each tile.
    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or
        chromosomes. If None, chromosome/strand pairs are used when the
        PyRanges is stranded.
    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or
        chromosome/strand tuple. Will only lead to speedups on large
        datasets.

    Returns
    -------
    PyRanges
        Tiled PyRanges.

    See also
    --------
    pyranges.PyRanges.window : divide intervals into windows
    """
    from pyranges.methods.windows import _tiles

    use_strand = self.stranded if strand is None else strand
    kwargs = {
        "strand": use_strand,
        "overlap": overlap,
        "sparse": {"self": False},
        "tile_size": tile_size,
    }
    tiled = pyrange_apply_single(_tiles, self, **kwargs)
    return PyRanges(tiled)
def to_example(self, n=10):
    """Return as dict.

    Used for easily creating examples for copy and pasting.

    Parameters
    ----------
    n : int, default 10
        Number of rows. Half is taken from the start, the other half
        from the end.

    Returns
    -------
    dict
        Mapping of column name to list of values.

    See Also
    --------
    PyRanges.from_dict : create PyRanges from dict
    """
    half = int(min(n, len(self)) / 2)
    if n < len(self):
        # Take half the requested rows from each end of the data.
        example = pr.concat([self.head(half), self.tail(half)])
    else:
        # Fewer rows than requested: use everything.
        example = self
    return {col: list(getattr(example, col)) for col in example.columns}
def three_end(self):
    """Return the 3'-end.

    The 3'-end is the start of intervals on the reverse strand and the
    end of intervals on the forward strand.

    Returns
    -------
    PyRanges
        PyRanges with the 3'.

    See Also
    --------
    PyRanges.five_end : return the five prime end
    """
    # The 3' end is only defined for stranded data.
    assert self.stranded, "Need stranded pyrange to find 3'."
    kwargs = fill_kwargs({"strand": True})
    result = pyrange_apply_single(_tes, self, **kwargs)
    return PyRanges(result)
# def to_bam(self, path=None, header=None, chromosome_sizes=None, chain=False):
# r"""Write to bam.
# Parameters
# ----------
# path : str, default None
# Where to write. If None, returns string representation.
# keep : bool, default True
# Whether to keep all columns, not just Chromosome, Start, End,
# Name, Score, Strand when writing.
# compression : str, compression type to use, by default infer based on extension.
# See pandas.DataFrame.to_csv for more info.
# header : dict
# Header to use in the bamfile. See the pysam docs for how it should look.
# Or use the header attribute from another pysam.AlignmentFile.
# chromosome_sizes : PyRanges or dict
# If dict: map of chromosome names to chromosome length.
# chain : bool, default False
# Whether to return the PyRanges after writing.
# Note
# ----
# The following pyranges columns are used when writing:
# Chromosome, Start, End, Strand, MapQ, Flag, QueryStart, QueryEnd, Name, Cigar, Quality
# Examples
# --------
# >>> header = {"SQ": [{"SN": 1, "LN": 249250621}]}
# >>> c = '''Name Flag Chromosome Start End MapQ Cigar QuerySequence Quality
# read1 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:65536 ZL:i:25
# read2 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:214748 ZL:i:25
# read3 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:2147484 ZL:i:25
# read4 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:2147483647 ZL:i:25
# read5 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:-65536 ZL:i:25
# read6 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:-214748 ZL:i:25
# read7 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:-2147484 ZL:i:25
# read8 115 1 142618765 142618790 255 25M CGACCCACTCCGCCATTTTCATCCG IIGIIIHIGIIFIIIIIIIGIGIII NM:i:0ZP:i:-2147483647 ZL:i:25'''
# >>>
# """
def to_bed(self, path=None, keep=True, compression="infer", chain=False):
    r"""Write to bed.

    Parameters
    ----------
    path : str, default None
        Where to write. If None, returns string representation.
    keep : bool, default True
        Whether to keep all columns, not just Chromosome, Start, End,
        Name, Score, Strand when writing. Noncanonical bed columns are
        dropped when False.
    compression : str, compression type to use, by default infer based
        on extension. See pandas.DataFrame.to_csv for more info.
    chain : bool, default False
        Whether to return the PyRanges after writing.

    Returns
    -------
    str or None or PyRanges
        The bed representation when `path` is None; with `chain=True`
        the PyRanges itself is returned after writing.

    Examples
    --------
    >>> d = {'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...      'End': [5, 8], 'Strand': ['+', '-'], "Gene": [1, 2]}
    >>> gr = pr.from_dict(d)
    >>> print(gr.to_bed())
    chr1	1	5	.	.	+	1
    chr1	6	8	.	.	-	2
    <BLANKLINE>
    """
    from pyranges.out import _to_bed

    bed_output = _to_bed(self, path, keep=keep, compression=compression)
    # With chain=True a file write returns self so calls can be chained;
    # otherwise pass through whatever _to_bed produced.
    return self if (path and chain) else bed_output
def to_bigwig(self, path=None, chromosome_sizes=None, rpm=True, divide=None, value_col=None, dryrun=False, chain=False):
    """Write regular or value coverage to bigwig.

    Note
    ----

    To create one bigwig per strand, subset the PyRanges first.

    Parameters
    ----------
    path : str
        Where to write bigwig.

    chromosome_sizes : PyRanges or dict
        If dict: map of chromosome names to chromosome length. If None, the
        chromosome sizes bundled with pyranges (``pr.data.chromsizes()``) are used.

    rpm : bool, default True
        Whether to normalize data by dividing by total number of intervals and multiplying by
        1e6.

    divide : bool, default None
        (Only useful with value_col) Divide value coverage by regular coverage and take log2.

    value_col : str, default None
        Name of column to compute coverage of.

    dryrun : bool, default False
        Return data that would be written without writing bigwigs.

    chain : bool, default False
        Whether to return the PyRanges after writing.

    Note
    ----

    Requires pybigwig to be installed.

    If you require more control over the normalization process, use pyranges.to_bigwig()

    See Also
    --------
    pyranges.to_bigwig : write pandas DataFrame to bigwig.

    Examples
    --------

    >>> d = {'Chromosome': ['chr1', 'chr1', 'chr1'], 'Start': [1, 4, 6],
    ...      'End': [7, 8, 10], 'Strand': ['+', '-', '-'],
    ...      'Value': [10, 20, 30]}
    >>> gr = pr.from_dict(d)
    >>> gr
    +--------------+-----------+-----------+--------------+-----------+
    | Chromosome   |     Start |       End | Strand       |     Value |
    | (category)   |   (int32) |   (int32) | (category)   |   (int64) |
    |--------------+-----------+-----------+--------------+-----------|
    | chr1         |         1 |         7 | +            |        10 |
    | chr1         |         4 |         8 | -            |        20 |
    | chr1         |         6 |        10 | -            |        30 |
    +--------------+-----------+-----------+--------------+-----------+
    Stranded PyRanges object has 3 rows and 5 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.to_bigwig(dryrun=True, rpm=False)
    +--------------+-----------+-----------+-------------+
    | Chromosome   |     Start |       End |       Score |
    | (category)   |   (int32) |   (int32) |   (float64) |
    |--------------+-----------+-----------+-------------|
    | chr1         |         1 |         4 |           1 |
    | chr1         |         4 |         6 |           2 |
    | chr1         |         6 |         7 |           3 |
    | chr1         |         7 |         8 |           2 |
    | chr1         |         8 |        10 |           1 |
    +--------------+-----------+-----------+-------------+
    Unstranded PyRanges object has 5 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.to_bigwig(dryrun=True, rpm=False, value_col="Value")
    +--------------+-----------+-----------+-------------+
    | Chromosome   |     Start |       End |       Score |
    | (category)   |   (int32) |   (int32) |   (float64) |
    |--------------+-----------+-----------+-------------|
    | chr1         |         1 |         4 |          10 |
    | chr1         |         4 |         6 |          30 |
    | chr1         |         6 |         7 |          60 |
    | chr1         |         7 |         8 |          50 |
    | chr1         |         8 |        10 |          30 |
    +--------------+-----------+-----------+-------------+
    Unstranded PyRanges object has 5 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.to_bigwig(dryrun=True, rpm=False, value_col="Value", divide=True)
    +--------------+-----------+-----------+-------------+
    | Chromosome   |     Start |       End |       Score |
    | (category)   |   (int32) |   (int32) |   (float64) |
    |--------------+-----------+-----------+-------------|
    | chr1         |         0 |         1 |         nan |
    | chr1         |         1 |         4 |     3.32193 |
    | chr1         |         4 |         6 |     3.90689 |
    | chr1         |         6 |         7 |     4.32193 |
    | chr1         |         7 |         8 |     4.64386 |
    | chr1         |         8 |        10 |     4.90689 |
    +--------------+-----------+-----------+-------------+
    Unstranded PyRanges object has 6 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """

    from pyranges.out import _to_bigwig

    # No sizes given: fall back to the chromosome sizes shipped with pyranges.
    if chromosome_sizes is None:
        chromosome_sizes = pr.data.chromsizes()

    result = _to_bigwig(self, path, chromosome_sizes, rpm, divide, value_col, dryrun)

    if dryrun:
        # Return the coverage data that would have been written.
        return result

    if chain:
        return self
    else:
        # Implicitly returns None after the bigwig has been written.
        pass
def to_csv(self, path=None, sep=",", header=True, compression="infer", chain=False):
    r"""Write to comma- or other value-separated file.

    Parameters
    ----------
    path : str, default None, i.e. return string representation.
        Where to write file.

    sep : str, default ","
        String of length 1. Field delimiter for the output file.

    header : bool, default True
        Write out the column names.

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default "infer"
        Which compression to use. Uses file extension to infer by default.

    chain : bool, default False
        Whether to return the PyRanges after writing.

    Examples
    --------

    >>> d = {"Chromosome": [1] * 3, "Start": [1, 3, 5], "End": [4, 6, 9], "Feature": ["gene", "exon", "exon"]}
    >>> gr = pr.from_dict(d)
    >>> print(gr.to_csv(sep="\t"))
    Chromosome	Start	End	Feature
    1	1	4	gene
    1	3	6	exon
    1	5	9	exon
    <BLANKLINE>
    """

    from pyranges.out import _to_csv

    # Delegate the actual serialization; returns a string when path is None.
    written = _to_csv(self, path, sep=sep, header=header, compression=compression)

    return self if (path and chain) else written
def to_gff3(self, path=None, compression="infer", chain=False):
    """Write to General Feature Format 3.

    Parameters
    ----------
    path : str, default None, i.e. return string representation.
        Where to write file.

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default "infer"
        Which compression to use. Uses file extension to infer by default.

    chain : bool, default False
        Whether to return the PyRanges after writing.

    Notes
    -----

    GFF3 uses a different naming-convention for columns than PyRanges.

    This is the mapping between column names:

    ``{"seqname": "Chromosome", "source": "Source", "type": "Feature", "start": "Start", "end": "End", "score": "Score", "strand": "Strand", "phase": "Frame", "attributes": "Attribute"}``

    All other columns are appended as a field in the attribute string.

    Nonexisting columns will be added with a '.' to represent the missing values.

    See Also
    --------
    pyranges.read_gff3 : read GFF3 files
    pyranges.to_gtf : write to GTF format

    Examples
    --------

    >>> d = {"Chromosome": [1] * 3, "Start": [1, 3, 5], "End": [4, 6, 9], "Feature": ["gene", "exon", "exon"]}
    >>> gr = pr.from_dict(d)
    >>> print(gr.to_gff3())
    1	.	gene	2	4	.	.	.
    1	.	exon	4	6	.	.	.
    1	.	exon	6	9	.	.	.
    <BLANKLINE>

    >>> gr.Gene = [1, 2, 3]
    >>> gr.function = ["a b", "c", "def"]
    >>> print(gr.to_gff3())
    1	.	gene	2	4	.	.	.	Gene=1;function=a b
    1	.	exon	4	6	.	.	.	Gene=2;function=c
    1	.	exon	6	9	.	.	.	Gene=3;function=def
    <BLANKLINE>
    """

    from pyranges.out import _to_gff3

    # Returns the GFF3 text when path is None, otherwise writes to disk.
    result = _to_gff3(self, path, compression=compression)

    if path and chain:
        return self
    else:
        return result
def to_gtf(self, path=None, compression="infer", chain=False):
    """Write to Gene Transfer Format.

    Parameters
    ----------
    path : str, default None, i.e. return string representation.
        Where to write file.

    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default "infer"
        Which compression to use. Uses file extension to infer by default.

    chain : bool, default False
        Whether to return the PyRanges after writing.

    Notes
    -----

    GTF uses a different naming-convention for columns than PyRanges.

    This is the mapping between column names:

    ``{"seqname": "Chromosome", "source": "Source", "feature": "Feature", "start": "Start", "end": "End", "score": "Score", "strand": "Strand", "frame": "Frame", "attribute": "Attribute"}``

    All other columns are appended as a field in the attribute string.

    Nonexisting columns will be added with a '.' to represent the missing values.

    See Also
    --------
    pyranges.read_gtf : read GTF files
    pyranges.to_gff3 : write to GFF3 format

    Examples
    --------

    >>> d = {"Chromosome": [1] * 3, "Start": [1, 3, 5], "End": [4, 6, 9], "Feature": ["gene", "exon", "exon"]}
    >>> gr = pr.from_dict(d)
    >>> print(gr.to_gtf())
    1	.	gene	2	4	.	.	.
    1	.	exon	4	6	.	.	.
    1	.	exon	6	9	.	.	.
    <BLANKLINE>

    >>> gr.name = ["Tim", "Eric", "Endre"]
    >>> gr.prices = ["Cheap", "Premium", "Fine European"]
    >>> print(gr.to_gtf())
    1	.	gene	2	4	.	.	.	name "Tim"; prices "Cheap";
    1	.	exon	4	6	.	.	.	name "Eric"; prices "Premium";
    1	.	exon	6	9	.	.	.	name "Endre"; prices "Fine European";
    <BLANKLINE>
    """

    from pyranges.out import _to_gtf

    # Serialize; _to_gtf returns the GTF text when no path was given.
    gtf_repr = _to_gtf(self, path, compression=compression)

    if path and chain:
        return self

    return gtf_repr
def to_rle(self, value_col=None, strand=None, rpm=False, nb_cpu=1):
    """Return as RleDict.

    Create collection of Rles representing the coverage or other numerical value.

    Parameters
    ----------
    value_col : str, default None
        Numerical column to create RleDict from.

    strand : bool, default None, i.e. auto
        Whether to treat strands serparately.

    rpm : bool, default False
        Normalize by multiplying with `1e6/(number_intervals)`.

    nb_cpu : int, default 1
        How many cpus to use. Can at most use 1 per chromosome or chromosome/strand tuple.
        Will only lead to speedups on large datasets.

    Returns
    -------
    pyrle.RleDict

        Rle with coverage or other info from the PyRanges.

    Examples
    --------

    >>> d = {'Chromosome': ['chr1', 'chr1', 'chr1'], 'Start': [3, 8, 5],
    ...      'End': [6, 9, 7], 'Score': [0.1, 5, 3.14], 'Strand': ['+', '+', '-']}
    >>> gr = pr.from_dict(d)
    >>> gr.to_rle()
    chr1 +
    --
    +--------+-----+-----+-----+-----+
    | Runs   | 3   | 3   | 2   | 1   |
    |--------+-----+-----+-----+-----|
    | Values | 0.0 | 1.0 | 0.0 | 1.0 |
    +--------+-----+-----+-----+-----+
    Rle of length 9 containing 4 elements (avg. length 2.25)
    <BLANKLINE>
    chr1 -
    --
    +--------+-----+-----+
    | Runs   | 5   | 2   |
    |--------+-----+-----|
    | Values | 0.0 | 1.0 |
    +--------+-----+-----+
    Rle of length 7 containing 2 elements (avg. length 3.5)
    RleDict object with 2 chromosomes/strand pairs.

    >>> gr.to_rle(value_col="Score")
    chr1 +
    --
    +--------+-----+-----+-----+-----+
    | Runs   | 3   | 3   | 2   | 1   |
    |--------+-----+-----+-----+-----|
    | Values | 0.0 | 0.1 | 0.0 | 5.0 |
    +--------+-----+-----+-----+-----+
    Rle of length 9 containing 4 elements (avg. length 2.25)
    <BLANKLINE>
    chr1 -
    --
    +--------+-----+------+
    | Runs   | 5   | 2    |
    |--------+-----+------|
    | Values | 0.0 | 3.14 |
    +--------+-----+------+
    Rle of length 7 containing 2 elements (avg. length 3.5)
    RleDict object with 2 chromosomes/strand pairs.

    >>> gr.to_rle(value_col="Score", strand=False)
    chr1
    +--------+-----+-----+------+------+-----+-----+
    | Runs   | 3   | 2   | 1    | 1    | 1   | 1   |
    |--------+-----+-----+------+------+-----+-----|
    | Values | 0.0 | 0.1 | 3.24 | 3.14 | 0.0 | 5.0 |
    +--------+-----+-----+------+------+-----+-----+
    Rle of length 9 containing 6 elements (avg. length 1.5)
    Unstranded RleDict object with 1 chromosome.

    >>> gr.to_rle(rpm=True)
    chr1 +
    --
    +--------+-----+-------------------+-----+-------------------+
    | Runs   | 3   | 3                 | 2   | 1                 |
    |--------+-----+-------------------+-----+-------------------|
    | Values | 0.0 | 333333.3333333333 | 0.0 | 333333.3333333333 |
    +--------+-----+-------------------+-----+-------------------+
    Rle of length 9 containing 4 elements (avg. length 2.25)
    <BLANKLINE>
    chr1 -
    --
    +--------+-----+-------------------+
    | Runs   | 5   | 2                 |
    |--------+-----+-------------------|
    | Values | 0.0 | 333333.3333333333 |
    +--------+-----+-------------------+
    Rle of length 7 containing 2 elements (avg. length 3.5)
    RleDict object with 2 chromosomes/strand pairs.
    """

    from pyranges.methods.to_rle import _to_rle

    # strand=None means "auto": follow whether the PyRanges itself is stranded.
    use_strand = self.stranded if strand is None else strand

    return _to_rle(self, value_col, strand=use_strand, rpm=rpm, nb_cpu=nb_cpu)
def unstrand(self):
    """Remove strand.

    Note
    ----

    Removes Strand column even if PyRanges is not stranded.

    See Also
    --------
    PyRanges.stranded : whether PyRanges contains valid strand info.

    Examples
    --------

    >>> d = {'Chromosome': ['chr1', 'chr1'], 'Start': [1, 6],
    ...      'End': [5, 8], 'Strand': ['+', '-']}
    >>> gr = pr.from_dict(d)
    >>> gr
    +--------------+-----------+-----------+--------------+
    | Chromosome   |     Start |       End | Strand       |
    | (category)   |   (int32) |   (int32) | (category)   |
    |--------------+-----------+-----------+--------------|
    | chr1         |         1 |         5 | +            |
    | chr1         |         6 |         8 | -            |
    +--------------+-----------+-----------+--------------+
    Stranded PyRanges object has 2 rows and 4 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.unstrand()
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | chr1         |         1 |         5 |
    | chr1         |         6 |         8 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.
    """

    # Not stranded: drop an invalid Strand column if present, else nothing to do.
    if not self.stranded:
        if "Strand" in self.columns:
            return self.drop("Strand")
        return self

    # Stranded: merge both strands, drop the column and rebuild the PyRanges.
    merged = pr.concat([self["+"], self["-"]])
    merged = merged.apply(
        lambda df: df.drop("Strand", axis=1).reset_index(drop=True))

    return pr.PyRanges(merged.dfs)
def values(self):
    """Return the underlying DataFrames, skipping empty ones."""
    dataframes = []
    for _, df in self.items():
        if not df.empty:
            dataframes.append(df)
    return dataframes
def window(self, window_size, strand=None):
    """Return overlapping genomic windows.

    Windows of length `window_size` are returned.

    Parameters
    ----------
    window_size : int
        Length of the windows.

    strand : bool, default None, i.e. auto
        Whether to do operations on chromosome/strand pairs or chromosomes. If None, will use
        chromosome/strand pairs if the PyRanges is stranded.

    Returns
    -------
    PyRanges

        PyRanges with intervals split into windows of at most `window_size`.

    See also
    --------
    pyranges.PyRanges.tile : divide intervals into adjacent tiles.

    Examples
    --------

    >>> gr = pr.from_dict({"Chromosome": [1], "Start": [895], "End": [1259]})
    >>> gr
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | 1            |       895 |      1259 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 1 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr.window(200)
    +--------------+-----------+-----------+
    | Chromosome   |     Start |       End |
    | (category)   |   (int32) |   (int32) |
    |--------------+-----------+-----------|
    | 1            |       895 |      1095 |
    | 1            |      1095 |      1259 |
    +--------------+-----------+-----------+
    Unstranded PyRanges object has 2 rows and 3 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome.

    >>> gr = pr.data.ensembl_gtf()[["Feature", "gene_name"]]
    >>> gr
    +--------------+--------------+-----------+-----------+--------------+-------------+
    | Chromosome   | Feature      | Start     | End       | Strand       | gene_name   |
    | (category)   | (category)   | (int32)   | (int32)   | (category)   | (object)    |
    |--------------+--------------+-----------+-----------+--------------+-------------|
    | 1            | gene         | 11868     | 14409     | +            | DDX11L1     |
    | 1            | transcript   | 11868     | 14409     | +            | DDX11L1     |
    | 1            | exon         | 11868     | 12227     | +            | DDX11L1     |
    | 1            | exon         | 12612     | 12721     | +            | DDX11L1     |
    | ...          | ...          | ...       | ...       | ...          | ...         |
    | 1            | gene         | 1173055   | 1179555   | -            | TTLL10-AS1  |
    | 1            | transcript   | 1173055   | 1179555   | -            | TTLL10-AS1  |
    | 1            | exon         | 1179364   | 1179555   | -            | TTLL10-AS1  |
    | 1            | exon         | 1173055   | 1176396   | -            | TTLL10-AS1  |
    +--------------+--------------+-----------+-----------+--------------+-------------+
    Stranded PyRanges object has 2,446 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.

    >>> gr.window(1000)
    +--------------+--------------+-----------+-----------+--------------+-------------+
    | Chromosome   | Feature      | Start     | End       | Strand       | gene_name   |
    | (category)   | (category)   | (int32)   | (int32)   | (category)   | (object)    |
    |--------------+--------------+-----------+-----------+--------------+-------------|
    | 1            | gene         | 11868     | 12868     | +            | DDX11L1     |
    | 1            | gene         | 12868     | 13868     | +            | DDX11L1     |
    | 1            | gene         | 13868     | 14409     | +            | DDX11L1     |
    | 1            | transcript   | 11868     | 12868     | +            | DDX11L1     |
    | ...          | ...          | ...       | ...       | ...          | ...         |
    | 1            | exon         | 1173055   | 1174055   | -            | TTLL10-AS1  |
    | 1            | exon         | 1174055   | 1175055   | -            | TTLL10-AS1  |
    | 1            | exon         | 1175055   | 1176055   | -            | TTLL10-AS1  |
    | 1            | exon         | 1176055   | 1176396   | -            | TTLL10-AS1  |
    +--------------+--------------+-----------+-----------+--------------+-------------+
    Stranded PyRanges object has 7,516 rows and 6 columns from 1 chromosomes.
    For printing, the PyRanges was sorted on Chromosome and Strand.
    """

    from pyranges.methods.windows import _windows

    # strand=None means "auto": follow whether the PyRanges itself is stranded.
    if strand is None:
        strand = self.stranded

    kwargs = {"strand": strand}
    # _windows does not need the sparse representation of self.
    kwargs["sparse"] = {"self": False}
    kwargs["window_size"] = window_size

    df = pyrange_apply_single(_windows, self, **kwargs)

    return PyRanges(df)
def __getstate__(self):
    # Pickle only the underlying chromosome -> DataFrame mapping.
    return self.dfs
def __setstate__(self, d):
    # Restore the mapping directly into __dict__ to avoid triggering
    # any custom __setattr__ logic during unpickling.
    self.__dict__["dfs"] = d
|
biocore-ntnu/pyranges
|
pyranges/pyranges.py
|
Python
|
mit
| 254,818
|
[
"pysam"
] |
b34f235cb1cc8b0e1f529c58c85d3f77269ff6066a441187c23386a12d8f7260
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import HarmonicBond
from espressomd.interactions import FeneBond
from espressomd.observables import StressTensor
from tests_common import fene_force2
import numpy as np
# Allowed absolute deviation between simulated and analytical results.
tol = 1.0e-13
# analytical result for convective stress
def stress_kinetic(vel):
    """Kinetic (convective) stress tensor for particle velocities *vel*."""
    box_volume = np.prod(system.box_l)
    return np.einsum('ij,ik->jk', vel, vel) / box_volume
# analytical result for stress originating from bond force
def stress_bonded(pos):
    """Harmonic-bond stress tensor for consecutively bonded particle pairs in *pos*."""
    total = np.zeros([3, 3])
    box_volume = np.prod(system.box_l)
    # Particles are bonded pairwise: (0, 1), (2, 3), ...
    for first, second in zip(pos[0::2], pos[1::2]):
        separation = first - second
        # Harmonic force with k = 1e4 and r_0 = 0.
        force = -1.0e4 * separation
        total += np.einsum('i,j', force, separation) / box_volume
    return total
# analytical result for stress originating from non-bonded force
def stress_nonbonded(particle_pairs):
    """Lennard-Jones stress tensor summed over all interacting particle pairs.

    Only pairs with an LJ interaction configured in the test (types 0/0 and 1/2)
    contribute.
    """
    stress = np.zeros([3, 3])
    for p1, p2 in particle_pairs:
        if (p1.type == 0 and p2.type == 0) or (p1.type == 1 and p2.type == 2):
            d = p1.pos - p2.pos
            r = np.sqrt(np.sum(d**2))
            r_hat = d / r
            # LJ force magnitude with epsilon=1, sigma=1 (matches set_params below).
            f = (24.0 * 1.0 * (2.0 * 1.0**12 / r**13 - 1.0**6 / r**7)) * r_hat
            stress += np.einsum('i,j', f, d) / np.prod(system.box_l)
    return stress
def stress_nonbonded_inter(particle_pairs):
    """Inter-molecular Lennard-Jones stress tensor.

    Counts only type 1/2 pairs belonging to different molecules.
    """
    stress = np.zeros([3, 3])
    for p1, p2 in particle_pairs:
        if p1.type == 1 and p2.type == 2 and p1.mol_id != p2.mol_id:
            r = p1.pos - p2.pos
            d = np.sqrt(np.sum(r**2))
            r_hat = r / d
            # LJ force magnitude with epsilon=1, sigma=1.
            f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
            stress += np.einsum('i,j', f, r) / np.prod(system.box_l)
    return stress
def stress_nonbonded_intra(particle_pairs):
    """Intra-molecular Lennard-Jones stress tensor.

    Counts only type 0/0 pairs belonging to the same molecule.
    """
    stress = np.zeros([3, 3])
    for p1, p2 in particle_pairs:
        if p1.type == 0 and p2.type == 0 and p1.mol_id == p2.mol_id:
            r = p1.pos - p2.pos
            d = np.sqrt(np.sum(r**2))
            r_hat = r / d
            # LJ force magnitude with epsilon=1, sigma=1.
            f = (24.0 * 1.0 * (2.0 * 1.0**12 / d**13 - 1.0**6 / d**7)) * r_hat
            stress += np.einsum('i,j', f, r) / np.prod(system.box_l)
    return stress
# Module-level system shared by all test cases; box_l is reset inside each test.
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
@utx.skipIfMissingFeatures(['LENNARD_JONES'])
class Stress(ut.TestCase):
    """Compare the simulated stress tensor and pressure against analytical results
    for a system of harmonic bonds and Lennard-Jones pairs."""

    def test(self):
        # system parameters
        system.box_l = 3 * [10.0]
        skin = 0.4
        time_step = 0.01
        system.time_step = time_step

        # thermostat and cell system
        system.thermostat.set_langevin(kT=0.0, gamma=1.0, seed=41)
        system.cell_system.skin = skin
        system.periodicity = [1, 1, 1]

        # particles and bond
        # Two bonded pairs straddling the periodic boundary:
        # (0, 1) in molecule 0 and (2, 3) in molecules 1/2.
        system.part.add(id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0)
        system.part.add(id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0)
        system.part.add(id=2, pos=[0.1, 9.7, 0.1], type=1, mol_id=1)
        system.part.add(id=3, pos=[0.1, 10.3, 0.1], type=2, mol_id=2)

        harmonic = HarmonicBond(k=1e4, r_0=0)
        system.bonded_inter.add(harmonic)
        system.part[0].add_bond((harmonic, 1))
        system.part[2].add_bond((harmonic, 3))
        # LJ parameters must match those assumed in the analytical stress_* helpers.
        system.non_bonded_inter[0, 0].lennard_jones.set_params(
            epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)
        system.non_bonded_inter[1, 2].lennard_jones.set_params(
            epsilon=1.0, sigma=1.0, cutoff=2.0, shift=0)

        # Zero integration steps: just build neighbor lists and compute forces.
        system.integrator.run(steps=0)

        system.part[0].v = [10.0, 20.0, 30.0]
        system.part[1].v = [-15, -25, -35]
        system.part[2].v = [27.0, 23.0, 17.0]
        system.part[3].v = [13.0, 11.0, 19.0]

        pos = system.part[:].pos
        vel = system.part[:].v

        # Simulated stress tensor components.
        sim_stress_kinetic = system.analysis.stress_tensor()['kinetic']
        sim_stress_bonded = system.analysis.stress_tensor()['bonded']
        sim_stress_bonded_harmonic = system.analysis.stress_tensor()[
            'bonded', len(system.bonded_inter) - 1]
        sim_stress_nonbonded = system.analysis.stress_tensor()['non_bonded']
        sim_stress_nonbonded_inter = system.analysis.stress_tensor()[
            'non_bonded_inter']
        sim_stress_nonbonded_inter12 = system.analysis.stress_tensor()[
            'non_bonded_inter', 1, 2]
        sim_stress_nonbonded_intra = system.analysis.stress_tensor()[
            'non_bonded_intra']
        sim_stress_nonbonded_intra00 = system.analysis.stress_tensor()[
            'non_bonded_intra', 0, 0]
        sim_stress_total = system.analysis.stress_tensor()['total']

        # Simulated scalar pressure components.
        sim_pressure_kinetic = system.analysis.pressure()['kinetic']
        sim_pressure_bonded = system.analysis.pressure()['bonded']
        sim_pressure_bonded_harmonic = system.analysis.pressure()[
            'bonded', len(system.bonded_inter) - 1]
        sim_pressure_nonbonded = system.analysis.pressure()['non_bonded']
        sim_pressure_nonbonded_inter = system.analysis.pressure()[
            'non_bonded_inter']
        sim_pressure_nonbonded_inter12 = system.analysis.pressure()[
            'non_bonded_inter', 1, 2]
        sim_pressure_nonbonded_intra = system.analysis.pressure()[
            'non_bonded_intra']
        sim_pressure_nonbonded_intra00 = system.analysis.pressure()[
            'non_bonded_intra', 0, 0]
        sim_pressure_total = system.analysis.pressure()['total']

        # Analytical reference values from the module-level helpers.
        anal_stress_kinetic = stress_kinetic(vel)
        anal_stress_bonded = stress_bonded(pos)
        anal_stress_nonbonded = stress_nonbonded(system.part.pairs())
        anal_stress_nonbonded_inter = stress_nonbonded_inter(
            system.part.pairs())
        anal_stress_nonbonded_intra = stress_nonbonded_intra(
            system.part.pairs())
        anal_stress_total = anal_stress_kinetic + \
            anal_stress_bonded + anal_stress_nonbonded
        # Scalar pressure is one third of the stress tensor trace.
        anal_pressure_kinetic = np.einsum('ii', anal_stress_kinetic) / 3.0
        anal_pressure_bonded = np.einsum('ii', anal_stress_bonded) / 3.0
        anal_pressure_nonbonded = np.einsum('ii', anal_stress_nonbonded) / 3.0
        anal_pressure_nonbonded_inter = np.einsum(
            'ii', anal_stress_nonbonded_inter) / 3.0
        anal_pressure_nonbonded_intra = np.einsum(
            'ii', anal_stress_nonbonded_intra) / 3.0
        anal_pressure_total = anal_pressure_kinetic + \
            anal_pressure_bonded + anal_pressure_nonbonded

        system.part.clear()

        self.assertLess(np.max(np.abs(sim_stress_kinetic - anal_stress_kinetic)),
                        tol, 'kinetic stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_bonded - anal_stress_bonded)),
                        tol, 'bonded stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_bonded_harmonic - anal_stress_bonded)),
                        tol, 'bonded stress harmonic bond does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_nonbonded - anal_stress_nonbonded)),
                        tol, 'non-bonded stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_nonbonded_inter - anal_stress_nonbonded_inter)),
                        tol, 'non-bonded intermolecular stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_nonbonded_inter12 - anal_stress_nonbonded_inter)),
                        tol, 'non-bonded intermolecular stress molecules 1 and 2 does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_nonbonded_intra - anal_stress_nonbonded_intra)),
                        tol, 'non-bonded intramolecular stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_nonbonded_intra00 - anal_stress_nonbonded_intra)),
                        tol, 'non-bonded intramolecular stress molecule 0 does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_total - anal_stress_total)),
                        tol, 'total stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_total - sim_stress_kinetic - sim_stress_bonded - sim_stress_nonbonded)),
                        tol, 'total stress is not given as the sum of all major stress components')
        self.assertLess(np.abs(sim_pressure_kinetic - anal_pressure_kinetic),
                        tol, 'kinetic pressure does not match analytical result')
        self.assertLess(np.abs(sim_pressure_bonded - anal_pressure_bonded),
                        tol, 'bonded pressure does not match analytical result')
        self.assertLess(np.abs(sim_pressure_bonded_harmonic - anal_pressure_bonded),
                        tol, 'bonded pressure harmonic bond does not match analytical result')
        self.assertLess(np.abs(sim_pressure_nonbonded - anal_pressure_nonbonded),
                        tol, 'non-bonded pressure does not match analytical result')
        self.assertLess(np.abs(sim_pressure_nonbonded_inter - anal_pressure_nonbonded_inter),
                        tol, 'non-bonded intermolecular pressure does not match analytical result')
        self.assertLess(
            np.abs(sim_pressure_nonbonded_inter12 -
                   anal_pressure_nonbonded_inter), tol,
            'non-bonded intermolecular pressure molecule 1 and 2 does not match analytical result')
        self.assertLess(np.abs(sim_pressure_nonbonded_intra - anal_pressure_nonbonded_intra),
                        tol, 'non-bonded intramolecular pressure does not match analytical result')
        self.assertLess(np.abs(sim_pressure_nonbonded_intra00 - anal_pressure_nonbonded_intra),
                        tol, 'non-bonded intramolecular pressure molecule 0 does not match analytical result')
        self.assertLess(np.abs(sim_pressure_total - anal_pressure_total),
                        tol, 'total pressure does not match analytical result')
        self.assertLess(np.max(np.abs(sim_pressure_total - sim_pressure_kinetic - sim_pressure_bonded - sim_pressure_nonbonded)),
                        tol, 'total pressure is not given as the sum of all major pressure components')

        # Compare stress tensor observable to stress tensor from analysis
        np.testing.assert_allclose(
            StressTensor().calculate(),
            system.analysis.stress_tensor()["total"].reshape(9),
            atol=1E-10)
@utx.skipIfMissingFeatures(['EXTERNAL_FORCES'])
class StressFENE(ut.TestCase):
    """Compare the simulated bonded stress and pressure of a FENE bond
    against the analytical result."""

    def get_anal_stress_fene(self, pos_1, pos_2, k, d_r_max, r_0):
        """Analytical FENE-bond stress tensor for one bonded pair."""
        stress = np.zeros([3, 3])
        vec_r = pos_1 - pos_2
        f = -fene_force2(vec_r, k, d_r_max, r_0)
        stress += np.einsum('i,j', f, vec_r) / np.prod(system.box_l)
        return stress

    def test_fene(self):
        # system parameters
        system.box_l = 3 * [10.0]
        skin = 0.4
        time_step = 0.01
        system.time_step = time_step

        # thermostat and cell system
        system.cell_system.skin = skin
        system.periodicity = [1, 1, 1]

        # particles and bond
        # Fixed particles so the configuration does not relax during the run.
        system.part.add(
            id=0, pos=[9.9, 9.75, 9.9], type=0, mol_id=0, fix=[1, 1, 1])
        system.part.add(
            id=1, pos=[9.9, 10.25, 9.9], type=0, mol_id=0, fix=[1, 1, 1])

        k = 1e4
        d_r_max = 1.5
        r_0 = 0.1

        fene = FeneBond(k=k, d_r_max=d_r_max, r_0=r_0)
        system.bonded_inter.add(fene)
        system.part[0].add_bond((fene, 1))
        # Zero integration steps: just compute forces for the fixed configuration.
        system.integrator.run(steps=0)

        sim_stress_bonded = system.analysis.stress_tensor()['bonded']
        sim_stress_fene = system.analysis.stress_tensor()[
            'bonded', len(system.bonded_inter) - 1]

        total_bonded_stresses = np.zeros([3, 3])
        for i in range(len(system.bonded_inter)):
            total_bonded_stresses = np.add(
                total_bonded_stresses, system.analysis.stress_tensor()['bonded', i])

        anal_stress_fene = self.get_anal_stress_fene(
            system.part[0].pos, system.part[1].pos, k, d_r_max, r_0)
        self.assertLess(np.max(np.abs(sim_stress_bonded - anal_stress_fene)),
                        tol, 'bonded stress does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_fene - anal_stress_fene)),
                        tol, 'bonded stress for fene does not match analytical result')
        self.assertLess(np.max(np.abs(sim_stress_bonded - total_bonded_stresses)),
                        tol, 'bonded stresses do not sum up to the total value')

        sim_pressure_fene = system.analysis.pressure()[
            'bonded', len(system.bonded_inter) - 1]
        # Scalar pressure is one third of the stress tensor trace.
        anal_pressure_fene = np.einsum("ii", anal_stress_fene) / 3.0

        self.assertLess(np.max(np.abs(sim_pressure_fene - anal_pressure_fene)),
                        tol, 'bonded pressure for fene does not match analytical result')

        # Compare stress tensor observable to stress tensor from analysis
        np.testing.assert_allclose(
            StressTensor().calculate(),
            system.analysis.stress_tensor()["total"].reshape(9),
            atol=1E-10)

        system.part.clear()
# Run the test suite when executed as a script.
if __name__ == "__main__":
    ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/stress.py
|
Python
|
gpl-3.0
| 13,812
|
[
"ESPResSo"
] |
4c710a6a8fff63a24a6181856f676bb1cd21b4f2f97a47c28b42aa053ae1259f
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.